Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #error VMA Recording functionality is not yet available for non-Windows platforms
    #endif
#endif

// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to, if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example,
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator in question.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers, then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    /// Optional, can be null.
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice VMA_NOT_NULL device;
    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of each Vulkan memory heap.
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null.
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
    /// Handle to Vulkan instance object.
    VkInstance VMA_NOT_NULL instance;
    /// Optional. The highest version of Vulkan that the application is designed to use.
    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);

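// A minimal usage sketch: creating and later destroying an allocator. Assumes
// `instance`, `physicalDevice`, and `device` are valid Vulkan handles created
// elsewhere by the application; error handling is omitted.
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... create resources, allocate memory ...
vmaDestroyAllocator(allocator);
*/
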
typedef struct VmaAllocatorInfo
{
    VkInstance VMA_NOT_NULL instance;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of #VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;
    VkDeviceSize allocationBytes;
    VkDeviceSize usage;
    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED
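
// A usage sketch: dumping detailed statistics to a JSON string and releasing it.
// Assumes `allocator` is a valid VmaAllocator and VMA_STATS_STRING_ENABLED is 1.
/*
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/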

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,

    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional.
    VmaPool VMA_NULLABLE pool;
    /// Custom general-purpose pointer that will be stored in #VmaAllocation.
    void* VMA_NULLABLE pUserData;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

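// A usage sketch: querying the memory type index a staging buffer would use,
// e.g. to feed into VmaPoolCreateInfo::memoryTypeIndex below. Assumes
// `allocator` is a valid VmaAllocator; error handling is omitted.
/*
VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
stagingBufInfo.size = 65536;
stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo stagingAllocInfo = {};
stagingAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex;
vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &stagingBufInfo, &stagingAllocInfo, &memTypeIndex);
*/
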
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameter of created #VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of #VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameter of existing #VmaPool.
typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);

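// A usage sketch: creating a custom pool for a specific memory type and naming
// it. Assumes `allocator` is valid and `memTypeIndex` was obtained via one of
// the vmaFindMemoryTypeIndex* functions above; error handling is omitted.
/*
VmaPoolCreateInfo poolInfo = {};
poolInfo.memoryTypeIndex = memTypeIndex;
poolInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB blocks.
poolInfo.maxBlockCount = 4;

VmaPool pool;
vmaCreatePool(allocator, &poolInfo, &pool);
vmaSetPoolName(allocator, pool, "Staging buffers");
// ... allocate from the pool via VmaAllocationCreateInfo::pool ...
vmaDestroyPool(allocator, pool);
*/
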
VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data.
    void* VMA_NULLABLE pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize newSize);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

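// A usage sketch: writing data through a temporary mapping. Assumes `allocation`
// was created in a host-visible memory type (e.g. VMA_MEMORY_USAGE_CPU_TO_GPU)
// and `srcData`/`srcSize` describe the bytes to upload.
/*
void* pData;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, allocation);
    // Needed if the memory type is not HOST_COHERENT (see vmaFlushAllocation below):
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}
*/
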
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation. To be used with function vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    /// Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    /// Number of allocations in pAllocations array.
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

/// Parameters for incremental defragmentation steps.
typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

/// Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);

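// A usage sketch: CPU-side defragmentation of a set of allocations. Assumes
// `allocator` is valid and `allocations`/`allocCount` describe allocations whose
// buffers/images have already been destroyed or unbound; error handling is omitted.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Afterwards, recreate the buffers/images and bind them at the new offsets.
*/
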
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

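// A usage sketch: creating a buffer together with its memory in one call.
// Assumes `allocator` is a valid VmaAllocator; error handling is omitted.
/*
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
// ...
vmaDestroyBuffer(allocator, buffer, allocation); // declared below
*/
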
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);

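// A usage sketch: creating a 2D image and its memory in one call. Assumes
// `allocator` is valid; error handling is omitted.
/*
VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.extent = { 1024, 1024, 1 };
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, nullptr);
// ...
vmaDestroyImage(allocator, image, allocation);
*/
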
#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
The library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Every allocation will have its own memory block. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all allocations, in bytes. Set to more than 1 for debugging purposes only. Must be power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin before and after every allocation, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    // Define this macro to 1 to automatically fill new allocations and destroyed allocations with some bit pattern.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    // Define this macro to 1 together with a non-zero VMA_DEBUG_MARGIN to enable writing and validation of magic values around allocations.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

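// For example, VmaCountBitsSet(0x0000000B) == 3, since 0xB is binary 1011.
// This is the classic SWAR (SIMD-within-a-register) popcount: each step sums
// bit counts in progressively wider fields (2, 4, 8, 16, then 32 bits).
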
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}
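
// For example, VmaNextPow2(17u) == 32 and VmaNextPow2(32u) == 32: the bit-smearing
// ORs propagate the highest set bit of (v - 1) into all lower positions, so the
// final increment lands exactly on the next power of two.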

// Returns largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is
a buffer or linear image and the other one is an optimal image. If a type is
unknown, behave conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

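// A usage sketch: finding the lower bound of a key in a sorted array, analogous
// to std::lower_bound. Assumes the range is sorted according to `cmp`.
/*
uint32_t arr[] = { 1, 3, 3, 7, 9 };
const auto cmp = [](uint32_t a, uint32_t b) { return a < b; };
uint32_t* it = VmaBinaryFindFirstNotLess(arr, arr + 5, 3u, cmp);
// it points to the first 3 (index 1); searching for 4u would yield index 3 (the 7).
*/
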
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are not-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
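
// A usage sketch: placement-new through the callback-aware allocator and the
// matching destroy call. Assumes `allocs` is a `const VkAllocationCallbacks*`
// (it may be null, in which case the system aligned allocator is used).
/*
struct Node { int value; };
Node* node = vma_new(allocs, Node);
node->value = 42;
vma_delete(allocs, node);
*/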

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

4708 
4709 // STL-compatible allocator.
4710 template<typename T>
4711 class VmaStlAllocator
4712 {
4713 public:
4714  const VkAllocationCallbacks* const m_pCallbacks;
4715  typedef T value_type;
4716 
4717  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4718  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4719 
4720  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4721  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4722 
4723  template<typename U>
4724  bool operator==(const VmaStlAllocator<U>& rhs) const
4725  {
4726  return m_pCallbacks == rhs.m_pCallbacks;
4727  }
4728  template<typename U>
4729  bool operator!=(const VmaStlAllocator<U>& rhs) const
4730  {
4731  return m_pCallbacks != rhs.m_pCallbacks;
4732  }
4733 
4734  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4735 };
4736 
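// A usage sketch: plugging VmaStlAllocator into a standard container so the
// container's storage goes through the user-provided VkAllocationCallbacks.
// Assumes `allocs` is a `const VkAllocationCallbacks*` and <vector> is included.
/*
std::vector<uint32_t, VmaStlAllocator<uint32_t>> indices{ VmaStlAllocator<uint32_t>(allocs) };
indices.push_back(7);
*/
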
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator)
        : VmaVector(count, allocator) {}

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

4852  void reserve(size_t newCapacity, bool freeMemory = false)
4853  {
4854  newCapacity = VMA_MAX(newCapacity, m_Count);
4855 
4856  if((newCapacity < m_Capacity) && !freeMemory)
4857  {
4858  newCapacity = m_Capacity;
4859  }
4860 
4861  if(newCapacity != m_Capacity)
4862  {
4863  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4864  if(m_Count != 0)
4865  {
4866  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4867  }
4868  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4869  m_Capacity = newCapacity;
4870  m_pArray = newArray;
4871  }
4872  }
4873 
4874  void resize(size_t newCount, bool freeMemory = false)
4875  {
4876  size_t newCapacity = m_Capacity;
4877  if(newCount > m_Capacity)
4878  {
4879  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4880  }
4881  else if(freeMemory)
4882  {
4883  newCapacity = newCount;
4884  }
4885 
4886  if(newCapacity != m_Capacity)
4887  {
4888  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4889  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4890  if(elementsToCopy != 0)
4891  {
4892  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4893  }
4894  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4895  m_Capacity = newCapacity;
4896  m_pArray = newArray;
4897  }
4898 
4899  m_Count = newCount;
4900  }
4901 
4902  void clear(bool freeMemory = false)
4903  {
4904  resize(0, freeMemory);
4905  }
4906 
4907  void insert(size_t index, const T& src)
4908  {
4909  VMA_HEAVY_ASSERT(index <= m_Count);
4910  const size_t oldCount = size();
4911  resize(oldCount + 1);
4912  if(index < oldCount)
4913  {
4914  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4915  }
4916  m_pArray[index] = src;
4917  }
4918 
4919  void remove(size_t index)
4920  {
4921  VMA_HEAVY_ASSERT(index < m_Count);
4922  const size_t oldCount = size();
4923  if(index < oldCount - 1)
4924  {
4925  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4926  }
4927  resize(oldCount - 1);
4928  }
4929 
4930  void push_back(const T& src)
4931  {
4932  const size_t newIndex = size();
4933  resize(newIndex + 1);
4934  m_pArray[newIndex] = src;
4935  }
4936 
4937  void pop_back()
4938  {
4939  VMA_HEAVY_ASSERT(m_Count > 0);
4940  resize(size() - 1);
4941  }
4942 
4943  void push_front(const T& src)
4944  {
4945  insert(0, src);
4946  }
4947 
4948  void pop_front()
4949  {
4950  VMA_HEAVY_ASSERT(m_Count > 0);
4951  remove(0);
4952  }
4953 
4954  typedef T* iterator;
4955 
4956  iterator begin() { return m_pArray; }
4957  iterator end() { return m_pArray + m_Count; }
4958 
4959 private:
4960  AllocatorT m_Allocator;
4961  T* m_pArray;
4962  size_t m_Count;
4963  size_t m_Capacity;
4964 };
4965 
4966 template<typename T, typename allocatorT>
4967 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4968 {
4969  vec.insert(index, item);
4970 }
4971 
4972 template<typename T, typename allocatorT>
4973 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4974 {
4975  vec.remove(index);
4976 }
4977 
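/*
A worked example of the growth policy in resize() above (illustrative):
starting from an empty vector and calling push_back() repeatedly, the capacity
grows 0 -> 8 -> 12 -> 18 -> 27 -> ..., following
newCapacity = max(newCount, max(oldCapacity * 3 / 2, 8)),
so reallocation cost is amortized O(1) per push_back.
*/
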
4978 #endif // #if VMA_USE_STL_VECTOR
4979 
4980 template<typename CmpLess, typename VectorT>
4981 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4982 {
4983  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4984  vector.data(),
4985  vector.data() + vector.size(),
4986  value,
4987  CmpLess()) - vector.data();
4988  VmaVectorInsert(vector, indexToInsert, value);
4989  return indexToInsert;
4990 }
4991 
4992 template<typename CmpLess, typename VectorT>
4993 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4994 {
4995  CmpLess comparator;
4996  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4997  vector.begin(),
4998  vector.end(),
4999  value,
5000  comparator);
5001  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5002  {
5003  size_t indexToRemove = it - vector.begin();
5004  VmaVectorRemove(vector, indexToRemove);
5005  return true;
5006  }
5007  return false;
5008 }
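// A short usage sketch of the two helpers above (the function, comparator, and
// `pCallbacks` are hypothetical). The CmpLess functor defines the ordering and
// keeps the vector sorted ascending; lookup is binary search, while insertion
// and removal still shift elements, so both are O(log n) + O(n).
#if 0
struct IntLess { bool operator()(int a, int b) const { return a < b; } };

static void VmaSortedVectorExample(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<int> alloc(pCallbacks);
    VmaVector<int, VmaStlAllocator<int> > v(alloc);
    VmaVectorInsertSorted<IntLess>(v, 5);
    VmaVectorInsertSorted<IntLess>(v, 1);
    VmaVectorInsertSorted<IntLess>(v, 3);  // v = {1, 3, 5}
    VmaVectorRemoveSorted<IntLess>(v, 3);  // Returns true; v = {1, 5}
}
#endif
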
5009 
5011 // class VmaSmallVector
5012 
5013 /*
5014 This is a vector (a variable-sized array), optimized for the case when the array is small.
5015 
5016 It contains up to N elements in-place, which allows it to avoid heap allocation
5017 when the actual number of elements is below that threshold. This keeps the common
5018 "small" case fast without losing generality for large inputs.
5019 */
5020 
5021 template<typename T, typename AllocatorT, size_t N>
5022 class VmaSmallVector
5023 {
5024 public:
5025  typedef T value_type;
5026 
5027  VmaSmallVector(const AllocatorT& allocator) :
5028  m_Count(0),
5029  m_DynamicArray(allocator)
5030  {
5031  }
5032  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5033  m_Count(count),
5034  m_DynamicArray(count > N ? count : 0, allocator)
5035  {
5036  }
5037  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5038  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5039  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5040  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5041 
5042  bool empty() const { return m_Count == 0; }
5043  size_t size() const { return m_Count; }
5044  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5045  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5046 
5047  T& operator[](size_t index)
5048  {
5049  VMA_HEAVY_ASSERT(index < m_Count);
5050  return data()[index];
5051  }
5052  const T& operator[](size_t index) const
5053  {
5054  VMA_HEAVY_ASSERT(index < m_Count);
5055  return data()[index];
5056  }
5057 
5058  T& front()
5059  {
5060  VMA_HEAVY_ASSERT(m_Count > 0);
5061  return data()[0];
5062  }
5063  const T& front() const
5064  {
5065  VMA_HEAVY_ASSERT(m_Count > 0);
5066  return data()[0];
5067  }
5068  T& back()
5069  {
5070  VMA_HEAVY_ASSERT(m_Count > 0);
5071  return data()[m_Count - 1];
5072  }
5073  const T& back() const
5074  {
5075  VMA_HEAVY_ASSERT(m_Count > 0);
5076  return data()[m_Count - 1];
5077  }
5078 
5079  void resize(size_t newCount, bool freeMemory = false)
5080  {
5081  if(newCount > N && m_Count > N)
5082  {
5083  // Any direction, staying in m_DynamicArray
5084  m_DynamicArray.resize(newCount, freeMemory);
5085  }
5086  else if(newCount > N && m_Count <= N)
5087  {
5088  // Growing, moving from m_StaticArray to m_DynamicArray
5089  m_DynamicArray.resize(newCount, freeMemory);
5090  if(m_Count > 0)
5091  {
5092  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5093  }
5094  }
5095  else if(newCount <= N && m_Count > N)
5096  {
5097  // Shrinking, moving from m_DynamicArray to m_StaticArray
5098  if(newCount > 0)
5099  {
5100  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5101  }
5102  m_DynamicArray.resize(0, freeMemory);
5103  }
5104  else
5105  {
5106  // Any direction, staying in m_StaticArray - nothing to do here
5107  }
5108  m_Count = newCount;
5109  }
5110 
5111  void clear(bool freeMemory = false)
5112  {
5113  m_DynamicArray.clear(freeMemory);
5114  m_Count = 0;
5115  }
5116 
5117  void insert(size_t index, const T& src)
5118  {
5119  VMA_HEAVY_ASSERT(index <= m_Count);
5120  const size_t oldCount = size();
5121  resize(oldCount + 1);
5122  T* const dataPtr = data();
5123  if(index < oldCount)
5124  {
5125  // Suboptimal: when resize() has just moved the elements from m_StaticArray to m_DynamicArray, this memmove could be merged into that copy.
5126  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5127  }
5128  dataPtr[index] = src;
5129  }
5130 
5131  void remove(size_t index)
5132  {
5133  VMA_HEAVY_ASSERT(index < m_Count);
5134  const size_t oldCount = size();
5135  if(index < oldCount - 1)
5136  {
5137  // Suboptimal: when the subsequent resize() moves the elements from m_DynamicArray back to m_StaticArray, this memmove could be merged into that copy.
5138  T* const dataPtr = data();
5139  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5140  }
5141  resize(oldCount - 1);
5142  }
5143 
5144  void push_back(const T& src)
5145  {
5146  const size_t newIndex = size();
5147  resize(newIndex + 1);
5148  data()[newIndex] = src;
5149  }
5150 
5151  void pop_back()
5152  {
5153  VMA_HEAVY_ASSERT(m_Count > 0);
5154  resize(size() - 1);
5155  }
5156 
5157  void push_front(const T& src)
5158  {
5159  insert(0, src);
5160  }
5161 
5162  void pop_front()
5163  {
5164  VMA_HEAVY_ASSERT(m_Count > 0);
5165  remove(0);
5166  }
5167 
5168  typedef T* iterator;
5169 
5170  iterator begin() { return data(); }
5171  iterator end() { return data() + m_Count; }
5172 
5173 private:
5174  size_t m_Count;
5175  T m_StaticArray[N]; // Used when m_Count <= N
5176  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5177 };
5178 
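// An illustrative sketch (not part of the library): with N = 4, the first
// four push_back() calls stay in m_StaticArray with no heap traffic; the
// fifth crosses the threshold and the contents move to m_DynamicArray.
#if 0
static void VmaSmallVectorExample(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> sv(alloc);
    for(uint32_t i = 0; i < 4; ++i)
        sv.push_back(i); // In-place storage, no allocation.
    sv.push_back(4);     // Grows past N: elements are copied to the heap.
    sv.resize(2);        // Shrinks below N: copied back in-place.
}
#endif
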
5180 // class VmaPoolAllocator
5181 
5182 /*
5183 Allocator for objects of type T using a list of arrays (pools) to speed up
5184 allocation. The number of elements that can be allocated is not bounded, because
5185 the allocator can create multiple blocks.
5186 */
5187 template<typename T>
5188 class VmaPoolAllocator
5189 {
5190  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5191 public:
5192  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5193  ~VmaPoolAllocator();
5194  template<typename... Types> T* Alloc(Types... args);
5195  void Free(T* ptr);
5196 
5197 private:
5198  union Item
5199  {
5200  uint32_t NextFreeIndex;
5201  alignas(T) char Value[sizeof(T)];
5202  };
5203 
5204  struct ItemBlock
5205  {
5206  Item* pItems;
5207  uint32_t Capacity;
5208  uint32_t FirstFreeIndex;
5209  };
5210 
5211  const VkAllocationCallbacks* m_pAllocationCallbacks;
5212  const uint32_t m_FirstBlockCapacity;
5213  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5214 
5215  ItemBlock& CreateNewBlock();
5216 };
5217 
5218 template<typename T>
5219 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5220  m_pAllocationCallbacks(pAllocationCallbacks),
5221  m_FirstBlockCapacity(firstBlockCapacity),
5222  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5223 {
5224  VMA_ASSERT(m_FirstBlockCapacity > 1);
5225 }
5226 
5227 template<typename T>
5228 VmaPoolAllocator<T>::~VmaPoolAllocator()
5229 {
5230  for(size_t i = m_ItemBlocks.size(); i--; )
5231  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5232  m_ItemBlocks.clear();
5233 }
5234 
5235 template<typename T>
5236 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5237 {
5238  for(size_t i = m_ItemBlocks.size(); i--; )
5239  {
5240  ItemBlock& block = m_ItemBlocks[i];
5241  // This block has some free items: use the first one.
5242  if(block.FirstFreeIndex != UINT32_MAX)
5243  {
5244  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5245  block.FirstFreeIndex = pItem->NextFreeIndex;
5246  T* result = (T*)&pItem->Value;
5247  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5248  return result;
5249  }
5250  }
5251 
5252  // No block has a free item: create a new one and use it.
5253  ItemBlock& newBlock = CreateNewBlock();
5254  Item* const pItem = &newBlock.pItems[0];
5255  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5256  T* result = (T*)&pItem->Value;
5257  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5258  return result;
5259 }
5260 
5261 template<typename T>
5262 void VmaPoolAllocator<T>::Free(T* ptr)
5263 {
5264  // Search all memory blocks to find ptr.
5265  for(size_t i = m_ItemBlocks.size(); i--; )
5266  {
5267  ItemBlock& block = m_ItemBlocks[i];
5268 
5269  // Casting to union.
5270  Item* pItemPtr;
5271  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5272 
5273  // Check if pItemPtr is in address range of this block.
5274  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5275  {
5276  ptr->~T(); // Explicit destructor call.
5277  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5278  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5279  block.FirstFreeIndex = index;
5280  return;
5281  }
5282  }
5283  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5284 }
5285 
5286 template<typename T>
5287 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5288 {
5289  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5290  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5291 
5292  const ItemBlock newBlock = {
5293  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5294  newBlockCapacity,
5295  0 };
5296 
5297  m_ItemBlocks.push_back(newBlock);
5298 
5299  // Set up a singly-linked list of all free items in this block.
5300  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5301  newBlock.pItems[i].NextFreeIndex = i + 1;
5302  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5303  return m_ItemBlocks.back();
5304 }
5305 
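// A brief sketch of the intended usage pattern (ExampleItem and the function
// are hypothetical): objects are constructed in-place inside pooled blocks
// and recycled through the per-block free list, so a hot Alloc/Free loop
// avoids the heap entirely.
#if 0
struct ExampleItem
{
    int id; float weight;
    ExampleItem(int id_, float weight_) : id(id_), weight(weight_) { }
};

static void VmaPoolAllocatorExample(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<ExampleItem> pool(pCallbacks, 32); // First block holds 32 items.
    ExampleItem* p = pool.Alloc(1, 2.0f); // Placement-new inside a block.
    pool.Free(p);                         // Destroys *p; slot returns to the free list.
    ExampleItem* q = pool.Alloc(3, 4.0f); // Typically reuses the same slot.
    pool.Free(q);
}
#endif
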
5307 // class VmaRawList, VmaList
5308 
5309 #if VMA_USE_STL_LIST
5310 
5311 #define VmaList std::list
5312 
5313 #else // #if VMA_USE_STL_LIST
5314 
5315 template<typename T>
5316 struct VmaListItem
5317 {
5318  VmaListItem* pPrev;
5319  VmaListItem* pNext;
5320  T Value;
5321 };
5322 
5323 // Doubly linked list.
5324 template<typename T>
5325 class VmaRawList
5326 {
5327  VMA_CLASS_NO_COPY(VmaRawList)
5328 public:
5329  typedef VmaListItem<T> ItemType;
5330 
5331  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5332  ~VmaRawList();
5333  void Clear();
5334 
5335  size_t GetCount() const { return m_Count; }
5336  bool IsEmpty() const { return m_Count == 0; }
5337 
5338  ItemType* Front() { return m_pFront; }
5339  const ItemType* Front() const { return m_pFront; }
5340  ItemType* Back() { return m_pBack; }
5341  const ItemType* Back() const { return m_pBack; }
5342 
5343  ItemType* PushBack();
5344  ItemType* PushFront();
5345  ItemType* PushBack(const T& value);
5346  ItemType* PushFront(const T& value);
5347  void PopBack();
5348  void PopFront();
5349 
5350  // Item can be null - it means PushBack.
5351  ItemType* InsertBefore(ItemType* pItem);
5352  // Item can be null - it means PushFront.
5353  ItemType* InsertAfter(ItemType* pItem);
5354 
5355  ItemType* InsertBefore(ItemType* pItem, const T& value);
5356  ItemType* InsertAfter(ItemType* pItem, const T& value);
5357 
5358  void Remove(ItemType* pItem);
5359 
5360 private:
5361  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5362  VmaPoolAllocator<ItemType> m_ItemAllocator;
5363  ItemType* m_pFront;
5364  ItemType* m_pBack;
5365  size_t m_Count;
5366 };
5367 
5368 template<typename T>
5369 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5370  m_pAllocationCallbacks(pAllocationCallbacks),
5371  m_ItemAllocator(pAllocationCallbacks, 128),
5372  m_pFront(VMA_NULL),
5373  m_pBack(VMA_NULL),
5374  m_Count(0)
5375 {
5376 }
5377 
5378 template<typename T>
5379 VmaRawList<T>::~VmaRawList()
5380 {
5381  // Intentionally not calling Clear, because that would waste time returning
5382  // all items to m_ItemAllocator as free - the allocator is destroyed anyway.
5383 }
5384 
5385 template<typename T>
5386 void VmaRawList<T>::Clear()
5387 {
5388  if(IsEmpty() == false)
5389  {
5390  ItemType* pItem = m_pBack;
5391  while(pItem != VMA_NULL)
5392  {
5393  ItemType* const pPrevItem = pItem->pPrev;
5394  m_ItemAllocator.Free(pItem);
5395  pItem = pPrevItem;
5396  }
5397  m_pFront = VMA_NULL;
5398  m_pBack = VMA_NULL;
5399  m_Count = 0;
5400  }
5401 }
5402 
5403 template<typename T>
5404 VmaListItem<T>* VmaRawList<T>::PushBack()
5405 {
5406  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5407  pNewItem->pNext = VMA_NULL;
5408  if(IsEmpty())
5409  {
5410  pNewItem->pPrev = VMA_NULL;
5411  m_pFront = pNewItem;
5412  m_pBack = pNewItem;
5413  m_Count = 1;
5414  }
5415  else
5416  {
5417  pNewItem->pPrev = m_pBack;
5418  m_pBack->pNext = pNewItem;
5419  m_pBack = pNewItem;
5420  ++m_Count;
5421  }
5422  return pNewItem;
5423 }
5424 
5425 template<typename T>
5426 VmaListItem<T>* VmaRawList<T>::PushFront()
5427 {
5428  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5429  pNewItem->pPrev = VMA_NULL;
5430  if(IsEmpty())
5431  {
5432  pNewItem->pNext = VMA_NULL;
5433  m_pFront = pNewItem;
5434  m_pBack = pNewItem;
5435  m_Count = 1;
5436  }
5437  else
5438  {
5439  pNewItem->pNext = m_pFront;
5440  m_pFront->pPrev = pNewItem;
5441  m_pFront = pNewItem;
5442  ++m_Count;
5443  }
5444  return pNewItem;
5445 }
5446 
5447 template<typename T>
5448 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5449 {
5450  ItemType* const pNewItem = PushBack();
5451  pNewItem->Value = value;
5452  return pNewItem;
5453 }
5454 
5455 template<typename T>
5456 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5457 {
5458  ItemType* const pNewItem = PushFront();
5459  pNewItem->Value = value;
5460  return pNewItem;
5461 }
5462 
5463 template<typename T>
5464 void VmaRawList<T>::PopBack()
5465 {
5466  VMA_HEAVY_ASSERT(m_Count > 0);
5467  ItemType* const pBackItem = m_pBack;
5468  ItemType* const pPrevItem = pBackItem->pPrev;
5469  if(pPrevItem != VMA_NULL)
5470  {
5471  pPrevItem->pNext = VMA_NULL;
5472  }
5473  m_pBack = pPrevItem;
5474  m_ItemAllocator.Free(pBackItem);
5475  --m_Count;
5476 }
5477 
5478 template<typename T>
5479 void VmaRawList<T>::PopFront()
5480 {
5481  VMA_HEAVY_ASSERT(m_Count > 0);
5482  ItemType* const pFrontItem = m_pFront;
5483  ItemType* const pNextItem = pFrontItem->pNext;
5484  if(pNextItem != VMA_NULL)
5485  {
5486  pNextItem->pPrev = VMA_NULL;
5487  }
5488  m_pFront = pNextItem;
5489  m_ItemAllocator.Free(pFrontItem);
5490  --m_Count;
5491 }
5492 
5493 template<typename T>
5494 void VmaRawList<T>::Remove(ItemType* pItem)
5495 {
5496  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5497  VMA_HEAVY_ASSERT(m_Count > 0);
5498 
5499  if(pItem->pPrev != VMA_NULL)
5500  {
5501  pItem->pPrev->pNext = pItem->pNext;
5502  }
5503  else
5504  {
5505  VMA_HEAVY_ASSERT(m_pFront == pItem);
5506  m_pFront = pItem->pNext;
5507  }
5508 
5509  if(pItem->pNext != VMA_NULL)
5510  {
5511  pItem->pNext->pPrev = pItem->pPrev;
5512  }
5513  else
5514  {
5515  VMA_HEAVY_ASSERT(m_pBack == pItem);
5516  m_pBack = pItem->pPrev;
5517  }
5518 
5519  m_ItemAllocator.Free(pItem);
5520  --m_Count;
5521 }
5522 
5523 template<typename T>
5524 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5525 {
5526  if(pItem != VMA_NULL)
5527  {
5528  ItemType* const prevItem = pItem->pPrev;
5529  ItemType* const newItem = m_ItemAllocator.Alloc();
5530  newItem->pPrev = prevItem;
5531  newItem->pNext = pItem;
5532  pItem->pPrev = newItem;
5533  if(prevItem != VMA_NULL)
5534  {
5535  prevItem->pNext = newItem;
5536  }
5537  else
5538  {
5539  VMA_HEAVY_ASSERT(m_pFront == pItem);
5540  m_pFront = newItem;
5541  }
5542  ++m_Count;
5543  return newItem;
5544  }
5545  else
5546  return PushBack();
5547 }
5548 
5549 template<typename T>
5550 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5551 {
5552  if(pItem != VMA_NULL)
5553  {
5554  ItemType* const nextItem = pItem->pNext;
5555  ItemType* const newItem = m_ItemAllocator.Alloc();
5556  newItem->pNext = nextItem;
5557  newItem->pPrev = pItem;
5558  pItem->pNext = newItem;
5559  if(nextItem != VMA_NULL)
5560  {
5561  nextItem->pPrev = newItem;
5562  }
5563  else
5564  {
5565  VMA_HEAVY_ASSERT(m_pBack == pItem);
5566  m_pBack = newItem;
5567  }
5568  ++m_Count;
5569  return newItem;
5570  }
5571  else
5572  return PushFront();
5573 }
5574 
5575 template<typename T>
5576 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5577 {
5578  ItemType* const newItem = InsertBefore(pItem);
5579  newItem->Value = value;
5580  return newItem;
5581 }
5582 
5583 template<typename T>
5584 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5585 {
5586  ItemType* const newItem = InsertAfter(pItem);
5587  newItem->Value = value;
5588  return newItem;
5589 }
5590 
5591 template<typename T, typename AllocatorT>
5592 class VmaList
5593 {
5594  VMA_CLASS_NO_COPY(VmaList)
5595 public:
5596  class iterator
5597  {
5598  public:
5599  iterator() :
5600  m_pList(VMA_NULL),
5601  m_pItem(VMA_NULL)
5602  {
5603  }
5604 
5605  T& operator*() const
5606  {
5607  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5608  return m_pItem->Value;
5609  }
5610  T* operator->() const
5611  {
5612  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5613  return &m_pItem->Value;
5614  }
5615 
5616  iterator& operator++()
5617  {
5618  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5619  m_pItem = m_pItem->pNext;
5620  return *this;
5621  }
5622  iterator& operator--()
5623  {
5624  if(m_pItem != VMA_NULL)
5625  {
5626  m_pItem = m_pItem->pPrev;
5627  }
5628  else
5629  {
5630  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5631  m_pItem = m_pList->Back();
5632  }
5633  return *this;
5634  }
5635 
5636  iterator operator++(int)
5637  {
5638  iterator result = *this;
5639  ++*this;
5640  return result;
5641  }
5642  iterator operator--(int)
5643  {
5644  iterator result = *this;
5645  --*this;
5646  return result;
5647  }
5648 
5649  bool operator==(const iterator& rhs) const
5650  {
5651  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5652  return m_pItem == rhs.m_pItem;
5653  }
5654  bool operator!=(const iterator& rhs) const
5655  {
5656  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5657  return m_pItem != rhs.m_pItem;
5658  }
5659 
5660  private:
5661  VmaRawList<T>* m_pList;
5662  VmaListItem<T>* m_pItem;
5663 
5664  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5665  m_pList(pList),
5666  m_pItem(pItem)
5667  {
5668  }
5669 
5670  friend class VmaList<T, AllocatorT>;
5671  };
5672 
5673  class const_iterator
5674  {
5675  public:
5676  const_iterator() :
5677  m_pList(VMA_NULL),
5678  m_pItem(VMA_NULL)
5679  {
5680  }
5681 
5682  const_iterator(const iterator& src) :
5683  m_pList(src.m_pList),
5684  m_pItem(src.m_pItem)
5685  {
5686  }
5687 
5688  const T& operator*() const
5689  {
5690  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5691  return m_pItem->Value;
5692  }
5693  const T* operator->() const
5694  {
5695  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5696  return &m_pItem->Value;
5697  }
5698 
5699  const_iterator& operator++()
5700  {
5701  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5702  m_pItem = m_pItem->pNext;
5703  return *this;
5704  }
5705  const_iterator& operator--()
5706  {
5707  if(m_pItem != VMA_NULL)
5708  {
5709  m_pItem = m_pItem->pPrev;
5710  }
5711  else
5712  {
5713  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5714  m_pItem = m_pList->Back();
5715  }
5716  return *this;
5717  }
5718 
5719  const_iterator operator++(int)
5720  {
5721  const_iterator result = *this;
5722  ++*this;
5723  return result;
5724  }
5725  const_iterator operator--(int)
5726  {
5727  const_iterator result = *this;
5728  --*this;
5729  return result;
5730  }
5731 
5732  bool operator==(const const_iterator& rhs) const
5733  {
5734  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5735  return m_pItem == rhs.m_pItem;
5736  }
5737  bool operator!=(const const_iterator& rhs) const
5738  {
5739  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5740  return m_pItem != rhs.m_pItem;
5741  }
5742 
5743  private:
5744  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5745  m_pList(pList),
5746  m_pItem(pItem)
5747  {
5748  }
5749 
5750  const VmaRawList<T>* m_pList;
5751  const VmaListItem<T>* m_pItem;
5752 
5753  friend class VmaList<T, AllocatorT>;
5754  };
5755 
5756  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5757 
5758  bool empty() const { return m_RawList.IsEmpty(); }
5759  size_t size() const { return m_RawList.GetCount(); }
5760 
5761  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5762  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5763 
5764  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5765  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5766 
5767  void clear() { m_RawList.Clear(); }
5768  void push_back(const T& value) { m_RawList.PushBack(value); }
5769  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5770  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5771 
5772 private:
5773  VmaRawList<T> m_RawList;
5774 };
5775 
5776 #endif // #if VMA_USE_STL_LIST
5777 
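// A short usage sketch of VmaList (the function and `pCallbacks` are
// hypothetical): the interface mirrors the std::list subset used by this
// library, while the nodes come from the embedded VmaPoolAllocator instead
// of individual heap allocations.
#if 0
static void VmaListExample(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<int> alloc(pCallbacks);
    VmaList<int, VmaStlAllocator<int> > list(alloc);
    list.push_back(10);
    list.push_back(30);
    VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin();
    ++it;                     // Points at 30.
    it = list.insert(it, 20); // List is 10, 20, 30; it points at 20.
    list.erase(it);           // Back to 10, 30.
    list.clear();
}
#endif
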
5779 // class VmaMap
5780 
5781 // Unused in this version.
5782 #if 0
5783 
5784 #if VMA_USE_STL_UNORDERED_MAP
5785 
5786 #define VmaPair std::pair
5787 
5788 #define VMA_MAP_TYPE(KeyT, ValueT) \
5789  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5790 
5791 #else // #if VMA_USE_STL_UNORDERED_MAP
5792 
5793 template<typename T1, typename T2>
5794 struct VmaPair
5795 {
5796  T1 first;
5797  T2 second;
5798 
5799  VmaPair() : first(), second() { }
5800  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5801 };
5802 
5803 /* Class compatible with a subset of the interface of std::unordered_map.
5804 KeyT, ValueT must be POD because they will be stored in VmaVector.
5805 */
5806 template<typename KeyT, typename ValueT>
5807 class VmaMap
5808 {
5809 public:
5810  typedef VmaPair<KeyT, ValueT> PairType;
5811  typedef PairType* iterator;
5812 
5813  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5814 
5815  iterator begin() { return m_Vector.begin(); }
5816  iterator end() { return m_Vector.end(); }
5817 
5818  void insert(const PairType& pair);
5819  iterator find(const KeyT& key);
5820  void erase(iterator it);
5821 
5822 private:
5823  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5824 };
5825 
5826 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5827 
5828 template<typename FirstT, typename SecondT>
5829 struct VmaPairFirstLess
5830 {
5831  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5832  {
5833  return lhs.first < rhs.first;
5834  }
5835  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5836  {
5837  return lhs.first < rhsFirst;
5838  }
5839 };
5840 
5841 template<typename KeyT, typename ValueT>
5842 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5843 {
5844  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5845  m_Vector.data(),
5846  m_Vector.data() + m_Vector.size(),
5847  pair,
5848  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5849  VmaVectorInsert(m_Vector, indexToInsert, pair);
5850 }
5851 
5852 template<typename KeyT, typename ValueT>
5853 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5854 {
5855  PairType* it = VmaBinaryFindFirstNotLess(
5856  m_Vector.data(),
5857  m_Vector.data() + m_Vector.size(),
5858  key,
5859  VmaPairFirstLess<KeyT, ValueT>());
5860  if((it != m_Vector.end()) && (it->first == key))
5861  {
5862  return it;
5863  }
5864  else
5865  {
5866  return m_Vector.end();
5867  }
5868 }
5869 
5870 template<typename KeyT, typename ValueT>
5871 void VmaMap<KeyT, ValueT>::erase(iterator it)
5872 {
5873  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5874 }
5875 
5876 #endif // #if VMA_USE_STL_UNORDERED_MAP
5877 
5878 #endif // #if 0
5879 
5881 
5882 class VmaDeviceMemoryBlock;
5883 
5884 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5885 
5886 struct VmaAllocation_T
5887 {
5888 private:
5889  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5890 
5891  enum FLAGS
5892  {
5893  FLAG_USER_DATA_STRING = 0x01,
5894  };
5895 
5896 public:
5897  enum ALLOCATION_TYPE
5898  {
5899  ALLOCATION_TYPE_NONE,
5900  ALLOCATION_TYPE_BLOCK,
5901  ALLOCATION_TYPE_DEDICATED,
5902  };
5903 
5904  /*
5905  This struct is allocated using VmaPoolAllocator.
5906  */
5907 
5908  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
5909  m_Alignment{1},
5910  m_Size{0},
5911  m_pUserData{VMA_NULL},
5912  m_LastUseFrameIndex{currentFrameIndex},
5913  m_MemoryTypeIndex{0},
5914  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
5915  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
5916  m_MapCount{0},
5917  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
5918  {
5919 #if VMA_STATS_STRING_ENABLED
5920  m_CreationFrameIndex = currentFrameIndex;
5921  m_BufferImageUsage = 0;
5922 #endif
5923  }
5924 
5925  ~VmaAllocation_T()
5926  {
5927  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5928 
5929  // Check if owned string was freed.
5930  VMA_ASSERT(m_pUserData == VMA_NULL);
5931  }
5932 
5933  void InitBlockAllocation(
5934  VmaDeviceMemoryBlock* block,
5935  VkDeviceSize offset,
5936  VkDeviceSize alignment,
5937  VkDeviceSize size,
5938  uint32_t memoryTypeIndex,
5939  VmaSuballocationType suballocationType,
5940  bool mapped,
5941  bool canBecomeLost)
5942  {
5943  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5944  VMA_ASSERT(block != VMA_NULL);
5945  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5946  m_Alignment = alignment;
5947  m_Size = size;
5948  m_MemoryTypeIndex = memoryTypeIndex;
5949  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5950  m_SuballocationType = (uint8_t)suballocationType;
5951  m_BlockAllocation.m_Block = block;
5952  m_BlockAllocation.m_Offset = offset;
5953  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5954  }
5955 
5956  void InitLost()
5957  {
5958  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5959  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5960  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5961  m_MemoryTypeIndex = 0;
5962  m_BlockAllocation.m_Block = VMA_NULL;
5963  m_BlockAllocation.m_Offset = 0;
5964  m_BlockAllocation.m_CanBecomeLost = true;
5965  }
5966 
5967  void ChangeBlockAllocation(
5968  VmaAllocator hAllocator,
5969  VmaDeviceMemoryBlock* block,
5970  VkDeviceSize offset);
5971 
5972  void ChangeOffset(VkDeviceSize newOffset);
5973 
5974  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5975  void InitDedicatedAllocation(
5976  uint32_t memoryTypeIndex,
5977  VkDeviceMemory hMemory,
5978  VmaSuballocationType suballocationType,
5979  void* pMappedData,
5980  VkDeviceSize size)
5981  {
5982  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5983  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5984  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5985  m_Alignment = 0;
5986  m_Size = size;
5987  m_MemoryTypeIndex = memoryTypeIndex;
5988  m_SuballocationType = (uint8_t)suballocationType;
5989  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5990  m_DedicatedAllocation.m_hMemory = hMemory;
5991  m_DedicatedAllocation.m_pMappedData = pMappedData;
5992  }
5993 
5994  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5995  VkDeviceSize GetAlignment() const { return m_Alignment; }
5996  VkDeviceSize GetSize() const { return m_Size; }
5997  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5998  void* GetUserData() const { return m_pUserData; }
5999  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6000  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6001 
6002  VmaDeviceMemoryBlock* GetBlock() const
6003  {
6004  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6005  return m_BlockAllocation.m_Block;
6006  }
6007  VkDeviceSize GetOffset() const;
6008  VkDeviceMemory GetMemory() const;
6009  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6010  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6011  void* GetMappedData() const;
6012  bool CanBecomeLost() const;
6013 
6014  uint32_t GetLastUseFrameIndex() const
6015  {
6016  return m_LastUseFrameIndex.load();
6017  }
6018  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6019  {
6020  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6021  }
6022  /*
6023  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6024  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6025  - Else, returns false.
6026 
6027  If hAllocation is already lost, assert - you should not call it then.
6028  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
6029  */
6030  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6031 
6032  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6033  {
6034  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6035  outInfo.blockCount = 1;
6036  outInfo.allocationCount = 1;
6037  outInfo.unusedRangeCount = 0;
6038  outInfo.usedBytes = m_Size;
6039  outInfo.unusedBytes = 0;
6040  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6041  outInfo.unusedRangeSizeMin = UINT64_MAX;
6042  outInfo.unusedRangeSizeMax = 0;
6043  }
6044 
6045  void BlockAllocMap();
6046  void BlockAllocUnmap();
6047  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6048  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6049 
6050 #if VMA_STATS_STRING_ENABLED
6051  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6052  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6053 
6054  void InitBufferImageUsage(uint32_t bufferImageUsage)
6055  {
6056  VMA_ASSERT(m_BufferImageUsage == 0);
6057  m_BufferImageUsage = bufferImageUsage;
6058  }
6059 
6060  void PrintParameters(class VmaJsonWriter& json) const;
6061 #endif
6062 
6063 private:
6064  VkDeviceSize m_Alignment;
6065  VkDeviceSize m_Size;
6066  void* m_pUserData;
6067  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6068  uint32_t m_MemoryTypeIndex;
6069  uint8_t m_Type; // ALLOCATION_TYPE
6070  uint8_t m_SuballocationType; // VmaSuballocationType
6071  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6072  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6073  uint8_t m_MapCount;
6074  uint8_t m_Flags; // enum FLAGS
6075 
6076  // Allocation out of VmaDeviceMemoryBlock.
6077  struct BlockAllocation
6078  {
6079  VmaDeviceMemoryBlock* m_Block;
6080  VkDeviceSize m_Offset;
6081  bool m_CanBecomeLost;
6082  };
6083 
6084  // Allocation for an object that has its own private VkDeviceMemory.
6085  struct DedicatedAllocation
6086  {
6087  VkDeviceMemory m_hMemory;
6088  void* m_pMappedData; // Not null means memory is mapped.
6089  };
6090 
6091  union
6092  {
6093  // Allocation out of VmaDeviceMemoryBlock.
6094  BlockAllocation m_BlockAllocation;
6095  // Allocation for an object that has its own private VkDeviceMemory.
6096  DedicatedAllocation m_DedicatedAllocation;
6097  };
6098 
6099 #if VMA_STATS_STRING_ENABLED
6100  uint32_t m_CreationFrameIndex;
6101  uint32_t m_BufferImageUsage; // 0 if unknown.
6102 #endif
6103 
6104  void FreeUserDataString(VmaAllocator hAllocator);
6105 };
6106 
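// A worked sketch of the m_MapCount encoding described above (values are
// illustrative, not library code): the top bit records the persistent
// (MAPPED) state, the low 7 bits count outstanding vmaMapMemory() calls.
#if 0
static void MapCountEncodingExample()
{
    uint8_t mapCount = 0x80;                        // Persistently mapped, 0 user maps.
    ++mapCount;                                     // vmaMapMemory()   -> 0x81.
    --mapCount;                                     // vmaUnmapMemory() -> 0x80.
    const bool persistent = (mapCount & 0x80) != 0; // true
    const uint8_t userMapCount = mapCount & 0x7F;   // 0
    (void)persistent; (void)userMapCount;
}
#endif
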
6107 /*
6108 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
6109 an allocated memory block, or free.
6110 */
6111 struct VmaSuballocation
6112 {
6113  VkDeviceSize offset;
6114  VkDeviceSize size;
6115  VmaAllocation hAllocation;
6116  VmaSuballocationType type;
6117 };
6118 
6119 // Comparator for offsets.
6120 struct VmaSuballocationOffsetLess
6121 {
6122  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6123  {
6124  return lhs.offset < rhs.offset;
6125  }
6126 };
6127 struct VmaSuballocationOffsetGreater
6128 {
6129  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6130  {
6131  return lhs.offset > rhs.offset;
6132  }
6133 };
6134 
6135 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6136 
6137 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
6138 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6139 
6140 enum class VmaAllocationRequestType
6141 {
6142  Normal,
6143  // Used by "Linear" algorithm.
6144  UpperAddress,
6145  EndOf1st,
6146  EndOf2nd,
6147 };
6148 
6149 /*
6150 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6151 
6152 If canMakeOtherLost was false:
6153 - item points to a FREE suballocation.
6154 - itemsToMakeLostCount is 0.
6155 
6156 If canMakeOtherLost was true:
6157 - item points to the first of a sequence of suballocations, which are either FREE,
6158  or point to VmaAllocations that can become lost.
6159 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6160  the requested allocation to succeed.
6161 */
6162 struct VmaAllocationRequest
6163 {
6164  VkDeviceSize offset;
6165  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6166  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6167  VmaSuballocationList::iterator item;
6168  size_t itemsToMakeLostCount;
6169  void* customData;
6170  VmaAllocationRequestType type;
6171 
6172  VkDeviceSize CalcCost() const
6173  {
6174  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6175  }
6176 };
6177 
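/*
A worked example of CalcCost(): suppose a candidate placement overlaps 2
allocations that would have to be made lost (itemsToMakeLostCount = 2), with
sumItemSize = 300000 bytes. Its cost is
300000 + 2 * VMA_LOST_ALLOCATION_COST = 300000 + 2 * 1048576 = 2397152
byte-equivalents, so a placement that sacrifices fewer allocations wins even
if it reclaims somewhat less space.
*/
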
6178 /*
6179 Data structure used for bookkeeping of allocations and unused ranges of memory
6180 in a single VkDeviceMemory block.
6181 */
6182 class VmaBlockMetadata
6183 {
6184 public:
6185  VmaBlockMetadata(VmaAllocator hAllocator);
6186  virtual ~VmaBlockMetadata() { }
6187  virtual void Init(VkDeviceSize size) { m_Size = size; }
6188 
6189  // Validates all data structures inside this object. If not valid, returns false.
6190  virtual bool Validate() const = 0;
6191  VkDeviceSize GetSize() const { return m_Size; }
6192  virtual size_t GetAllocationCount() const = 0;
6193  virtual VkDeviceSize GetSumFreeSize() const = 0;
6194  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6195  // Returns true if this block is empty - contains only a single free suballocation.
6196  virtual bool IsEmpty() const = 0;
6197 
6198  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6199  // Shouldn't modify blockCount.
6200  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6201 
6202 #if VMA_STATS_STRING_ENABLED
6203  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6204 #endif
6205 
6206  // Tries to find a place for a suballocation with the given parameters inside this block.
6207  // On success, fills pAllocationRequest and returns true.
6208  // On failure, returns false.
6209  virtual bool CreateAllocationRequest(
6210  uint32_t currentFrameIndex,
6211  uint32_t frameInUseCount,
6212  VkDeviceSize bufferImageGranularity,
6213  VkDeviceSize allocSize,
6214  VkDeviceSize allocAlignment,
6215  bool upperAddress,
6216  VmaSuballocationType allocType,
6217  bool canMakeOtherLost,
6218  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6219  uint32_t strategy,
6220  VmaAllocationRequest* pAllocationRequest) = 0;
6221 
6222  virtual bool MakeRequestedAllocationsLost(
6223  uint32_t currentFrameIndex,
6224  uint32_t frameInUseCount,
6225  VmaAllocationRequest* pAllocationRequest) = 0;
6226 
6227  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6228 
6229  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6230 
6231  // Makes the actual allocation based on the request. The request must already be checked and valid.
6232  virtual void Alloc(
6233  const VmaAllocationRequest& request,
6234  VmaSuballocationType type,
6235  VkDeviceSize allocSize,
6236  VmaAllocation hAllocation) = 0;
6237 
6238  // Frees the suballocation assigned to the given memory region.
6239  virtual void Free(const VmaAllocation allocation) = 0;
6240  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6241 
6242 protected:
6243  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6244 
6245 #if VMA_STATS_STRING_ENABLED
6246  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6247  VkDeviceSize unusedBytes,
6248  size_t allocationCount,
6249  size_t unusedRangeCount) const;
6250  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6251  VkDeviceSize offset,
6252  VmaAllocation hAllocation) const;
6253  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6254  VkDeviceSize offset,
6255  VkDeviceSize size) const;
6256  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6257 #endif
6258 
6259 private:
6260  VkDeviceSize m_Size;
6261  const VkAllocationCallbacks* m_pAllocationCallbacks;
6262 };
6263 
6264 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6265  VMA_ASSERT(0 && "Validation failed: " #cond); \
6266  return false; \
6267  } } while(false)
6268 
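// A hypothetical sketch of how the Validate() implementations below use
// VMA_VALIDATE: each failed condition asserts and makes the whole validation
// return false.
#if 0
static bool ExampleValidate(size_t count, size_t capacity)
{
    VMA_VALIDATE(count <= capacity); // Asserts and returns false if violated.
    VMA_VALIDATE(capacity > 0);
    return true;                     // All checks passed.
}
#endif
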
6269 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6270 {
6271  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6272 public:
6273  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6274  virtual ~VmaBlockMetadata_Generic();
6275  virtual void Init(VkDeviceSize size);
6276 
6277  virtual bool Validate() const;
6278  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6279  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6280  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6281  virtual bool IsEmpty() const;
6282 
6283  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6284  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6285 
6286 #if VMA_STATS_STRING_ENABLED
6287  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6288 #endif
6289 
6290  virtual bool CreateAllocationRequest(
6291  uint32_t currentFrameIndex,
6292  uint32_t frameInUseCount,
6293  VkDeviceSize bufferImageGranularity,
6294  VkDeviceSize allocSize,
6295  VkDeviceSize allocAlignment,
6296  bool upperAddress,
6297  VmaSuballocationType allocType,
6298  bool canMakeOtherLost,
6299  uint32_t strategy,
6300  VmaAllocationRequest* pAllocationRequest);
6301 
6302  virtual bool MakeRequestedAllocationsLost(
6303  uint32_t currentFrameIndex,
6304  uint32_t frameInUseCount,
6305  VmaAllocationRequest* pAllocationRequest);
6306 
6307  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6308 
6309  virtual VkResult CheckCorruption(const void* pBlockData);
6310 
6311  virtual void Alloc(
6312  const VmaAllocationRequest& request,
6313  VmaSuballocationType type,
6314  VkDeviceSize allocSize,
6315  VmaAllocation hAllocation);
6316 
6317  virtual void Free(const VmaAllocation allocation);
6318  virtual void FreeAtOffset(VkDeviceSize offset);
6319 
6321  // For defragmentation
6322 
6323  bool IsBufferImageGranularityConflictPossible(
6324  VkDeviceSize bufferImageGranularity,
6325  VmaSuballocationType& inOutPrevSuballocType) const;
6326 
6327 private:
6328  friend class VmaDefragmentationAlgorithm_Generic;
6329  friend class VmaDefragmentationAlgorithm_Fast;
6330 
6331  uint32_t m_FreeCount;
6332  VkDeviceSize m_SumFreeSize;
6333  VmaSuballocationList m_Suballocations;
6334  // Suballocations that are free and have size greater than a certain threshold.
6335  // Sorted by size, ascending.
6336  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6337 
6338  bool ValidateFreeSuballocationList() const;
6339 
6340  // Checks if the requested suballocation with the given parameters can be placed in the given suballocItem.
6341  // If yes, fills pOffset and returns true. If no, returns false.
6342  bool CheckAllocation(
6343  uint32_t currentFrameIndex,
6344  uint32_t frameInUseCount,
6345  VkDeviceSize bufferImageGranularity,
6346  VkDeviceSize allocSize,
6347  VkDeviceSize allocAlignment,
6348  VmaSuballocationType allocType,
6349  VmaSuballocationList::const_iterator suballocItem,
6350  bool canMakeOtherLost,
6351  VkDeviceSize* pOffset,
6352  size_t* itemsToMakeLostCount,
6353  VkDeviceSize* pSumFreeSize,
6354  VkDeviceSize* pSumItemSize) const;
6355  // Given a free suballocation, merges it with the following one, which must also be free.
6356  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6357  // Releases given suballocation, making it free.
6358  // Merges it with adjacent free suballocations if applicable.
6359  // Returns iterator to new free suballocation at this place.
6360  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6361  // Given a free suballocation, inserts it into the sorted list
6362  // m_FreeSuballocationsBySize if its size qualifies.
6363  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6364  // Given a free suballocation, removes it from the sorted list
6365  // m_FreeSuballocationsBySize if its size qualifies.
6366  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6367 };
6368 
6369 /*
6370 Allocations and their references in the internal data structure look like this:
6371 
6372 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6373 
6374  0 +-------+
6375  | |
6376  | |
6377  | |
6378  +-------+
6379  | Alloc | 1st[m_1stNullItemsBeginCount]
6380  +-------+
6381  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6382  +-------+
6383  | ... |
6384  +-------+
6385  | Alloc | 1st[1st.size() - 1]
6386  +-------+
6387  | |
6388  | |
6389  | |
6390 GetSize() +-------+
6391 
6392 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6393 
6394  0 +-------+
6395  | Alloc | 2nd[0]
6396  +-------+
6397  | Alloc | 2nd[1]
6398  +-------+
6399  | ... |
6400  +-------+
6401  | Alloc | 2nd[2nd.size() - 1]
6402  +-------+
6403  | |
6404  | |
6405  | |
6406  +-------+
6407  | Alloc | 1st[m_1stNullItemsBeginCount]
6408  +-------+
6409  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6410  +-------+
6411  | ... |
6412  +-------+
6413  | Alloc | 1st[1st.size() - 1]
6414  +-------+
6415  | |
6416 GetSize() +-------+
6417 
6418 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6419 
6420  0 +-------+
6421  | |
6422  | |
6423  | |
6424  +-------+
6425  | Alloc | 1st[m_1stNullItemsBeginCount]
6426  +-------+
6427  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6428  +-------+
6429  | ... |
6430  +-------+
6431  | Alloc | 1st[1st.size() - 1]
6432  +-------+
6433  | |
6434  | |
6435  | |
6436  +-------+
6437  | Alloc | 2nd[2nd.size() - 1]
6438  +-------+
6439  | ... |
6440  +-------+
6441  | Alloc | 2nd[1]
6442  +-------+
6443  | Alloc | 2nd[0]
6444 GetSize() +-------+
6445 
6446 */
6447 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6448 {
6449  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6450 public:
6451  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6452  virtual ~VmaBlockMetadata_Linear();
6453  virtual void Init(VkDeviceSize size);
6454 
6455  virtual bool Validate() const;
6456  virtual size_t GetAllocationCount() const;
6457  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6458  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6459  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6460 
6461  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6462  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6463 
6464 #if VMA_STATS_STRING_ENABLED
6465  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6466 #endif
6467 
6468  virtual bool CreateAllocationRequest(
6469  uint32_t currentFrameIndex,
6470  uint32_t frameInUseCount,
6471  VkDeviceSize bufferImageGranularity,
6472  VkDeviceSize allocSize,
6473  VkDeviceSize allocAlignment,
6474  bool upperAddress,
6475  VmaSuballocationType allocType,
6476  bool canMakeOtherLost,
6477  uint32_t strategy,
6478  VmaAllocationRequest* pAllocationRequest);
6479 
6480  virtual bool MakeRequestedAllocationsLost(
6481  uint32_t currentFrameIndex,
6482  uint32_t frameInUseCount,
6483  VmaAllocationRequest* pAllocationRequest);
6484 
6485  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6486 
6487  virtual VkResult CheckCorruption(const void* pBlockData);
6488 
6489  virtual void Alloc(
6490  const VmaAllocationRequest& request,
6491  VmaSuballocationType type,
6492  VkDeviceSize allocSize,
6493  VmaAllocation hAllocation);
6494 
6495  virtual void Free(const VmaAllocation allocation);
6496  virtual void FreeAtOffset(VkDeviceSize offset);
6497 
6498 private:
6499  /*
6500  There are two suballocation vectors, used in a ping-pong fashion.
6501  The one with index m_1stVectorIndex is called 1st.
6502  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6503  2nd can be non-empty only when 1st is not empty.
6504  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6505  */
6506  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6507 
6508  enum SECOND_VECTOR_MODE
6509  {
6510  SECOND_VECTOR_EMPTY,
6511  /*
6512  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
6513  all have smaller offsets.
6514  */
6515  SECOND_VECTOR_RING_BUFFER,
6516  /*
6517  Suballocations in the 2nd vector form the upper side of a double stack.
6518  They all have offsets higher than those in the 1st vector.
6519  The top of this stack means smaller offsets, but higher indices in this vector.
6520  */
6521  SECOND_VECTOR_DOUBLE_STACK,
6522  };
6523 
6524  VkDeviceSize m_SumFreeSize;
6525  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6526  uint32_t m_1stVectorIndex;
6527  SECOND_VECTOR_MODE m_2ndVectorMode;
6528 
6529  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6530  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6531  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6532  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6533 
6534  // Number of items in 1st vector with hAllocation = null at the beginning.
6535  size_t m_1stNullItemsBeginCount;
6536  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6537  size_t m_1stNullItemsMiddleCount;
6538  // Number of items in 2nd vector with hAllocation = null.
6539  size_t m_2ndNullItemsCount;
6540 
6541  bool ShouldCompact1st() const;
6542  void CleanupAfterFree();
6543 
6544  bool CreateAllocationRequest_LowerAddress(
6545  uint32_t currentFrameIndex,
6546  uint32_t frameInUseCount,
6547  VkDeviceSize bufferImageGranularity,
6548  VkDeviceSize allocSize,
6549  VkDeviceSize allocAlignment,
6550  VmaSuballocationType allocType,
6551  bool canMakeOtherLost,
6552  uint32_t strategy,
6553  VmaAllocationRequest* pAllocationRequest);
6554  bool CreateAllocationRequest_UpperAddress(
6555  uint32_t currentFrameIndex,
6556  uint32_t frameInUseCount,
6557  VkDeviceSize bufferImageGranularity,
6558  VkDeviceSize allocSize,
6559  VkDeviceSize allocAlignment,
6560  VmaSuballocationType allocType,
6561  bool canMakeOtherLost,
6562  uint32_t strategy,
6563  VmaAllocationRequest* pAllocationRequest);
6564 };
6565 
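// A sketch of the ping-pong indexing described above (illustrative only, not
// library code): swapping which vector plays the role of "1st" is a single
// bit flip, so promoting the 2nd vector after compaction costs O(1).
#if 0
static void PingPongIndexExample()
{
    uint32_t firstVectorIndex = 0;                      // m_Suballocations0 is "1st".
    uint32_t secondVectorIndex = firstVectorIndex ^ 1;  // m_Suballocations1 is "2nd".
    firstVectorIndex ^= 1;                              // Roles swap in O(1).
    (void)secondVectorIndex;
}
#endif
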
6566 /*
6567 - GetSize() is the original size of the allocated memory block.
6568 - m_UsableSize is this size aligned down to a power of two.
6569  All allocations and calculations happen relative to m_UsableSize.
6570 - GetUnusableSize() is the difference between them.
6571  It is reported as a separate, unused range, not available for allocations.
6572 
6573 A node at level 0 has size = m_UsableSize.
6574 Each subsequent level contains nodes half the size of those at the previous level.
6575 m_LevelCount is the maximum number of levels to use in the current object.
6576 */
6577 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6578 {
6579  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6580 public:
6581  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6582  virtual ~VmaBlockMetadata_Buddy();
6583  virtual void Init(VkDeviceSize size);
6584 
6585  virtual bool Validate() const;
6586  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6587  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6588  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6589  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6590 
6591  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6592  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6593 
6594 #if VMA_STATS_STRING_ENABLED
6595  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6596 #endif
6597 
6598  virtual bool CreateAllocationRequest(
6599  uint32_t currentFrameIndex,
6600  uint32_t frameInUseCount,
6601  VkDeviceSize bufferImageGranularity,
6602  VkDeviceSize allocSize,
6603  VkDeviceSize allocAlignment,
6604  bool upperAddress,
6605  VmaSuballocationType allocType,
6606  bool canMakeOtherLost,
6607  uint32_t strategy,
6608  VmaAllocationRequest* pAllocationRequest);
6609 
6610  virtual bool MakeRequestedAllocationsLost(
6611  uint32_t currentFrameIndex,
6612  uint32_t frameInUseCount,
6613  VmaAllocationRequest* pAllocationRequest);
6614 
6615  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6616 
6617  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6618 
6619  virtual void Alloc(
6620  const VmaAllocationRequest& request,
6621  VmaSuballocationType type,
6622  VkDeviceSize allocSize,
6623  VmaAllocation hAllocation);
6624 
6625  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6626  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6627 
6628 private:
6629  static const VkDeviceSize MIN_NODE_SIZE = 32;
6630  static const size_t MAX_LEVELS = 30;
6631 
6632  struct ValidationContext
6633  {
6634  size_t calculatedAllocationCount;
6635  size_t calculatedFreeCount;
6636  VkDeviceSize calculatedSumFreeSize;
6637 
6638  ValidationContext() :
6639  calculatedAllocationCount(0),
6640  calculatedFreeCount(0),
6641  calculatedSumFreeSize(0) { }
6642  };
6643 
6644  struct Node
6645  {
6646  VkDeviceSize offset;
6647  enum TYPE
6648  {
6649  TYPE_FREE,
6650  TYPE_ALLOCATION,
6651  TYPE_SPLIT,
6652  TYPE_COUNT
6653  } type;
6654  Node* parent;
6655  Node* buddy;
6656 
6657  union
6658  {
6659  struct
6660  {
6661  Node* prev;
6662  Node* next;
6663  } free;
6664  struct
6665  {
6666  VmaAllocation alloc;
6667  } allocation;
6668  struct
6669  {
6670  Node* leftChild;
6671  } split;
6672  };
6673  };
6674 
6675  // Size of the memory block aligned down to a power of two.
6676  VkDeviceSize m_UsableSize;
6677  uint32_t m_LevelCount;
6678 
6679  Node* m_Root;
6680  struct {
6681  Node* front;
6682  Node* back;
6683  } m_FreeList[MAX_LEVELS];
6684  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6685  size_t m_AllocationCount;
6686  // Number of nodes in the tree with type == TYPE_FREE.
6687  size_t m_FreeCount;
6688  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6689  VkDeviceSize m_SumFreeSize;
6690 
6691  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6692  void DeleteNode(Node* node);
6693  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6694  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6695  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6696  // Alloc passed just for validation. Can be null.
6697  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6698  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6699  // Adds node to the front of FreeList at given level.
6700  // node->type must be FREE.
6701  // node->free.prev, next can be undefined.
6702  void AddToFreeListFront(uint32_t level, Node* node);
6703  // Removes node from FreeList at given level.
6704  // node->type must be FREE.
6705  // node->free.prev, next stay untouched.
6706  void RemoveFromFreeList(uint32_t level, Node* node);
6707 
6708 #if VMA_STATS_STRING_ENABLED
6709  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6710 #endif
6711 };
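/*
Illustrative sketch (not part of the library): how the buddy metadata maps an
allocation size to a tree level. LevelToNodeSize() halves m_UsableSize once per
level, so with m_UsableSize = 256 MiB: level 0 -> 256 MiB, level 1 -> 128 MiB,
..., level 5 -> 8 MiB. A hypothetical standalone version of AllocSizeToLevel:

    uint32_t AllocSizeToLevelSketch(VkDeviceSize usableSize, VkDeviceSize allocSize, uint32_t levelCount)
    {
        uint32_t level = 0;
        // Descend while the child node (half the current size) still fits the allocation.
        while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
        {
            ++level;
        }
        return level;
    }

E.g. a 5 MiB request in a 256 MiB block lands at level 5 (8 MiB nodes); the
3 MiB of internal fragmentation is counted in m_SumFreeSize, as noted above.
*/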
6712 
6713 /*
6714 Represents a single block of device memory (`VkDeviceMemory`) with all the
6715 data about its regions (a.k.a. suballocations, #VmaAllocation), both assigned and free.
6716 
6717 Thread-safety: This class must be externally synchronized.
6718 */
6719 class VmaDeviceMemoryBlock
6720 {
6721  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6722 public:
6723  VmaBlockMetadata* m_pMetadata;
6724 
6725  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6726 
6727  ~VmaDeviceMemoryBlock()
6728  {
6729  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6730  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6731  }
6732 
6733  // Always call after construction.
6734  void Init(
6735  VmaAllocator hAllocator,
6736  VmaPool hParentPool,
6737  uint32_t newMemoryTypeIndex,
6738  VkDeviceMemory newMemory,
6739  VkDeviceSize newSize,
6740  uint32_t id,
6741  uint32_t algorithm);
6742  // Always call before destruction.
6743  void Destroy(VmaAllocator allocator);
6744 
6745  VmaPool GetParentPool() const { return m_hParentPool; }
6746  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6747  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6748  uint32_t GetId() const { return m_Id; }
6749  void* GetMappedData() const { return m_pMappedData; }
6750 
6751  // Validates all data structures inside this object. If not valid, returns false.
6752  bool Validate() const;
6753 
6754  VkResult CheckCorruption(VmaAllocator hAllocator);
6755 
6756  // ppData can be null.
6757  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6758  void Unmap(VmaAllocator hAllocator, uint32_t count);
6759 
6760  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6761  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6762 
6763  VkResult BindBufferMemory(
6764  const VmaAllocator hAllocator,
6765  const VmaAllocation hAllocation,
6766  VkDeviceSize allocationLocalOffset,
6767  VkBuffer hBuffer,
6768  const void* pNext);
6769  VkResult BindImageMemory(
6770  const VmaAllocator hAllocator,
6771  const VmaAllocation hAllocation,
6772  VkDeviceSize allocationLocalOffset,
6773  VkImage hImage,
6774  const void* pNext);
6775 
6776 private:
6777  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
6778  uint32_t m_MemoryTypeIndex;
6779  uint32_t m_Id;
6780  VkDeviceMemory m_hMemory;
6781 
6782  /*
6783  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6784  Also protects m_MapCount, m_pMappedData.
6785  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6786  */
6787  VMA_MUTEX m_Mutex;
6788  uint32_t m_MapCount;
6789  void* m_pMappedData;
6790 };
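/*
Illustrative sketch (assumed caller code, not part of the library): Map/Unmap
are reference-counted, so nested mappings of the same block are cheap - only
the first Map calls vkMapMemory and only the last Unmap calls vkUnmapMemory.

    void* pData = VMA_NULL;
    if(pBlock->Map(hAllocator, 1, &pData) == VK_SUCCESS) // count 0 -> 1: maps the memory.
    {
        pBlock->Map(hAllocator, 1, VMA_NULL); // count 1 -> 2: reuses m_pMappedData.
        // ... use pData ...
        pBlock->Unmap(hAllocator, 1); // count 2 -> 1: memory stays mapped.
        pBlock->Unmap(hAllocator, 1); // count 1 -> 0: vkUnmapMemory is called.
    }
*/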
6791 
6792 struct VmaPointerLess
6793 {
6794  bool operator()(const void* lhs, const void* rhs) const
6795  {
6796  return lhs < rhs;
6797  }
6798 };
6799 
6800 struct VmaDefragmentationMove
6801 {
6802  size_t srcBlockIndex;
6803  size_t dstBlockIndex;
6804  VkDeviceSize srcOffset;
6805  VkDeviceSize dstOffset;
6806  VkDeviceSize size;
6807  VmaAllocation hAllocation;
6808  VmaDeviceMemoryBlock* pSrcBlock;
6809  VmaDeviceMemoryBlock* pDstBlock;
6810 };
6811 
6812 class VmaDefragmentationAlgorithm;
6813 
6814 /*
6815 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6816 Vulkan memory type.
6817 
6818 Synchronized internally with a mutex.
6819 */
6820 struct VmaBlockVector
6821 {
6822  VMA_CLASS_NO_COPY(VmaBlockVector)
6823 public:
6824  VmaBlockVector(
6825  VmaAllocator hAllocator,
6826  VmaPool hParentPool,
6827  uint32_t memoryTypeIndex,
6828  VkDeviceSize preferredBlockSize,
6829  size_t minBlockCount,
6830  size_t maxBlockCount,
6831  VkDeviceSize bufferImageGranularity,
6832  uint32_t frameInUseCount,
6833  bool explicitBlockSize,
6834  uint32_t algorithm);
6835  ~VmaBlockVector();
6836 
6837  VkResult CreateMinBlocks();
6838 
6839  VmaAllocator GetAllocator() const { return m_hAllocator; }
6840  VmaPool GetParentPool() const { return m_hParentPool; }
6841  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6842  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6843  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6844  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6845  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6846  uint32_t GetAlgorithm() const { return m_Algorithm; }
6847 
6848  void GetPoolStats(VmaPoolStats* pStats);
6849 
6850  bool IsEmpty();
6851  bool IsCorruptionDetectionEnabled() const;
6852 
6853  VkResult Allocate(
6854  uint32_t currentFrameIndex,
6855  VkDeviceSize size,
6856  VkDeviceSize alignment,
6857  const VmaAllocationCreateInfo& createInfo,
6858  VmaSuballocationType suballocType,
6859  size_t allocationCount,
6860  VmaAllocation* pAllocations);
6861 
6862  void Free(const VmaAllocation hAllocation);
6863 
6864  // Adds statistics of this BlockVector to pStats.
6865  void AddStats(VmaStats* pStats);
6866 
6867 #if VMA_STATS_STRING_ENABLED
6868  void PrintDetailedMap(class VmaJsonWriter& json);
6869 #endif
6870 
6871  void MakePoolAllocationsLost(
6872  uint32_t currentFrameIndex,
6873  size_t* pLostAllocationCount);
6874  VkResult CheckCorruption();
6875 
6876  // Saves results in pCtx->res.
6877  void Defragment(
6878  class VmaBlockVectorDefragmentationContext* pCtx,
6879  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
6880  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6881  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6882  VkCommandBuffer commandBuffer);
6883  void DefragmentationEnd(
6884  class VmaBlockVectorDefragmentationContext* pCtx,
6885  uint32_t flags,
6886  VmaDefragmentationStats* pStats);
6887 
6888  uint32_t ProcessDefragmentations(
6889  class VmaBlockVectorDefragmentationContext *pCtx,
6890  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
6891 
6892  void CommitDefragmentations(
6893  class VmaBlockVectorDefragmentationContext *pCtx,
6894  VmaDefragmentationStats* pStats);
6895 
6897  // To be used only while the m_Mutex is locked. Used during defragmentation.
6898 
6899  size_t GetBlockCount() const { return m_Blocks.size(); }
6900  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6901  size_t CalcAllocationCount() const;
6902  bool IsBufferImageGranularityConflictPossible() const;
6903 
6904 private:
6905  friend class VmaDefragmentationAlgorithm_Generic;
6906 
6907  const VmaAllocator m_hAllocator;
6908  const VmaPool m_hParentPool;
6909  const uint32_t m_MemoryTypeIndex;
6910  const VkDeviceSize m_PreferredBlockSize;
6911  const size_t m_MinBlockCount;
6912  const size_t m_MaxBlockCount;
6913  const VkDeviceSize m_BufferImageGranularity;
6914  const uint32_t m_FrameInUseCount;
6915  const bool m_ExplicitBlockSize;
6916  const uint32_t m_Algorithm;
6917  VMA_RW_MUTEX m_Mutex;
6918 
6919  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6920  a hysteresis that avoids the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6921  bool m_HasEmptyBlock;
6922  // Incrementally sorted by sumFreeSize, ascending.
6923  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6924  uint32_t m_NextBlockId;
6925 
6926  VkDeviceSize CalcMaxBlockSize() const;
6927 
6928  // Finds and removes given block from vector.
6929  void Remove(VmaDeviceMemoryBlock* pBlock);
6930 
6931  // Performs a single step in sorting m_Blocks. They may not be fully sorted
6932  // after this call.
6933  void IncrementallySortBlocks();
6934 
6935  VkResult AllocatePage(
6936  uint32_t currentFrameIndex,
6937  VkDeviceSize size,
6938  VkDeviceSize alignment,
6939  const VmaAllocationCreateInfo& createInfo,
6940  VmaSuballocationType suballocType,
6941  VmaAllocation* pAllocation);
6942 
6943  // To be used only without the CAN_MAKE_OTHER_LOST flag.
6944  VkResult AllocateFromBlock(
6945  VmaDeviceMemoryBlock* pBlock,
6946  uint32_t currentFrameIndex,
6947  VkDeviceSize size,
6948  VkDeviceSize alignment,
6949  VmaAllocationCreateFlags allocFlags,
6950  void* pUserData,
6951  VmaSuballocationType suballocType,
6952  uint32_t strategy,
6953  VmaAllocation* pAllocation);
6954 
6955  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6956 
6957  // Saves result to pCtx->res.
6958  void ApplyDefragmentationMovesCpu(
6959  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6960  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6961  // Saves result to pCtx->res.
6962  void ApplyDefragmentationMovesGpu(
6963  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6964  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6965  VkCommandBuffer commandBuffer);
6966 
6967  /*
6968  Used during defragmentation. pDefragmentationStats is optional: it is in/out,
6969  updated with new data.
6970  */
6971  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6972 
6973  void UpdateHasEmptyBlock();
6974 };
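/*
Illustrative sketch (assumed implementation, for exposition only): the
"incremental" sorting mentioned for IncrementallySortBlocks() can be as simple
as one bubble-sort pass, spreading the cost of keeping m_Blocks ordered by
sumFreeSize across many calls:

    static void IncrementallySortBlocksSketch(
        VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> >& blocks)
    {
        // Swap the first adjacent pair found out of order, then stop.
        for(size_t i = 1; i < blocks.size(); ++i)
        {
            if(blocks[i - 1]->m_pMetadata->GetSumFreeSize() > blocks[i]->m_pMetadata->GetSumFreeSize())
            {
                VMA_SWAP(blocks[i - 1], blocks[i]);
                return;
            }
        }
    }
*/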
6975 
6976 struct VmaPool_T
6977 {
6978  VMA_CLASS_NO_COPY(VmaPool_T)
6979 public:
6980  VmaBlockVector m_BlockVector;
6981 
6982  VmaPool_T(
6983  VmaAllocator hAllocator,
6984  const VmaPoolCreateInfo& createInfo,
6985  VkDeviceSize preferredBlockSize);
6986  ~VmaPool_T();
6987 
6988  uint32_t GetId() const { return m_Id; }
6989  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6990 
6991  const char* GetName() const { return m_Name; }
6992  void SetName(const char* pName);
6993 
6994 #if VMA_STATS_STRING_ENABLED
6995  //void PrintDetailedMap(class VmaStringBuilder& sb);
6996 #endif
6997 
6998 private:
6999  uint32_t m_Id;
7000  char* m_Name;
7001 };
7002 
7003 /*
7004 Performs defragmentation:
7005 
7006 - Updates `pBlockVector->m_pMetadata`.
7007 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7008 - Does not move actual data, only returns requested moves as `moves`.
7009 */
7010 class VmaDefragmentationAlgorithm
7011 {
7012  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7013 public:
7014  VmaDefragmentationAlgorithm(
7015  VmaAllocator hAllocator,
7016  VmaBlockVector* pBlockVector,
7017  uint32_t currentFrameIndex) :
7018  m_hAllocator(hAllocator),
7019  m_pBlockVector(pBlockVector),
7020  m_CurrentFrameIndex(currentFrameIndex)
7021  {
7022  }
7023  virtual ~VmaDefragmentationAlgorithm()
7024  {
7025  }
7026 
7027  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7028  virtual void AddAll() = 0;
7029 
7030  virtual VkResult Defragment(
7031  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7032  VkDeviceSize maxBytesToMove,
7033  uint32_t maxAllocationsToMove,
7034  VmaDefragmentationFlags flags) = 0;
7035 
7036  virtual VkDeviceSize GetBytesMoved() const = 0;
7037  virtual uint32_t GetAllocationsMoved() const = 0;
7038 
7039 protected:
7040  VmaAllocator const m_hAllocator;
7041  VmaBlockVector* const m_pBlockVector;
7042  const uint32_t m_CurrentFrameIndex;
7043 
7044  struct AllocationInfo
7045  {
7046  VmaAllocation m_hAllocation;
7047  VkBool32* m_pChanged;
7048 
7049  AllocationInfo() :
7050  m_hAllocation(VK_NULL_HANDLE),
7051  m_pChanged(VMA_NULL)
7052  {
7053  }
7054  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7055  m_hAllocation(hAlloc),
7056  m_pChanged(pChanged)
7057  {
7058  }
7059  };
7060 };
7061 
7062 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7063 {
7064  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7065 public:
7066  VmaDefragmentationAlgorithm_Generic(
7067  VmaAllocator hAllocator,
7068  VmaBlockVector* pBlockVector,
7069  uint32_t currentFrameIndex,
7070  bool overlappingMoveSupported);
7071  virtual ~VmaDefragmentationAlgorithm_Generic();
7072 
7073  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7074  virtual void AddAll() { m_AllAllocations = true; }
7075 
7076  virtual VkResult Defragment(
7077  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7078  VkDeviceSize maxBytesToMove,
7079  uint32_t maxAllocationsToMove,
7080  VmaDefragmentationFlags flags);
7081 
7082  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7083  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7084 
7085 private:
7086  uint32_t m_AllocationCount;
7087  bool m_AllAllocations;
7088 
7089  VkDeviceSize m_BytesMoved;
7090  uint32_t m_AllocationsMoved;
7091 
7092  struct AllocationInfoSizeGreater
7093  {
7094  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7095  {
7096  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7097  }
7098  };
7099 
7100  struct AllocationInfoOffsetGreater
7101  {
7102  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7103  {
7104  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7105  }
7106  };
7107 
7108  struct BlockInfo
7109  {
7110  size_t m_OriginalBlockIndex;
7111  VmaDeviceMemoryBlock* m_pBlock;
7112  bool m_HasNonMovableAllocations;
7113  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7114 
7115  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7116  m_OriginalBlockIndex(SIZE_MAX),
7117  m_pBlock(VMA_NULL),
7118  m_HasNonMovableAllocations(true),
7119  m_Allocations(pAllocationCallbacks)
7120  {
7121  }
7122 
7123  void CalcHasNonMovableAllocations()
7124  {
7125  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7126  const size_t defragmentAllocCount = m_Allocations.size();
7127  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7128  }
7129 
7130  void SortAllocationsBySizeDescending()
7131  {
7132  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7133  }
7134 
7135  void SortAllocationsByOffsetDescending()
7136  {
7137  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7138  }
7139  };
7140 
7141  struct BlockPointerLess
7142  {
7143  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7144  {
7145  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7146  }
7147  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7148  {
7149  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7150  }
7151  };
7152 
7153  // 1. Blocks with some non-movable allocations go first.
7154  // 2. Blocks with smaller sumFreeSize go first.
7155  struct BlockInfoCompareMoveDestination
7156  {
7157  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7158  {
7159  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7160  {
7161  return true;
7162  }
7163  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7164  {
7165  return false;
7166  }
7167  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7168  {
7169  return true;
7170  }
7171  return false;
7172  }
7173  };
7174 
7175  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7176  BlockInfoVector m_Blocks;
7177 
7178  VkResult DefragmentRound(
7179  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7180  VkDeviceSize maxBytesToMove,
7181  uint32_t maxAllocationsToMove,
7182  bool freeOldAllocations);
7183 
7184  size_t CalcBlocksWithNonMovableCount() const;
7185 
7186  static bool MoveMakesSense(
7187  size_t dstBlockIndex, VkDeviceSize dstOffset,
7188  size_t srcBlockIndex, VkDeviceSize srcOffset);
7189 };
7190 
7191 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7192 {
7193  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7194 public:
7195  VmaDefragmentationAlgorithm_Fast(
7196  VmaAllocator hAllocator,
7197  VmaBlockVector* pBlockVector,
7198  uint32_t currentFrameIndex,
7199  bool overlappingMoveSupported);
7200  virtual ~VmaDefragmentationAlgorithm_Fast();
7201 
7202  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7203  virtual void AddAll() { m_AllAllocations = true; }
7204 
7205  virtual VkResult Defragment(
7206  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7207  VkDeviceSize maxBytesToMove,
7208  uint32_t maxAllocationsToMove,
7209  VmaDefragmentationFlags flags);
7210 
7211  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7212  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7213 
7214 private:
7215  struct BlockInfo
7216  {
7217  size_t origBlockIndex;
7218  };
7219 
7220  class FreeSpaceDatabase
7221  {
7222  public:
7223  FreeSpaceDatabase()
7224  {
7225  FreeSpace s = {};
7226  s.blockInfoIndex = SIZE_MAX;
7227  for(size_t i = 0; i < MAX_COUNT; ++i)
7228  {
7229  m_FreeSpaces[i] = s;
7230  }
7231  }
7232 
7233  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7234  {
7235  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7236  {
7237  return;
7238  }
7239 
7240  // Find the first invalid structure, or failing that, the smallest one.
7241  size_t bestIndex = SIZE_MAX;
7242  for(size_t i = 0; i < MAX_COUNT; ++i)
7243  {
7244  // Empty structure.
7245  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7246  {
7247  bestIndex = i;
7248  break;
7249  }
7250  if(m_FreeSpaces[i].size < size &&
7251  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7252  {
7253  bestIndex = i;
7254  }
7255  }
7256 
7257  if(bestIndex != SIZE_MAX)
7258  {
7259  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7260  m_FreeSpaces[bestIndex].offset = offset;
7261  m_FreeSpaces[bestIndex].size = size;
7262  }
7263  }
7264 
7265  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7266  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7267  {
7268  size_t bestIndex = SIZE_MAX;
7269  VkDeviceSize bestFreeSpaceAfter = 0;
7270  for(size_t i = 0; i < MAX_COUNT; ++i)
7271  {
7272  // Structure is valid.
7273  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7274  {
7275  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7276  // Allocation fits into this structure.
7277  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7278  {
7279  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7280  (dstOffset + size);
7281  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7282  {
7283  bestIndex = i;
7284  bestFreeSpaceAfter = freeSpaceAfter;
7285  }
7286  }
7287  }
7288  }
7289 
7290  if(bestIndex != SIZE_MAX)
7291  {
7292  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7293  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7294 
7295  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7296  {
7297  // Leave this structure for remaining empty space.
7298  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7299  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7300  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7301  }
7302  else
7303  {
7304  // This structure becomes invalid.
7305  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7306  }
7307 
7308  return true;
7309  }
7310 
7311  return false;
7312  }
7313 
7314  private:
7315  static const size_t MAX_COUNT = 4;
7316 
7317  struct FreeSpace
7318  {
7319  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7320  VkDeviceSize offset;
7321  VkDeviceSize size;
7322  } m_FreeSpaces[MAX_COUNT];
7323  };
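 /*
 Illustrative sketch (for exposition; the library drives this differently):
 the database caches up to MAX_COUNT free ranges seen while scanning blocks so
 that later moves can be redirected into them.

     FreeSpaceDatabase db;
     db.Register(0, 256, 4096); // 4 KiB hole at offset 256 in block 0.
     size_t dstBlock; VkDeviceSize dstOffset;
     if(db.Fetch(64, 1024, dstBlock, dstOffset))
     {
         // dstBlock == 0, dstOffset == 256 (already 64-aligned).
         // The remaining 3 KiB tail stays registered for the next Fetch.
     }
 */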
7324 
7325  const bool m_OverlappingMoveSupported;
7326 
7327  uint32_t m_AllocationCount;
7328  bool m_AllAllocations;
7329 
7330  VkDeviceSize m_BytesMoved;
7331  uint32_t m_AllocationsMoved;
7332 
7333  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7334 
7335  void PreprocessMetadata();
7336  void PostprocessMetadata();
7337  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7338 };
7339 
7340 struct VmaBlockDefragmentationContext
7341 {
7342  enum BLOCK_FLAG
7343  {
7344  BLOCK_FLAG_USED = 0x00000001,
7345  };
7346  uint32_t flags;
7347  VkBuffer hBuffer;
7348 };
7349 
7350 class VmaBlockVectorDefragmentationContext
7351 {
7352  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7353 public:
7354  VkResult res;
7355  bool mutexLocked;
7356  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7357  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7358  uint32_t defragmentationMovesProcessed;
7359  uint32_t defragmentationMovesCommitted;
7360  bool hasDefragmentationPlan;
7361 
7362  VmaBlockVectorDefragmentationContext(
7363  VmaAllocator hAllocator,
7364  VmaPool hCustomPool, // Optional.
7365  VmaBlockVector* pBlockVector,
7366  uint32_t currFrameIndex);
7367  ~VmaBlockVectorDefragmentationContext();
7368 
7369  VmaPool GetCustomPool() const { return m_hCustomPool; }
7370  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7371  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7372 
7373  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7374  void AddAll() { m_AllAllocations = true; }
7375 
7376  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7377 
7378 private:
7379  const VmaAllocator m_hAllocator;
7380  // Null if not from a custom pool.
7381  const VmaPool m_hCustomPool;
7382  // Redundant, kept for convenience to avoid fetching it from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7383  VmaBlockVector* const m_pBlockVector;
7384  const uint32_t m_CurrFrameIndex;
7385  // Owner of this object.
7386  VmaDefragmentationAlgorithm* m_pAlgorithm;
7387 
7388  struct AllocInfo
7389  {
7390  VmaAllocation hAlloc;
7391  VkBool32* pChanged;
7392  };
7393  // Used between constructor and Begin.
7394  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7395  bool m_AllAllocations;
7396 };
7397 
7398 struct VmaDefragmentationContext_T
7399 {
7400 private:
7401  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7402 public:
7403  VmaDefragmentationContext_T(
7404  VmaAllocator hAllocator,
7405  uint32_t currFrameIndex,
7406  uint32_t flags,
7407  VmaDefragmentationStats* pStats);
7408  ~VmaDefragmentationContext_T();
7409 
7410  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7411  void AddAllocations(
7412  uint32_t allocationCount,
7413  const VmaAllocation* pAllocations,
7414  VkBool32* pAllocationsChanged);
7415 
7416  /*
7417  Returns:
7418  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
7419  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
7420  - Negative value if an error occurred and the object can be destroyed immediately.
7421  */
7422  VkResult Defragment(
7423  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7424  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7425  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7426 
7427  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7428  VkResult DefragmentPassEnd();
7429 
7430 private:
7431  const VmaAllocator m_hAllocator;
7432  const uint32_t m_CurrFrameIndex;
7433  const uint32_t m_Flags;
7434  VmaDefragmentationStats* const m_pStats;
7435 
7436  VkDeviceSize m_MaxCpuBytesToMove;
7437  uint32_t m_MaxCpuAllocationsToMove;
7438  VkDeviceSize m_MaxGpuBytesToMove;
7439  uint32_t m_MaxGpuAllocationsToMove;
7440 
7441  // Owner of these objects.
7442  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7443  // Owner of these objects.
7444  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7445 };
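/*
Illustrative sketch (assumed application code; `allocator` and `defragInfo` of
type VmaDefragmentationInfo2 are prepared by the caller) showing how the return
values of Defragment() documented above surface through the public API:

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    if(res == VK_NOT_READY)
    {
        // GPU moves were recorded into defragInfo.commandBuffer:
        // submit it and wait for completion before ending.
    }
    if(res >= 0)
    {
        vmaDefragmentationEnd(allocator, defragCtx); // Finishes and destroys the context.
    }
    // res < 0: an error occurred and the context can be destroyed immediately.
*/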
7446 
7447 #if VMA_RECORDING_ENABLED
7448 
7449 class VmaRecorder
7450 {
7451 public:
7452  VmaRecorder();
7453  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7454  void WriteConfiguration(
7455  const VkPhysicalDeviceProperties& devProps,
7456  const VkPhysicalDeviceMemoryProperties& memProps,
7457  uint32_t vulkanApiVersion,
7458  bool dedicatedAllocationExtensionEnabled,
7459  bool bindMemory2ExtensionEnabled,
7460  bool memoryBudgetExtensionEnabled,
7461  bool deviceCoherentMemoryExtensionEnabled);
7462  ~VmaRecorder();
7463 
7464  void RecordCreateAllocator(uint32_t frameIndex);
7465  void RecordDestroyAllocator(uint32_t frameIndex);
7466  void RecordCreatePool(uint32_t frameIndex,
7467  const VmaPoolCreateInfo& createInfo,
7468  VmaPool pool);
7469  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7470  void RecordAllocateMemory(uint32_t frameIndex,
7471  const VkMemoryRequirements& vkMemReq,
7472  const VmaAllocationCreateInfo& createInfo,
7473  VmaAllocation allocation);
7474  void RecordAllocateMemoryPages(uint32_t frameIndex,
7475  const VkMemoryRequirements& vkMemReq,
7476  const VmaAllocationCreateInfo& createInfo,
7477  uint64_t allocationCount,
7478  const VmaAllocation* pAllocations);
7479  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7480  const VkMemoryRequirements& vkMemReq,
7481  bool requiresDedicatedAllocation,
7482  bool prefersDedicatedAllocation,
7483  const VmaAllocationCreateInfo& createInfo,
7484  VmaAllocation allocation);
7485  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7486  const VkMemoryRequirements& vkMemReq,
7487  bool requiresDedicatedAllocation,
7488  bool prefersDedicatedAllocation,
7489  const VmaAllocationCreateInfo& createInfo,
7490  VmaAllocation allocation);
7491  void RecordFreeMemory(uint32_t frameIndex,
7492  VmaAllocation allocation);
7493  void RecordFreeMemoryPages(uint32_t frameIndex,
7494  uint64_t allocationCount,
7495  const VmaAllocation* pAllocations);
7496  void RecordSetAllocationUserData(uint32_t frameIndex,
7497  VmaAllocation allocation,
7498  const void* pUserData);
7499  void RecordCreateLostAllocation(uint32_t frameIndex,
7500  VmaAllocation allocation);
7501  void RecordMapMemory(uint32_t frameIndex,
7502  VmaAllocation allocation);
7503  void RecordUnmapMemory(uint32_t frameIndex,
7504  VmaAllocation allocation);
7505  void RecordFlushAllocation(uint32_t frameIndex,
7506  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7507  void RecordInvalidateAllocation(uint32_t frameIndex,
7508  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7509  void RecordCreateBuffer(uint32_t frameIndex,
7510  const VkBufferCreateInfo& bufCreateInfo,
7511  const VmaAllocationCreateInfo& allocCreateInfo,
7512  VmaAllocation allocation);
7513  void RecordCreateImage(uint32_t frameIndex,
7514  const VkImageCreateInfo& imageCreateInfo,
7515  const VmaAllocationCreateInfo& allocCreateInfo,
7516  VmaAllocation allocation);
7517  void RecordDestroyBuffer(uint32_t frameIndex,
7518  VmaAllocation allocation);
7519  void RecordDestroyImage(uint32_t frameIndex,
7520  VmaAllocation allocation);
7521  void RecordTouchAllocation(uint32_t frameIndex,
7522  VmaAllocation allocation);
7523  void RecordGetAllocationInfo(uint32_t frameIndex,
7524  VmaAllocation allocation);
7525  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7526  VmaPool pool);
7527  void RecordDefragmentationBegin(uint32_t frameIndex,
7528  const VmaDefragmentationInfo2& info,
7529  VmaDefragmentationContext ctx);
7530  void RecordDefragmentationEnd(uint32_t frameIndex,
7531  VmaDefragmentationContext ctx);
7532  void RecordSetPoolName(uint32_t frameIndex,
7533  VmaPool pool,
7534  const char* name);
7535 
7536 private:
7537  struct CallParams
7538  {
7539  uint32_t threadId;
7540  double time;
7541  };
7542 
7543  class UserDataString
7544  {
7545  public:
7546  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7547  const char* GetString() const { return m_Str; }
7548 
7549  private:
7550  char m_PtrStr[17];
7551  const char* m_Str;
7552  };
7553 
7554  bool m_UseMutex;
7555  VmaRecordFlags m_Flags;
7556  FILE* m_File;
7557  VMA_MUTEX m_FileMutex;
7558  int64_t m_Freq;
7559  int64_t m_StartCounter;
7560 
7561  void GetBasicParams(CallParams& outParams);
7562 
7563  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7564  template<typename T>
7565  void PrintPointerList(uint64_t count, const T* pItems)
7566  {
7567  if(count)
7568  {
7569  fprintf(m_File, "%p", pItems[0]);
7570  for(uint64_t i = 1; i < count; ++i)
7571  {
7572  fprintf(m_File, " %p", pItems[i]);
7573  }
7574  }
7575  }
7576 
7577  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7578  void Flush();
7579 };
7580 
7581 #endif // #if VMA_RECORDING_ENABLED
7582 
7583 /*
7584 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7585 */
7586 class VmaAllocationObjectAllocator
7587 {
7588  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7589 public:
7590  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7591 
7592  template<typename... Types> VmaAllocation Allocate(Types... args);
7593  void Free(VmaAllocation hAlloc);
7594 
7595 private:
7596  VMA_MUTEX m_Mutex;
7597  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7598 };
7599 
7600 struct VmaCurrentBudgetData
7601 {
7602  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7603  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7604 
7605 #if VMA_MEMORY_BUDGET
7606  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7607  VMA_RW_MUTEX m_BudgetMutex;
7608  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7609  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7610  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7611 #endif // #if VMA_MEMORY_BUDGET
7612 
7613  VmaCurrentBudgetData()
7614  {
7615  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7616  {
7617  m_BlockBytes[heapIndex] = 0;
7618  m_AllocationBytes[heapIndex] = 0;
7619 #if VMA_MEMORY_BUDGET
7620  m_VulkanUsage[heapIndex] = 0;
7621  m_VulkanBudget[heapIndex] = 0;
7622  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7623 #endif
7624  }
7625 
7626 #if VMA_MEMORY_BUDGET
7627  m_OperationsSinceBudgetFetch = 0;
7628 #endif
7629  }
7630 
7631  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7632  {
7633  m_AllocationBytes[heapIndex] += allocationSize;
7634 #if VMA_MEMORY_BUDGET
7635  ++m_OperationsSinceBudgetFetch;
7636 #endif
7637  }
7638 
7639  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7640  {
7641  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
7642  m_AllocationBytes[heapIndex] -= allocationSize;
7643 #if VMA_MEMORY_BUDGET
7644  ++m_OperationsSinceBudgetFetch;
7645 #endif
7646  }
7647 };
7648 
7649 // Main allocator object.
7650 struct VmaAllocator_T
7651 {
7652  VMA_CLASS_NO_COPY(VmaAllocator_T)
7653 public:
7654  bool m_UseMutex;
7655  uint32_t m_VulkanApiVersion;
7656  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7657  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7658  bool m_UseExtMemoryBudget;
7659  bool m_UseAmdDeviceCoherentMemory;
7660  bool m_UseKhrBufferDeviceAddress;
7661  VkDevice m_hDevice;
7662  VkInstance m_hInstance;
7663  bool m_AllocationCallbacksSpecified;
7664  VkAllocationCallbacks m_AllocationCallbacks;
7665  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7666  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7667 
7668  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
7669  uint32_t m_HeapSizeLimitMask;
7670 
7671  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7672  VkPhysicalDeviceMemoryProperties m_MemProps;
7673 
7674  // Default pools.
7675  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7676 
7677  // Each vector is sorted by memory (handle value).
7678  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7679  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7680  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7681 
7682  VmaCurrentBudgetData m_Budget;
7683 
7684  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7685  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7686  ~VmaAllocator_T();
7687 
7688  const VkAllocationCallbacks* GetAllocationCallbacks() const
7689  {
7690  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7691  }
7692  const VmaVulkanFunctions& GetVulkanFunctions() const
7693  {
7694  return m_VulkanFunctions;
7695  }
7696 
7697  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7698 
7699  VkDeviceSize GetBufferImageGranularity() const
7700  {
7701  return VMA_MAX(
7702  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7703  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7704  }
7705 
7706  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7707  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7708 
7709  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7710  {
7711  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7712  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7713  }
7714  // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
7715  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7716  {
7717  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7718  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7719  }
7720  // Minimum alignment for all allocations in the given memory type.
7721  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7722  {
7723  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7724  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7725  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7726  }
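 // Example: for a HOST_VISIBLE but non-HOST_COHERENT type with
 // nonCoherentAtomSize = 64 and the default VMA_DEBUG_ALIGNMENT = 1, this
 // returns 64, keeping mapped ranges passed to vkFlushMappedMemoryRanges /
 // vkInvalidateMappedMemoryRanges properly aligned.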
7727 
7728  bool IsIntegratedGpu() const
7729  {
7730  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7731  }
7732 
7733  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7734 
7735 #if VMA_RECORDING_ENABLED
7736  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7737 #endif
7738 
7739  void GetBufferMemoryRequirements(
7740  VkBuffer hBuffer,
7741  VkMemoryRequirements& memReq,
7742  bool& requiresDedicatedAllocation,
7743  bool& prefersDedicatedAllocation) const;
7744  void GetImageMemoryRequirements(
7745  VkImage hImage,
7746  VkMemoryRequirements& memReq,
7747  bool& requiresDedicatedAllocation,
7748  bool& prefersDedicatedAllocation) const;
7749 
7750  // Main allocation function.
7751  VkResult AllocateMemory(
7752  const VkMemoryRequirements& vkMemReq,
7753  bool requiresDedicatedAllocation,
7754  bool prefersDedicatedAllocation,
7755  VkBuffer dedicatedBuffer,
7756  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7757  VkImage dedicatedImage,
7758  const VmaAllocationCreateInfo& createInfo,
7759  VmaSuballocationType suballocType,
7760  size_t allocationCount,
7761  VmaAllocation* pAllocations);
7762 
7763  // Main deallocation function.
7764  void FreeMemory(
7765  size_t allocationCount,
7766  const VmaAllocation* pAllocations);
7767 
7768  VkResult ResizeAllocation(
7769  const VmaAllocation alloc,
7770  VkDeviceSize newSize);
7771 
7772  void CalculateStats(VmaStats* pStats);
7773 
7774  void GetBudget(
7775  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7776 
7777 #if VMA_STATS_STRING_ENABLED
7778  void PrintDetailedMap(class VmaJsonWriter& json);
7779 #endif
7780 
7781  VkResult DefragmentationBegin(
7782  const VmaDefragmentationInfo2& info,
7783  VmaDefragmentationStats* pStats,
7784  VmaDefragmentationContext* pContext);
7785  VkResult DefragmentationEnd(
7786  VmaDefragmentationContext context);
7787 
7788  VkResult DefragmentationPassBegin(
7789  VmaDefragmentationPassInfo* pInfo,
7790  VmaDefragmentationContext context);
7791  VkResult DefragmentationPassEnd(
7792  VmaDefragmentationContext context);
7793 
7794  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7795  bool TouchAllocation(VmaAllocation hAllocation);
7796 
7797  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7798  void DestroyPool(VmaPool pool);
7799  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7800 
7801  void SetCurrentFrameIndex(uint32_t frameIndex);
7802  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7803 
7804  void MakePoolAllocationsLost(
7805  VmaPool hPool,
7806  size_t* pLostAllocationCount);
7807  VkResult CheckPoolCorruption(VmaPool hPool);
7808  VkResult CheckCorruption(uint32_t memoryTypeBits);
7809 
7810  void CreateLostAllocation(VmaAllocation* pAllocation);
7811 
7812  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7813  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7814  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7815  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7816  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7817  VkResult BindVulkanBuffer(
7818  VkDeviceMemory memory,
7819  VkDeviceSize memoryOffset,
7820  VkBuffer buffer,
7821  const void* pNext);
7822  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7823  VkResult BindVulkanImage(
7824  VkDeviceMemory memory,
7825  VkDeviceSize memoryOffset,
7826  VkImage image,
7827  const void* pNext);
7828 
7829  VkResult Map(VmaAllocation hAllocation, void** ppData);
7830  void Unmap(VmaAllocation hAllocation);
7831 
7832  VkResult BindBufferMemory(
7833  VmaAllocation hAllocation,
7834  VkDeviceSize allocationLocalOffset,
7835  VkBuffer hBuffer,
7836  const void* pNext);
7837  VkResult BindImageMemory(
7838  VmaAllocation hAllocation,
7839  VkDeviceSize allocationLocalOffset,
7840  VkImage hImage,
7841  const void* pNext);
7842 
7843  VkResult FlushOrInvalidateAllocation(
7844  VmaAllocation hAllocation,
7845  VkDeviceSize offset, VkDeviceSize size,
7846  VMA_CACHE_OPERATION op);
7847  VkResult FlushOrInvalidateAllocations(
7848  uint32_t allocationCount,
7849  const VmaAllocation* allocations,
7850  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
7851  VMA_CACHE_OPERATION op);
7852 
7853  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7854 
7855  /*
7856  Returns a bit mask of memory types that can support defragmentation on the GPU,
7857  because they support creation of the buffer required for copy operations.
7858  */
7859  uint32_t GetGpuDefragmentationMemoryTypeBits();
7860 
7861 private:
7862  VkDeviceSize m_PreferredLargeHeapBlockSize;
7863 
7864  VkPhysicalDevice m_PhysicalDevice;
7865  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7866  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7867 
7868  VMA_RW_MUTEX m_PoolsMutex;
7869  // Protected by m_PoolsMutex. Sorted by pointer value.
7870  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7871  uint32_t m_NextPoolId;
7872 
7873  VmaVulkanFunctions m_VulkanFunctions;
7874 
7875  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7876  uint32_t m_GlobalMemoryTypeBits;
7877 
7878 #if VMA_RECORDING_ENABLED
7879  VmaRecorder* m_pRecorder;
7880 #endif
7881 
7882  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7883 
7884 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7885  void ImportVulkanFunctions_Static();
7886 #endif
7887 
7888  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
7889 
7890 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
7891  void ImportVulkanFunctions_Dynamic();
7892 #endif
7893 
7894  void ValidateVulkanFunctions();
7895 
7896  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7897 
7898  VkResult AllocateMemoryOfType(
7899  VkDeviceSize size,
7900  VkDeviceSize alignment,
7901  bool dedicatedAllocation,
7902  VkBuffer dedicatedBuffer,
7903  VkBufferUsageFlags dedicatedBufferUsage,
7904  VkImage dedicatedImage,
7905  const VmaAllocationCreateInfo& createInfo,
7906  uint32_t memTypeIndex,
7907  VmaSuballocationType suballocType,
7908  size_t allocationCount,
7909  VmaAllocation* pAllocations);
7910 
7911  // Helper function only to be used inside AllocateDedicatedMemory.
7912  VkResult AllocateDedicatedMemoryPage(
7913  VkDeviceSize size,
7914  VmaSuballocationType suballocType,
7915  uint32_t memTypeIndex,
7916  const VkMemoryAllocateInfo& allocInfo,
7917  bool map,
7918  bool isUserDataString,
7919  void* pUserData,
7920  VmaAllocation* pAllocation);
7921 
7922  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7923  VkResult AllocateDedicatedMemory(
7924  VkDeviceSize size,
7925  VmaSuballocationType suballocType,
7926  uint32_t memTypeIndex,
7927  bool withinBudget,
7928  bool map,
7929  bool isUserDataString,
7930  void* pUserData,
7931  VkBuffer dedicatedBuffer,
7932  VkBufferUsageFlags dedicatedBufferUsage,
7933  VkImage dedicatedImage,
7934  size_t allocationCount,
7935  VmaAllocation* pAllocations);
7936 
7937  void FreeDedicatedMemory(const VmaAllocation allocation);
7938 
7939  /*
7940  Calculates and returns a bit mask of memory types that can support defragmentation
7941  on the GPU, because they support creation of the buffer required for copy operations.
7942  */
7943  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7944 
7945  uint32_t CalculateGlobalMemoryTypeBits() const;
7946 
7947  bool GetFlushOrInvalidateRange(
7948  VmaAllocation allocation,
7949  VkDeviceSize offset, VkDeviceSize size,
7950  VkMappedMemoryRange& outRange) const;
7951 
7952 #if VMA_MEMORY_BUDGET
7953  void UpdateVulkanBudget();
7954 #endif // #if VMA_MEMORY_BUDGET
7955 };
7956 
7958 // Memory allocation #2 after VmaAllocator_T definition
7959 
7960 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7961 {
7962  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7963 }
7964 
7965 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7966 {
7967  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7968 }
7969 
7970 template<typename T>
7971 static T* VmaAllocate(VmaAllocator hAllocator)
7972 {
7973  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7974 }
7975 
7976 template<typename T>
7977 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7978 {
7979  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7980 }
7981 
7982 template<typename T>
7983 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7984 {
7985  if(ptr != VMA_NULL)
7986  {
7987  ptr->~T();
7988  VmaFree(hAllocator, ptr);
7989  }
7990 }
7991 
7992 template<typename T>
7993 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7994 {
7995  if(ptr != VMA_NULL)
7996  {
7997  for(size_t i = count; i--; )
7998  ptr[i].~T();
7999  VmaFree(hAllocator, ptr);
8000  }
8001 }
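/*
Illustrative sketch (for exposition): VmaAllocate<T> only reserves raw memory,
so callers pair it with placement-new, and vma_delete() runs the destructor
before returning the memory to the allocation callbacks:

    VmaStringBuilder* sb = new(VmaAllocate<VmaStringBuilder>(hAllocator)) VmaStringBuilder(hAllocator);
    // ... use sb ...
    vma_delete(hAllocator, sb); // Calls ~VmaStringBuilder(), then VmaFree().
*/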
8002 
8004 // VmaStringBuilder
8005 
8006 #if VMA_STATS_STRING_ENABLED
8007 
8008 class VmaStringBuilder
8009 {
8010 public:
8011  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8012  size_t GetLength() const { return m_Data.size(); }
8013  const char* GetData() const { return m_Data.data(); }
8014 
8015  void Add(char ch) { m_Data.push_back(ch); }
8016  void Add(const char* pStr);
8017  void AddNewLine() { Add('\n'); }
8018  void AddNumber(uint32_t num);
8019  void AddNumber(uint64_t num);
8020  void AddPointer(const void* ptr);
8021 
8022 private:
8023  VmaVector< char, VmaStlAllocator<char> > m_Data;
8024 };
8025 
8026 void VmaStringBuilder::Add(const char* pStr)
8027 {
8028  const size_t strLen = strlen(pStr);
8029  if(strLen > 0)
8030  {
8031  const size_t oldCount = m_Data.size();
8032  m_Data.resize(oldCount + strLen);
8033  memcpy(m_Data.data() + oldCount, pStr, strLen);
8034  }
8035 }
8036 
8037 void VmaStringBuilder::AddNumber(uint32_t num)
8038 {
8039  char buf[11];
8040  buf[10] = '\0';
8041  char *p = &buf[10];
8042  do
8043  {
8044  *--p = '0' + (num % 10);
8045  num /= 10;
8046  }
8047  while(num);
8048  Add(p);
8049 }
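// Example: for num == 305 the loop above writes '5', '0', '3' backwards from
// the end of the buffer, leaving p pointing at "305". buf[11] fits the
// 10 digits of UINT32_MAX plus the terminating null.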
8050 
8051 void VmaStringBuilder::AddNumber(uint64_t num)
8052 {
8053  char buf[21];
8054  buf[20] = '\0';
8055  char *p = &buf[20];
8056  do
8057  {
8058  *--p = '0' + (num % 10);
8059  num /= 10;
8060  }
8061  while(num);
8062  Add(p);
8063 }
8064 
8065 void VmaStringBuilder::AddPointer(const void* ptr)
8066 {
8067  char buf[21];
8068  VmaPtrToStr(buf, sizeof(buf), ptr);
8069  Add(buf);
8070 }
8071 
8072 #endif // #if VMA_STATS_STRING_ENABLED
8073 
8075 // VmaJsonWriter
8076 
8077 #if VMA_STATS_STRING_ENABLED
8078 
8079 class VmaJsonWriter
8080 {
8081  VMA_CLASS_NO_COPY(VmaJsonWriter)
8082 public:
8083  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8084  ~VmaJsonWriter();
8085 
8086  void BeginObject(bool singleLine = false);
8087  void EndObject();
8088 
8089  void BeginArray(bool singleLine = false);
8090  void EndArray();
8091 
8092  void WriteString(const char* pStr);
8093  void BeginString(const char* pStr = VMA_NULL);
8094  void ContinueString(const char* pStr);
8095  void ContinueString(uint32_t n);
8096  void ContinueString(uint64_t n);
8097  void ContinueString_Pointer(const void* ptr);
8098  void EndString(const char* pStr = VMA_NULL);
8099 
8100  void WriteNumber(uint32_t n);
8101  void WriteNumber(uint64_t n);
8102  void WriteBool(bool b);
8103  void WriteNull();
8104 
8105 private:
8106  static const char* const INDENT;
8107 
8108  enum COLLECTION_TYPE
8109  {
8110  COLLECTION_TYPE_OBJECT,
8111  COLLECTION_TYPE_ARRAY,
8112  };
8113  struct StackItem
8114  {
8115  COLLECTION_TYPE type;
8116  uint32_t valueCount;
8117  bool singleLineMode;
8118  };
8119 
8120  VmaStringBuilder& m_SB;
8121  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8122  bool m_InsideString;
8123 
8124  void BeginValue(bool isString);
8125  void WriteIndent(bool oneLess = false);
8126 };
8127 
8128 const char* const VmaJsonWriter::INDENT = " ";
8129 
8130 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8131  m_SB(sb),
8132  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8133  m_InsideString(false)
8134 {
8135 }
8136 
8137 VmaJsonWriter::~VmaJsonWriter()
8138 {
8139  VMA_ASSERT(!m_InsideString);
8140  VMA_ASSERT(m_Stack.empty());
8141 }
8142 
8143 void VmaJsonWriter::BeginObject(bool singleLine)
8144 {
8145  VMA_ASSERT(!m_InsideString);
8146 
8147  BeginValue(false);
8148  m_SB.Add('{');
8149 
8150  StackItem item;
8151  item.type = COLLECTION_TYPE_OBJECT;
8152  item.valueCount = 0;
8153  item.singleLineMode = singleLine;
8154  m_Stack.push_back(item);
8155 }
8156 
8157 void VmaJsonWriter::EndObject()
8158 {
8159  VMA_ASSERT(!m_InsideString);
8160 
8161  WriteIndent(true);
8162  m_SB.Add('}');
8163 
8164  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8165  m_Stack.pop_back();
8166 }
8167 
8168 void VmaJsonWriter::BeginArray(bool singleLine)
8169 {
8170  VMA_ASSERT(!m_InsideString);
8171 
8172  BeginValue(false);
8173  m_SB.Add('[');
8174 
8175  StackItem item;
8176  item.type = COLLECTION_TYPE_ARRAY;
8177  item.valueCount = 0;
8178  item.singleLineMode = singleLine;
8179  m_Stack.push_back(item);
8180 }
8181 
8182 void VmaJsonWriter::EndArray()
8183 {
8184  VMA_ASSERT(!m_InsideString);
8185 
8186  WriteIndent(true);
8187  m_SB.Add(']');
8188 
8189  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8190  m_Stack.pop_back();
8191 }
8192 
8193 void VmaJsonWriter::WriteString(const char* pStr)
8194 {
8195  BeginString(pStr);
8196  EndString();
8197 }
8198 
8199 void VmaJsonWriter::BeginString(const char* pStr)
8200 {
8201  VMA_ASSERT(!m_InsideString);
8202 
8203  BeginValue(true);
8204  m_SB.Add('"');
8205  m_InsideString = true;
8206  if(pStr != VMA_NULL && pStr[0] != '\0')
8207  {
8208  ContinueString(pStr);
8209  }
8210 }
8211 
8212 void VmaJsonWriter::ContinueString(const char* pStr)
8213 {
8214  VMA_ASSERT(m_InsideString);
8215 
8216  const size_t strLen = strlen(pStr);
8217  for(size_t i = 0; i < strLen; ++i)
8218  {
8219  char ch = pStr[i];
8220  if(ch == '\\')
8221  {
8222  m_SB.Add("\\\\");
8223  }
8224  else if(ch == '"')
8225  {
8226  m_SB.Add("\\\"");
8227  }
8228  else if(ch >= 32)
8229  {
8230  m_SB.Add(ch);
8231  }
8232  else switch(ch)
8233  {
8234  case '\b':
8235  m_SB.Add("\\b");
8236  break;
8237  case '\f':
8238  m_SB.Add("\\f");
8239  break;
8240  case '\n':
8241  m_SB.Add("\\n");
8242  break;
8243  case '\r':
8244  m_SB.Add("\\r");
8245  break;
8246  case '\t':
8247  m_SB.Add("\\t");
8248  break;
8249  default:
8250  VMA_ASSERT(0 && "Character not currently supported.");
8251  break;
8252  }
8253  }
8254 }
8255 
8256 void VmaJsonWriter::ContinueString(uint32_t n)
8257 {
8258  VMA_ASSERT(m_InsideString);
8259  m_SB.AddNumber(n);
8260 }
8261 
8262 void VmaJsonWriter::ContinueString(uint64_t n)
8263 {
8264  VMA_ASSERT(m_InsideString);
8265  m_SB.AddNumber(n);
8266 }
8267 
8268 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8269 {
8270  VMA_ASSERT(m_InsideString);
8271  m_SB.AddPointer(ptr);
8272 }
8273 
8274 void VmaJsonWriter::EndString(const char* pStr)
8275 {
8276  VMA_ASSERT(m_InsideString);
8277  if(pStr != VMA_NULL && pStr[0] != '\0')
8278  {
8279  ContinueString(pStr);
8280  }
8281  m_SB.Add('"');
8282  m_InsideString = false;
8283 }
8284 
8285 void VmaJsonWriter::WriteNumber(uint32_t n)
8286 {
8287  VMA_ASSERT(!m_InsideString);
8288  BeginValue(false);
8289  m_SB.AddNumber(n);
8290 }
8291 
8292 void VmaJsonWriter::WriteNumber(uint64_t n)
8293 {
8294  VMA_ASSERT(!m_InsideString);
8295  BeginValue(false);
8296  m_SB.AddNumber(n);
8297 }
8298 
8299 void VmaJsonWriter::WriteBool(bool b)
8300 {
8301  VMA_ASSERT(!m_InsideString);
8302  BeginValue(false);
8303  m_SB.Add(b ? "true" : "false");
8304 }
8305 
8306 void VmaJsonWriter::WriteNull()
8307 {
8308  VMA_ASSERT(!m_InsideString);
8309  BeginValue(false);
8310  m_SB.Add("null");
8311 }
8312 
8313 void VmaJsonWriter::BeginValue(bool isString)
8314 {
8315  if(!m_Stack.empty())
8316  {
8317  StackItem& currItem = m_Stack.back();
8318  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8319  currItem.valueCount % 2 == 0)
8320  {
8321  VMA_ASSERT(isString);
8322  }
8323 
8324  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8325  currItem.valueCount % 2 != 0)
8326  {
8327  m_SB.Add(": ");
8328  }
8329  else if(currItem.valueCount > 0)
8330  {
8331  m_SB.Add(", ");
8332  WriteIndent();
8333  }
8334  else
8335  {
8336  WriteIndent();
8337  }
8338  ++currItem.valueCount;
8339  }
8340 }
8341 
8342 void VmaJsonWriter::WriteIndent(bool oneLess)
8343 {
8344  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8345  {
8346  m_SB.AddNewLine();
8347 
8348  size_t count = m_Stack.size();
8349  if(count > 0 && oneLess)
8350  {
8351  --count;
8352  }
8353  for(size_t i = 0; i < count; ++i)
8354  {
8355  m_SB.Add(INDENT);
8356  }
8357  }
8358 }
8359 
8360 #endif // #if VMA_STATS_STRING_ENABLED
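/*
Illustrative sketch (assumed caller) of the key/value alternation enforced by
BeginValue(): inside an object, every even-numbered value must be a string
(the key), which the following usage satisfies:

    VmaStringBuilder sb(hAllocator);
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Count"); // Value #0 - the key, must be a string.
    json.WriteNumber(42u);     // Value #1 - the associated value.
    json.EndObject();
    // sb now contains: {"Count": 42} spread over indented lines.
*/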
8361 
8363 
8364 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8365 {
8366  if(IsUserDataString())
8367  {
8368  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8369 
8370  FreeUserDataString(hAllocator);
8371 
8372  if(pUserData != VMA_NULL)
8373  {
8374  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8375  }
8376  }
8377  else
8378  {
8379  m_pUserData = pUserData;
8380  }
8381 }
8382 
8383 void VmaAllocation_T::ChangeBlockAllocation(
8384  VmaAllocator hAllocator,
8385  VmaDeviceMemoryBlock* block,
8386  VkDeviceSize offset)
8387 {
8388  VMA_ASSERT(block != VMA_NULL);
8389  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8390 
8391  // Move mapping reference counter from old block to new block.
8392  if(block != m_BlockAllocation.m_Block)
8393  {
8394  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8395  if(IsPersistentMap())
8396  ++mapRefCount;
8397  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8398  block->Map(hAllocator, mapRefCount, VMA_NULL);
8399  }
8400 
8401  m_BlockAllocation.m_Block = block;
8402  m_BlockAllocation.m_Offset = offset;
8403 }
8404 
8405 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8406 {
8407  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8408  m_BlockAllocation.m_Offset = newOffset;
8409 }
8410 
8411 VkDeviceSize VmaAllocation_T::GetOffset() const
8412 {
8413  switch(m_Type)
8414  {
8415  case ALLOCATION_TYPE_BLOCK:
8416  return m_BlockAllocation.m_Offset;
8417  case ALLOCATION_TYPE_DEDICATED:
8418  return 0;
8419  default:
8420  VMA_ASSERT(0);
8421  return 0;
8422  }
8423 }
8424 
8425 VkDeviceMemory VmaAllocation_T::GetMemory() const
8426 {
8427  switch(m_Type)
8428  {
8429  case ALLOCATION_TYPE_BLOCK:
8430  return m_BlockAllocation.m_Block->GetDeviceMemory();
8431  case ALLOCATION_TYPE_DEDICATED:
8432  return m_DedicatedAllocation.m_hMemory;
8433  default:
8434  VMA_ASSERT(0);
8435  return VK_NULL_HANDLE;
8436  }
8437 }
8438 
8439 void* VmaAllocation_T::GetMappedData() const
8440 {
8441  switch(m_Type)
8442  {
8443  case ALLOCATION_TYPE_BLOCK:
8444  if(m_MapCount != 0)
8445  {
8446  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8447  VMA_ASSERT(pBlockData != VMA_NULL);
8448  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8449  }
8450  else
8451  {
8452  return VMA_NULL;
8453  }
8454  break;
8455  case ALLOCATION_TYPE_DEDICATED:
8456  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8457  return m_DedicatedAllocation.m_pMappedData;
8458  default:
8459  VMA_ASSERT(0);
8460  return VMA_NULL;
8461  }
8462 }
8463 
8464 bool VmaAllocation_T::CanBecomeLost() const
8465 {
8466  switch(m_Type)
8467  {
8468  case ALLOCATION_TYPE_BLOCK:
8469  return m_BlockAllocation.m_CanBecomeLost;
8470  case ALLOCATION_TYPE_DEDICATED:
8471  return false;
8472  default:
8473  VMA_ASSERT(0);
8474  return false;
8475  }
8476 }
8477 
8478 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8479 {
8480  VMA_ASSERT(CanBecomeLost());
8481 
8482  /*
8483  Warning: This is a carefully designed algorithm.
8484  Do not modify unless you really know what you're doing :)
8485  */
8486  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8487  for(;;)
8488  {
8489  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8490  {
8491  VMA_ASSERT(0);
8492  return false;
8493  }
8494  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8495  {
8496  return false;
8497  }
8498  else // Last use time earlier than current time.
8499  {
8500  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8501  {
8502  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8503  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8504  return true;
8505  }
8506  }
8507  }
8508 }
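/*
Worked example of the frame arithmetic above: with frameInUseCount = 2 and
currentFrameIndex = 10, an allocation last used at frame 8, 9 or 10 is still
considered in use (8 + 2 >= 10), while one last used at frame 7 can be
atomically retired to VMA_FRAME_INDEX_LOST. The compare-exchange loop retries
if another thread updates lastUseFrameIndex between the read and the swap.
*/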
8509 
8510 #if VMA_STATS_STRING_ENABLED
8511 
8512 // Corresponds to values of enum VmaSuballocationType.
8513 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8514  "FREE",
8515  "UNKNOWN",
8516  "BUFFER",
8517  "IMAGE_UNKNOWN",
8518  "IMAGE_LINEAR",
8519  "IMAGE_OPTIMAL",
8520 };
8521 
8522 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8523 {
8524  json.WriteString("Type");
8525  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8526 
8527  json.WriteString("Size");
8528  json.WriteNumber(m_Size);
8529 
8530  if(m_pUserData != VMA_NULL)
8531  {
8532  json.WriteString("UserData");
8533  if(IsUserDataString())
8534  {
8535  json.WriteString((const char*)m_pUserData);
8536  }
8537  else
8538  {
8539  json.BeginString();
8540  json.ContinueString_Pointer(m_pUserData);
8541  json.EndString();
8542  }
8543  }
8544 
8545  json.WriteString("CreationFrameIndex");
8546  json.WriteNumber(m_CreationFrameIndex);
8547 
8548  json.WriteString("LastUseFrameIndex");
8549  json.WriteNumber(GetLastUseFrameIndex());
8550 
8551  if(m_BufferImageUsage != 0)
8552  {
8553  json.WriteString("Usage");
8554  json.WriteNumber(m_BufferImageUsage);
8555  }
8556 }
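/*
For reference, PrintParameters() emits a fragment of this shape inside the
JSON returned by vmaBuildStatsString() (all values illustrative):

    "Type": "BUFFER", "Size": 65536, "UserData": "0x7FF6A0001230",
    "CreationFrameIndex": 10, "LastUseFrameIndex": 12, "Usage": 130
*/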
8557 
8558 #endif
8559 
8560 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8561 {
8562  VMA_ASSERT(IsUserDataString());
8563  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8564  m_pUserData = VMA_NULL;
8565 }
8566 
8567 void VmaAllocation_T::BlockAllocMap()
8568 {
8569  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8570 
8571  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8572  {
8573  ++m_MapCount;
8574  }
8575  else
8576  {
8577  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8578  }
8579 }
8580 
8581 void VmaAllocation_T::BlockAllocUnmap()
8582 {
8583  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8584 
8585  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8586  {
8587  --m_MapCount;
8588  }
8589  else
8590  {
8591  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8592  }
8593 }
8594 
8595 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8596 {
8597  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8598 
8599  if(m_MapCount != 0)
8600  {
8601  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8602  {
8603  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8604  *ppData = m_DedicatedAllocation.m_pMappedData;
8605  ++m_MapCount;
8606  return VK_SUCCESS;
8607  }
8608  else
8609  {
8610  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8611  return VK_ERROR_MEMORY_MAP_FAILED;
8612  }
8613  }
8614  else
8615  {
8616  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8617  hAllocator->m_hDevice,
8618  m_DedicatedAllocation.m_hMemory,
8619  0, // offset
8620  VK_WHOLE_SIZE,
8621  0, // flags
8622  ppData);
8623  if(result == VK_SUCCESS)
8624  {
8625  m_DedicatedAllocation.m_pMappedData = *ppData;
8626  m_MapCount = 1;
8627  }
8628  return result;
8629  }
8630 }
8631 
8632 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8633 {
8634  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8635 
8636  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8637  {
8638  --m_MapCount;
8639  if(m_MapCount == 0)
8640  {
8641  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8642  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8643  hAllocator->m_hDevice,
8644  m_DedicatedAllocation.m_hMemory);
8645  }
8646  }
8647  else
8648  {
8649  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8650  }
8651 }
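/*
In the four Map/Unmap functions above, m_MapCount packs two pieces of state:
the low 7 bits are a reference count of outstanding map operations (hence the
"< 0x7F" overflow checks), and the MAP_COUNT_FLAG_PERSISTENT_MAP bit marks
allocations created with VMA_ALLOCATION_CREATE_MAPPED_BIT, which stay mapped
for their whole lifetime. A minimal round-trip from user code (sketch; srcData
and srcSize are illustrative):

    void* pData = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/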
8652 
8653 #if VMA_STATS_STRING_ENABLED
8654 
8655 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8656 {
8657  json.BeginObject();
8658 
8659  json.WriteString("Blocks");
8660  json.WriteNumber(stat.blockCount);
8661 
8662  json.WriteString("Allocations");
8663  json.WriteNumber(stat.allocationCount);
8664 
8665  json.WriteString("UnusedRanges");
8666  json.WriteNumber(stat.unusedRangeCount);
8667 
8668  json.WriteString("UsedBytes");
8669  json.WriteNumber(stat.usedBytes);
8670 
8671  json.WriteString("UnusedBytes");
8672  json.WriteNumber(stat.unusedBytes);
8673 
8674  if(stat.allocationCount > 1)
8675  {
8676  json.WriteString("AllocationSize");
8677  json.BeginObject(true);
8678  json.WriteString("Min");
8679  json.WriteNumber(stat.allocationSizeMin);
8680  json.WriteString("Avg");
8681  json.WriteNumber(stat.allocationSizeAvg);
8682  json.WriteString("Max");
8683  json.WriteNumber(stat.allocationSizeMax);
8684  json.EndObject();
8685  }
8686 
8687  if(stat.unusedRangeCount > 1)
8688  {
8689  json.WriteString("UnusedRangeSize");
8690  json.BeginObject(true);
8691  json.WriteString("Min");
8692  json.WriteNumber(stat.unusedRangeSizeMin);
8693  json.WriteString("Avg");
8694  json.WriteNumber(stat.unusedRangeSizeAvg);
8695  json.WriteString("Max");
8696  json.WriteNumber(stat.unusedRangeSizeMax);
8697  json.EndObject();
8698  }
8699 
8700  json.EndObject();
8701 }
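/*
The resulting JSON object has this shape (values illustrative). The
"AllocationSize" and "UnusedRangeSize" sub-objects are emitted only when more
than one item was aggregated, per the checks above:

    { "Blocks": 1, "Allocations": 24, "UnusedRanges": 3,
      "UsedBytes": 12582912, "UnusedBytes": 4194304,
      "AllocationSize": { "Min": 4096, "Avg": 524288, "Max": 2097152 } }
*/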
8702 
8703 #endif // #if VMA_STATS_STRING_ENABLED
8704 
8705 struct VmaSuballocationItemSizeLess
8706 {
8707  bool operator()(
8708  const VmaSuballocationList::iterator lhs,
8709  const VmaSuballocationList::iterator rhs) const
8710  {
8711  return lhs->size < rhs->size;
8712  }
8713  bool operator()(
8714  const VmaSuballocationList::iterator lhs,
8715  VkDeviceSize rhsSize) const
8716  {
8717  return lhs->size < rhsSize;
8718  }
8719 };
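// The two operator() overloads let the same comparator serve both purposes:
// keeping m_FreeSuballocationsBySize sorted via VmaVectorInsertSorted()
// (iterator vs. iterator) and binary-searching it for a raw size via
// VmaBinaryFindFirstNotLess() (iterator vs. VkDeviceSize).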
8720 
8721 
8722 ////////////////////////////////////////////////////////////////////////////////
8723 // class VmaBlockMetadata
8724 
8725 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8726  m_Size(0),
8727  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8728 {
8729 }
8730 
8731 #if VMA_STATS_STRING_ENABLED
8732 
8733 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8734  VkDeviceSize unusedBytes,
8735  size_t allocationCount,
8736  size_t unusedRangeCount) const
8737 {
8738  json.BeginObject();
8739 
8740  json.WriteString("TotalBytes");
8741  json.WriteNumber(GetSize());
8742 
8743  json.WriteString("UnusedBytes");
8744  json.WriteNumber(unusedBytes);
8745 
8746  json.WriteString("Allocations");
8747  json.WriteNumber((uint64_t)allocationCount);
8748 
8749  json.WriteString("UnusedRanges");
8750  json.WriteNumber((uint64_t)unusedRangeCount);
8751 
8752  json.WriteString("Suballocations");
8753  json.BeginArray();
8754 }
8755 
8756 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8757  VkDeviceSize offset,
8758  VmaAllocation hAllocation) const
8759 {
8760  json.BeginObject(true);
8761 
8762  json.WriteString("Offset");
8763  json.WriteNumber(offset);
8764 
8765  hAllocation->PrintParameters(json);
8766 
8767  json.EndObject();
8768 }
8769 
8770 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8771  VkDeviceSize offset,
8772  VkDeviceSize size) const
8773 {
8774  json.BeginObject(true);
8775 
8776  json.WriteString("Offset");
8777  json.WriteNumber(offset);
8778 
8779  json.WriteString("Type");
8780  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8781 
8782  json.WriteString("Size");
8783  json.WriteNumber(size);
8784 
8785  json.EndObject();
8786 }
8787 
8788 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8789 {
8790  json.EndArray();
8791  json.EndObject();
8792 }
8793 
8794 #endif // #if VMA_STATS_STRING_ENABLED
8795 
8796 ////////////////////////////////////////////////////////////////////////////////
8797 // class VmaBlockMetadata_Generic
8798 
8799 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8800  VmaBlockMetadata(hAllocator),
8801  m_FreeCount(0),
8802  m_SumFreeSize(0),
8803  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8804  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8805 {
8806 }
8807 
8808 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8809 {
8810 }
8811 
8812 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8813 {
8814  VmaBlockMetadata::Init(size);
8815 
8816  m_FreeCount = 1;
8817  m_SumFreeSize = size;
8818 
8819  VmaSuballocation suballoc = {};
8820  suballoc.offset = 0;
8821  suballoc.size = size;
8822  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8823  suballoc.hAllocation = VK_NULL_HANDLE;
8824 
8825  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8826  m_Suballocations.push_back(suballoc);
8827  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8828  --suballocItem;
8829  m_FreeSuballocationsBySize.push_back(suballocItem);
8830 }
8831 
8832 bool VmaBlockMetadata_Generic::Validate() const
8833 {
8834  VMA_VALIDATE(!m_Suballocations.empty());
8835 
8836  // Expected offset of new suballocation as calculated from previous ones.
8837  VkDeviceSize calculatedOffset = 0;
8838  // Expected number of free suballocations as calculated from traversing their list.
8839  uint32_t calculatedFreeCount = 0;
8840  // Expected sum size of free suballocations as calculated from traversing their list.
8841  VkDeviceSize calculatedSumFreeSize = 0;
8842  // Expected number of free suballocations that should be registered in
8843  // m_FreeSuballocationsBySize calculated from traversing their list.
8844  size_t freeSuballocationsToRegister = 0;
8845  // True if previous visited suballocation was free.
8846  bool prevFree = false;
8847 
8848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8849  suballocItem != m_Suballocations.cend();
8850  ++suballocItem)
8851  {
8852  const VmaSuballocation& subAlloc = *suballocItem;
8853 
8854  // Actual offset of this suballocation doesn't match expected one.
8855  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8856 
8857  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8858  // Two adjacent free suballocations are invalid. They should be merged.
8859  VMA_VALIDATE(!prevFree || !currFree);
8860 
8861  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8862 
8863  if(currFree)
8864  {
8865  calculatedSumFreeSize += subAlloc.size;
8866  ++calculatedFreeCount;
8867  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8868  {
8869  ++freeSuballocationsToRegister;
8870  }
8871 
8872  // Margin required between allocations - every free space must be at least that large.
8873  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8874  }
8875  else
8876  {
8877  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8878  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8879 
8880  // Margin required between allocations - previous allocation must be free.
8881  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8882  }
8883 
8884  calculatedOffset += subAlloc.size;
8885  prevFree = currFree;
8886  }
8887 
8888  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8889  // match expected one.
8890  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8891 
8892  VkDeviceSize lastSize = 0;
8893  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8894  {
8895  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8896 
8897  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8898  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8899  // They must be sorted by size ascending.
8900  VMA_VALIDATE(suballocItem->size >= lastSize);
8901 
8902  lastSize = suballocItem->size;
8903  }
8904 
8905  // Check if totals match calculated values.
8906  VMA_VALIDATE(ValidateFreeSuballocationList());
8907  VMA_VALIDATE(calculatedOffset == GetSize());
8908  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8909  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8910 
8911  return true;
8912 }
8913 
8914 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8915 {
8916  if(!m_FreeSuballocationsBySize.empty())
8917  {
8918  return m_FreeSuballocationsBySize.back()->size;
8919  }
8920  else
8921  {
8922  return 0;
8923  }
8924 }
8925 
8926 bool VmaBlockMetadata_Generic::IsEmpty() const
8927 {
8928  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8929 }
8930 
8931 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8932 {
8933  outInfo.blockCount = 1;
8934 
8935  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8936  outInfo.allocationCount = rangeCount - m_FreeCount;
8937  outInfo.unusedRangeCount = m_FreeCount;
8938 
8939  outInfo.unusedBytes = m_SumFreeSize;
8940  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8941 
8942  outInfo.allocationSizeMin = UINT64_MAX;
8943  outInfo.allocationSizeMax = 0;
8944  outInfo.unusedRangeSizeMin = UINT64_MAX;
8945  outInfo.unusedRangeSizeMax = 0;
8946 
8947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8948  suballocItem != m_Suballocations.cend();
8949  ++suballocItem)
8950  {
8951  const VmaSuballocation& suballoc = *suballocItem;
8952  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8953  {
8954  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8955  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8956  }
8957  else
8958  {
8959  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8960  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8961  }
8962  }
8963 }
8964 
8965 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8966 {
8967  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8968 
8969  inoutStats.size += GetSize();
8970  inoutStats.unusedSize += m_SumFreeSize;
8971  inoutStats.allocationCount += rangeCount - m_FreeCount;
8972  inoutStats.unusedRangeCount += m_FreeCount;
8973  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8974 }
8975 
8976 #if VMA_STATS_STRING_ENABLED
8977 
8978 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8979 {
8980  PrintDetailedMap_Begin(json,
8981  m_SumFreeSize, // unusedBytes
8982  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8983  m_FreeCount); // unusedRangeCount
8984 
8985  size_t i = 0;
8986  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8987  suballocItem != m_Suballocations.cend();
8988  ++suballocItem, ++i)
8989  {
8990  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8991  {
8992  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8993  }
8994  else
8995  {
8996  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8997  }
8998  }
8999 
9000  PrintDetailedMap_End(json);
9001 }
9002 
9003 #endif // #if VMA_STATS_STRING_ENABLED
9004 
9005 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9006  uint32_t currentFrameIndex,
9007  uint32_t frameInUseCount,
9008  VkDeviceSize bufferImageGranularity,
9009  VkDeviceSize allocSize,
9010  VkDeviceSize allocAlignment,
9011  bool upperAddress,
9012  VmaSuballocationType allocType,
9013  bool canMakeOtherLost,
9014  uint32_t strategy,
9015  VmaAllocationRequest* pAllocationRequest)
9016 {
9017  VMA_ASSERT(allocSize > 0);
9018  VMA_ASSERT(!upperAddress);
9019  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9020  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9021  VMA_HEAVY_ASSERT(Validate());
9022 
9023  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9024 
9025  // There is not enough total free space in this block to fulfill the request: Early return.
9026  if(canMakeOtherLost == false &&
9027  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9028  {
9029  return false;
9030  }
9031 
9032  // New algorithm, efficiently searching freeSuballocationsBySize.
9033  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9034  if(freeSuballocCount > 0)
9035  {
9036  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9037  {
9038  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9039  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9040  m_FreeSuballocationsBySize.data(),
9041  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9042  allocSize + 2 * VMA_DEBUG_MARGIN,
9043  VmaSuballocationItemSizeLess());
9044  size_t index = it - m_FreeSuballocationsBySize.data();
9045  for(; index < freeSuballocCount; ++index)
9046  {
9047  if(CheckAllocation(
9048  currentFrameIndex,
9049  frameInUseCount,
9050  bufferImageGranularity,
9051  allocSize,
9052  allocAlignment,
9053  allocType,
9054  m_FreeSuballocationsBySize[index],
9055  false, // canMakeOtherLost
9056  &pAllocationRequest->offset,
9057  &pAllocationRequest->itemsToMakeLostCount,
9058  &pAllocationRequest->sumFreeSize,
9059  &pAllocationRequest->sumItemSize))
9060  {
9061  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9062  return true;
9063  }
9064  }
9065  }
9066  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9067  {
9068  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9069  it != m_Suballocations.end();
9070  ++it)
9071  {
9072  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9073  currentFrameIndex,
9074  frameInUseCount,
9075  bufferImageGranularity,
9076  allocSize,
9077  allocAlignment,
9078  allocType,
9079  it,
9080  false, // canMakeOtherLost
9081  &pAllocationRequest->offset,
9082  &pAllocationRequest->itemsToMakeLostCount,
9083  &pAllocationRequest->sumFreeSize,
9084  &pAllocationRequest->sumItemSize))
9085  {
9086  pAllocationRequest->item = it;
9087  return true;
9088  }
9089  }
9090  }
9091  else // WORST_FIT, FIRST_FIT
9092  {
9093  // Search starting from the biggest suballocations.
9094  for(size_t index = freeSuballocCount; index--; )
9095  {
9096  if(CheckAllocation(
9097  currentFrameIndex,
9098  frameInUseCount,
9099  bufferImageGranularity,
9100  allocSize,
9101  allocAlignment,
9102  allocType,
9103  m_FreeSuballocationsBySize[index],
9104  false, // canMakeOtherLost
9105  &pAllocationRequest->offset,
9106  &pAllocationRequest->itemsToMakeLostCount,
9107  &pAllocationRequest->sumFreeSize,
9108  &pAllocationRequest->sumItemSize))
9109  {
9110  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9111  return true;
9112  }
9113  }
9114  }
9115  }
9116 
9117  if(canMakeOtherLost)
9118  {
9119  // Brute-force algorithm. TODO: Come up with something better.
9120 
9121  bool found = false;
9122  VmaAllocationRequest tmpAllocRequest = {};
9123  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9124  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9125  suballocIt != m_Suballocations.end();
9126  ++suballocIt)
9127  {
9128  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9129  suballocIt->hAllocation->CanBecomeLost())
9130  {
9131  if(CheckAllocation(
9132  currentFrameIndex,
9133  frameInUseCount,
9134  bufferImageGranularity,
9135  allocSize,
9136  allocAlignment,
9137  allocType,
9138  suballocIt,
9139  canMakeOtherLost,
9140  &tmpAllocRequest.offset,
9141  &tmpAllocRequest.itemsToMakeLostCount,
9142  &tmpAllocRequest.sumFreeSize,
9143  &tmpAllocRequest.sumItemSize))
9144  {
9145  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9146  {
9147  *pAllocationRequest = tmpAllocRequest;
9148  pAllocationRequest->item = suballocIt;
9149  break;
9150  }
9151  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9152  {
9153  *pAllocationRequest = tmpAllocRequest;
9154  pAllocationRequest->item = suballocIt;
9155  found = true;
9156  }
9157  }
9158  }
9159  }
9160 
9161  return found;
9162  }
9163 
9164  return false;
9165 }
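/*
The strategy parameter above corresponds to the public
VMA_ALLOCATION_CREATE_STRATEGY_* flags: best fit binary-searches
m_FreeSuballocationsBySize for the smallest sufficient free range, worst/first
fit walk that vector from the largest range downward, and the min-offset
strategy (internal, used e.g. by defragmentation) scans suballocations in
address order. A caller selects a strategy like this (sketch; buffer setup is
illustrative):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
*/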
9166 
9167 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9168  uint32_t currentFrameIndex,
9169  uint32_t frameInUseCount,
9170  VmaAllocationRequest* pAllocationRequest)
9171 {
9172  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9173 
9174  while(pAllocationRequest->itemsToMakeLostCount > 0)
9175  {
9176  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9177  {
9178  ++pAllocationRequest->item;
9179  }
9180  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9181  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9182  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9183  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9184  {
9185  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9186  --pAllocationRequest->itemsToMakeLostCount;
9187  }
9188  else
9189  {
9190  return false;
9191  }
9192  }
9193 
9194  VMA_HEAVY_ASSERT(Validate());
9195  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9196  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9197 
9198  return true;
9199 }
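// On success, pAllocationRequest->item has been updated through
// FreeSuballocation(), which merges neighboring free ranges, so it now points
// at a single free suballocation large enough for the original request. On
// failure, allocations already marked lost stay lost; they were eligible to
// become lost anyway.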
9200 
9201 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9202 {
9203  uint32_t lostAllocationCount = 0;
9204  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9205  it != m_Suballocations.end();
9206  ++it)
9207  {
9208  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9209  it->hAllocation->CanBecomeLost() &&
9210  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9211  {
9212  it = FreeSuballocation(it);
9213  ++lostAllocationCount;
9214  }
9215  }
9216  return lostAllocationCount;
9217 }
9218 
9219 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9220 {
9221  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9222  it != m_Suballocations.end();
9223  ++it)
9224  {
9225  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
9226  {
9227  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9228  {
9229  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9230  return VK_ERROR_VALIDATION_FAILED_EXT;
9231  }
9232  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9233  {
9234  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9235  return VK_ERROR_VALIDATION_FAILED_EXT;
9236  }
9237  }
9238  }
9239 
9240  return VK_SUCCESS;
9241 }
9242 
9243 void VmaBlockMetadata_Generic::Alloc(
9244  const VmaAllocationRequest& request,
9245  VmaSuballocationType type,
9246  VkDeviceSize allocSize,
9247  VmaAllocation hAllocation)
9248 {
9249  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9250  VMA_ASSERT(request.item != m_Suballocations.end());
9251  VmaSuballocation& suballoc = *request.item;
9252  // Given suballocation is a free block.
9253  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9254  // Given offset is inside this suballocation.
9255  VMA_ASSERT(request.offset >= suballoc.offset);
9256  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9257  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9258  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9259 
9260  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9261  // it to become used.
9262  UnregisterFreeSuballocation(request.item);
9263 
9264  suballoc.offset = request.offset;
9265  suballoc.size = allocSize;
9266  suballoc.type = type;
9267  suballoc.hAllocation = hAllocation;
9268 
9269  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9270  if(paddingEnd)
9271  {
9272  VmaSuballocation paddingSuballoc = {};
9273  paddingSuballoc.offset = request.offset + allocSize;
9274  paddingSuballoc.size = paddingEnd;
9275  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9276  VmaSuballocationList::iterator next = request.item;
9277  ++next;
9278  const VmaSuballocationList::iterator paddingEndItem =
9279  m_Suballocations.insert(next, paddingSuballoc);
9280  RegisterFreeSuballocation(paddingEndItem);
9281  }
9282 
9283  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9284  if(paddingBegin)
9285  {
9286  VmaSuballocation paddingSuballoc = {};
9287  paddingSuballoc.offset = request.offset - paddingBegin;
9288  paddingSuballoc.size = paddingBegin;
9289  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9290  const VmaSuballocationList::iterator paddingBeginItem =
9291  m_Suballocations.insert(request.item, paddingSuballoc);
9292  RegisterFreeSuballocation(paddingBeginItem);
9293  }
9294 
9295  // Update totals.
9296  m_FreeCount = m_FreeCount - 1;
9297  if(paddingBegin > 0)
9298  {
9299  ++m_FreeCount;
9300  }
9301  if(paddingEnd > 0)
9302  {
9303  ++m_FreeCount;
9304  }
9305  m_SumFreeSize -= allocSize;
9306 }
9307 
9308 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9309 {
9310  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9311  suballocItem != m_Suballocations.end();
9312  ++suballocItem)
9313  {
9314  VmaSuballocation& suballoc = *suballocItem;
9315  if(suballoc.hAllocation == allocation)
9316  {
9317  FreeSuballocation(suballocItem);
9318  VMA_HEAVY_ASSERT(Validate());
9319  return;
9320  }
9321  }
9322  VMA_ASSERT(0 && "Not found!");
9323 }
9324 
9325 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9326 {
9327  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9328  suballocItem != m_Suballocations.end();
9329  ++suballocItem)
9330  {
9331  VmaSuballocation& suballoc = *suballocItem;
9332  if(suballoc.offset == offset)
9333  {
9334  FreeSuballocation(suballocItem);
9335  return;
9336  }
9337  }
9338  VMA_ASSERT(0 && "Not found!");
9339 }
9340 
9341 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9342 {
9343  VkDeviceSize lastSize = 0;
9344  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9345  {
9346  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9347 
9348  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9349  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9350  VMA_VALIDATE(it->size >= lastSize);
9351  lastSize = it->size;
9352  }
9353  return true;
9354 }
9355 
9356 bool VmaBlockMetadata_Generic::CheckAllocation(
9357  uint32_t currentFrameIndex,
9358  uint32_t frameInUseCount,
9359  VkDeviceSize bufferImageGranularity,
9360  VkDeviceSize allocSize,
9361  VkDeviceSize allocAlignment,
9362  VmaSuballocationType allocType,
9363  VmaSuballocationList::const_iterator suballocItem,
9364  bool canMakeOtherLost,
9365  VkDeviceSize* pOffset,
9366  size_t* itemsToMakeLostCount,
9367  VkDeviceSize* pSumFreeSize,
9368  VkDeviceSize* pSumItemSize) const
9369 {
9370  VMA_ASSERT(allocSize > 0);
9371  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9372  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9373  VMA_ASSERT(pOffset != VMA_NULL);
9374 
9375  *itemsToMakeLostCount = 0;
9376  *pSumFreeSize = 0;
9377  *pSumItemSize = 0;
9378 
9379  if(canMakeOtherLost)
9380  {
9381  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9382  {
9383  *pSumFreeSize = suballocItem->size;
9384  }
9385  else
9386  {
9387  if(suballocItem->hAllocation->CanBecomeLost() &&
9388  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9389  {
9390  ++*itemsToMakeLostCount;
9391  *pSumItemSize = suballocItem->size;
9392  }
9393  else
9394  {
9395  return false;
9396  }
9397  }
9398 
9399  // Remaining size is too small for this request: Early return.
9400  if(GetSize() - suballocItem->offset < allocSize)
9401  {
9402  return false;
9403  }
9404 
9405  // Start from offset equal to beginning of this suballocation.
9406  *pOffset = suballocItem->offset;
9407 
9408  // Apply VMA_DEBUG_MARGIN at the beginning.
9409  if(VMA_DEBUG_MARGIN > 0)
9410  {
9411  *pOffset += VMA_DEBUG_MARGIN;
9412  }
9413 
9414  // Apply alignment.
9415  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9416 
9417  // Check previous suballocations for BufferImageGranularity conflicts.
9418  // Make bigger alignment if necessary.
9419  if(bufferImageGranularity > 1)
9420  {
9421  bool bufferImageGranularityConflict = false;
9422  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9423  while(prevSuballocItem != m_Suballocations.cbegin())
9424  {
9425  --prevSuballocItem;
9426  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9427  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9428  {
9429  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9430  {
9431  bufferImageGranularityConflict = true;
9432  break;
9433  }
9434  }
9435  else
9436  // Already on previous page.
9437  break;
9438  }
9439  if(bufferImageGranularityConflict)
9440  {
9441  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9442  }
9443  }
9444 
9445  // Now that we have final *pOffset, check if we are past suballocItem.
9446  // If yes, return false - this function should be called for another suballocItem as starting point.
9447  if(*pOffset >= suballocItem->offset + suballocItem->size)
9448  {
9449  return false;
9450  }
9451 
9452  // Calculate padding at the beginning based on current offset.
9453  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9454 
9455  // Calculate required margin at the end.
9456  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9457 
9458  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9459  // Another early return check.
9460  if(suballocItem->offset + totalSize > GetSize())
9461  {
9462  return false;
9463  }
9464 
9465  // Advance lastSuballocItem until desired size is reached.
9466  // Update itemsToMakeLostCount.
9467  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9468  if(totalSize > suballocItem->size)
9469  {
9470  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9471  while(remainingSize > 0)
9472  {
9473  ++lastSuballocItem;
9474  if(lastSuballocItem == m_Suballocations.cend())
9475  {
9476  return false;
9477  }
9478  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9479  {
9480  *pSumFreeSize += lastSuballocItem->size;
9481  }
9482  else
9483  {
9484  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9485  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9486  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9487  {
9488  ++*itemsToMakeLostCount;
9489  *pSumItemSize += lastSuballocItem->size;
9490  }
9491  else
9492  {
9493  return false;
9494  }
9495  }
9496  remainingSize = (lastSuballocItem->size < remainingSize) ?
9497  remainingSize - lastSuballocItem->size : 0;
9498  }
9499  }
9500 
9501  // Check next suballocations for BufferImageGranularity conflicts.
9502  // If conflict exists, we must mark more allocations lost or fail.
9503  if(bufferImageGranularity > 1)
9504  {
9505  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9506  ++nextSuballocItem;
9507  while(nextSuballocItem != m_Suballocations.cend())
9508  {
9509  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9510  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9511  {
9512  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9513  {
9514  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9515  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9516  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9517  {
9518  ++*itemsToMakeLostCount;
9519  }
9520  else
9521  {
9522  return false;
9523  }
9524  }
9525  }
9526  else
9527  {
9528  // Already on next page.
9529  break;
9530  }
9531  ++nextSuballocItem;
9532  }
9533  }
9534  }
9535  else
9536  {
9537  const VmaSuballocation& suballoc = *suballocItem;
9538  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9539 
9540  *pSumFreeSize = suballoc.size;
9541 
9542  // Size of this suballocation is too small for this request: Early return.
9543  if(suballoc.size < allocSize)
9544  {
9545  return false;
9546  }
9547 
9548  // Start from offset equal to beginning of this suballocation.
9549  *pOffset = suballoc.offset;
9550 
9551  // Apply VMA_DEBUG_MARGIN at the beginning.
9552  if(VMA_DEBUG_MARGIN > 0)
9553  {
9554  *pOffset += VMA_DEBUG_MARGIN;
9555  }
9556 
9557  // Apply alignment.
9558  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9559 
9560  // Check previous suballocations for BufferImageGranularity conflicts.
9561  // Make bigger alignment if necessary.
9562  if(bufferImageGranularity > 1)
9563  {
9564  bool bufferImageGranularityConflict = false;
9565  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9566  while(prevSuballocItem != m_Suballocations.cbegin())
9567  {
9568  --prevSuballocItem;
9569  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9570  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9571  {
9572  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9573  {
9574  bufferImageGranularityConflict = true;
9575  break;
9576  }
9577  }
9578  else
9579  // Already on previous page.
9580  break;
9581  }
9582  if(bufferImageGranularityConflict)
9583  {
9584  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9585  }
9586  }
9587 
9588  // Calculate padding at the beginning based on current offset.
9589  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9590 
9591  // Calculate required margin at the end.
9592  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9593 
9594  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9595  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9596  {
9597  return false;
9598  }
9599 
9600  // Check next suballocations for BufferImageGranularity conflicts.
9601  // If conflict exists, allocation cannot be made here.
9602  if(bufferImageGranularity > 1)
9603  {
9604  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9605  ++nextSuballocItem;
9606  while(nextSuballocItem != m_Suballocations.cend())
9607  {
9608  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9609  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9610  {
9611  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9612  {
9613  return false;
9614  }
9615  }
9616  else
9617  {
9618  // Already on next page.
9619  break;
9620  }
9621  ++nextSuballocItem;
9622  }
9623  }
9624  }
9625 
9626  // All tests passed: Success. pOffset is already filled.
9627  return true;
9628 }
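/*
Worked example of the bufferImageGranularity logic above: with a granularity
of 4096, suppose a previously placed linear buffer ends at offset 5000 and an
optimal-tiling image is the candidate. Any image offset in 5000..8191 would
share the buffer's last 4 KiB page, so after the normal alignment step the
offset is additionally aligned up to 8192. Conversely, if a following
suballocation on the same page conflicts in type, this spot is rejected (or,
in the canMakeOtherLost path, the conflicting allocation must be made lost).
*/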
9629 
9630 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9631 {
9632  VMA_ASSERT(item != m_Suballocations.end());
9633  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9634 
9635  VmaSuballocationList::iterator nextItem = item;
9636  ++nextItem;
9637  VMA_ASSERT(nextItem != m_Suballocations.end());
9638  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9639 
9640  item->size += nextItem->size;
9641  --m_FreeCount;
9642  m_Suballocations.erase(nextItem);
9643 }
9644 
9645 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9646 {
9647  // Change this suballocation to be marked as free.
9648  VmaSuballocation& suballoc = *suballocItem;
9649  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9650  suballoc.hAllocation = VK_NULL_HANDLE;
9651 
9652  // Update totals.
9653  ++m_FreeCount;
9654  m_SumFreeSize += suballoc.size;
9655 
9656  // Merge with previous and/or next suballocation if it's also free.
9657  bool mergeWithNext = false;
9658  bool mergeWithPrev = false;
9659 
9660  VmaSuballocationList::iterator nextItem = suballocItem;
9661  ++nextItem;
9662  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9663  {
9664  mergeWithNext = true;
9665  }
9666 
9667  VmaSuballocationList::iterator prevItem = suballocItem;
9668  if(suballocItem != m_Suballocations.begin())
9669  {
9670  --prevItem;
9671  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9672  {
9673  mergeWithPrev = true;
9674  }
9675  }
9676 
9677  if(mergeWithNext)
9678  {
9679  UnregisterFreeSuballocation(nextItem);
9680  MergeFreeWithNext(suballocItem);
9681  }
9682 
9683  if(mergeWithPrev)
9684  {
9685  UnregisterFreeSuballocation(prevItem);
9686  MergeFreeWithNext(prevItem);
9687  RegisterFreeSuballocation(prevItem);
9688  return prevItem;
9689  }
9690  else
9691  {
9692  RegisterFreeSuballocation(suballocItem);
9693  return suballocItem;
9694  }
9695 }
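/*
The merging in FreeSuballocation() maintains the invariant checked in
Validate() that no two free suballocations are adjacent. Schematically:

    [used A][FREE][freed B][FREE][used C]  ->  [used A][     FREE     ][used C]

Free neighbors are unregistered from m_FreeSuballocationsBySize before merging
because merging changes their size, which is that vector's sort key; the
merged range is then registered once, under its final size.
*/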
9696 
9697 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9698 {
9699  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9700  VMA_ASSERT(item->size > 0);
9701 
9702  // You may want to enable this validation at the beginning or at the end of
9703  // this function, depending on what you want to check.
9704  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9705 
9706  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9707  {
9708  if(m_FreeSuballocationsBySize.empty())
9709  {
9710  m_FreeSuballocationsBySize.push_back(item);
9711  }
9712  else
9713  {
9714  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9715  }
9716  }
9717 
9718  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9719 }
9720 
9721 
9722 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9723 {
9724  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9725  VMA_ASSERT(item->size > 0);
9726 
9727  // You may want to enable this validation at the beginning or at the end of
9728  // this function, depending on what you want to check.
9729  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9730 
9731  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9732  {
9733  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9734  m_FreeSuballocationsBySize.data(),
9735  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9736  item,
9737  VmaSuballocationItemSizeLess());
9738  for(size_t index = it - m_FreeSuballocationsBySize.data();
9739  index < m_FreeSuballocationsBySize.size();
9740  ++index)
9741  {
9742  if(m_FreeSuballocationsBySize[index] == item)
9743  {
9744  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9745  return;
9746  }
9747  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9748  }
9749  VMA_ASSERT(0 && "Not found.");
9750  }
9751 
9752  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9753 }
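// Note on the lookup above: m_FreeSuballocationsBySize is sorted only by size,
// so VmaBinaryFindFirstNotLess() lands on the first entry with an equal size;
// the loop then scans forward through the run of equal-sized entries until it
// finds the exact iterator, asserting if it ever walks past that run.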
9754 
9755 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9756  VkDeviceSize bufferImageGranularity,
9757  VmaSuballocationType& inOutPrevSuballocType) const
9758 {
9759  if(bufferImageGranularity == 1 || IsEmpty())
9760  {
9761  return false;
9762  }
9763 
9764  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9765  bool typeConflictFound = false;
9766  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9767  it != m_Suballocations.cend();
9768  ++it)
9769  {
9770  const VmaSuballocationType suballocType = it->type;
9771  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9772  {
9773  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9774  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9775  {
9776  typeConflictFound = true;
9777  }
9778  inOutPrevSuballocType = suballocType;
9779  }
9780  }
9781 
9782  return typeConflictFound || minAlignment >= bufferImageGranularity;
9783 }
9784 
9785 ////////////////////////////////////////////////////////////////////////////////
9786 // class VmaBlockMetadata_Linear
9787 
9788 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9789  VmaBlockMetadata(hAllocator),
9790  m_SumFreeSize(0),
9791  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9792  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9793  m_1stVectorIndex(0),
9794  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9795  m_1stNullItemsBeginCount(0),
9796  m_1stNullItemsMiddleCount(0),
9797  m_2ndNullItemsCount(0)
9798 {
9799 }
9800 
9801 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9802 {
9803 }
9804 
9805 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9806 {
9807  VmaBlockMetadata::Init(size);
9808  m_SumFreeSize = size;
9809 }
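/*
Context for the functions below: VmaBlockMetadata_Linear keeps allocations in
up to two vectors. New allocations normally append to the 1st vector; once
space is freed at the beginning, further allocations may wrap around into the
2nd vector (SECOND_VECTOR_RING_BUFFER), and allocations made with
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT grow the 2nd vector down from the end
of the block (SECOND_VECTOR_DOUBLE_STACK). Freed suballocations become null
items that are only reclaimed at the vector ends. This metadata is selected by
creating a custom pool with the linear algorithm (sketch; sizes and
memoryTypeIndex are illustrative):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 1;
    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/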
9810 
9811 bool VmaBlockMetadata_Linear::Validate() const
9812 {
9813  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9814  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9815 
9816  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9817  VMA_VALIDATE(!suballocations1st.empty() ||
9818  suballocations2nd.empty() ||
9819  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9820 
9821  if(!suballocations1st.empty())
9822  {
9823  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
9824  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9825  // Null item at the end should be just pop_back().
9826  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9827  }
9828  if(!suballocations2nd.empty())
9829  {
9830  // Null item at the end should be just pop_back().
9831  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9832  }
9833 
9834  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9835  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9836 
9837  VkDeviceSize sumUsedSize = 0;
9838  const size_t suballoc1stCount = suballocations1st.size();
9839  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9840 
9841  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9842  {
9843  const size_t suballoc2ndCount = suballocations2nd.size();
9844  size_t nullItem2ndCount = 0;
9845  for(size_t i = 0; i < suballoc2ndCount; ++i)
9846  {
9847  const VmaSuballocation& suballoc = suballocations2nd[i];
9848  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9849 
9850  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9851  VMA_VALIDATE(suballoc.offset >= offset);
9852 
9853  if(!currFree)
9854  {
9855  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9856  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9857  sumUsedSize += suballoc.size;
9858  }
9859  else
9860  {
9861  ++nullItem2ndCount;
9862  }
9863 
9864  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9865  }
9866 
9867  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9868  }
9869 
9870  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9871  {
9872  const VmaSuballocation& suballoc = suballocations1st[i];
9873  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9874  suballoc.hAllocation == VK_NULL_HANDLE);
9875  }
9876 
9877  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9878 
9879  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9880  {
9881  const VmaSuballocation& suballoc = suballocations1st[i];
9882  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9883 
9884  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9885  VMA_VALIDATE(suballoc.offset >= offset);
9886  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9887 
9888  if(!currFree)
9889  {
9890  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9891  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9892  sumUsedSize += suballoc.size;
9893  }
9894  else
9895  {
9896  ++nullItem1stCount;
9897  }
9898 
9899  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9900  }
9901  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9902 
9903  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9904  {
9905  const size_t suballoc2ndCount = suballocations2nd.size();
9906  size_t nullItem2ndCount = 0;
9907  for(size_t i = suballoc2ndCount; i--; )
9908  {
9909  const VmaSuballocation& suballoc = suballocations2nd[i];
9910  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9911 
9912  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9913  VMA_VALIDATE(suballoc.offset >= offset);
9914 
9915  if(!currFree)
9916  {
9917  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9918  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9919  sumUsedSize += suballoc.size;
9920  }
9921  else
9922  {
9923  ++nullItem2ndCount;
9924  }
9925 
9926  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9927  }
9928 
9929  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9930  }
9931 
9932  VMA_VALIDATE(offset <= GetSize());
9933  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9934 
9935  return true;
9936 }
9937 
9938 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9939 {
9940  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9941  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9942 }
9943 
9944 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9945 {
9946  const VkDeviceSize size = GetSize();
9947 
9948  /*
9949  We don't consider gaps inside allocation vectors with freed allocations because
9950  they are not suitable for reuse in a linear allocator. We consider only space
9951  that is available for new allocations.
9952  */
9953  if(IsEmpty())
9954  {
9955  return size;
9956  }
9957 
9958  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9959 
9960  switch(m_2ndVectorMode)
9961  {
9962  case SECOND_VECTOR_EMPTY:
9963  /*
9964  Available space is after end of 1st, as well as before beginning of 1st (which
9965  would make it a ring buffer).
9966  */
9967  {
9968  const size_t suballocations1stCount = suballocations1st.size();
9969  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9970  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9971  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9972  return VMA_MAX(
9973  firstSuballoc.offset,
9974  size - (lastSuballoc.offset + lastSuballoc.size));
9975  }
9976  break;
9977 
9978  case SECOND_VECTOR_RING_BUFFER:
9979  /*
9980  Available space is only between end of 2nd and beginning of 1st.
9981  */
9982  {
9983  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9984  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9985  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9986  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9987  }
9988  break;
9989 
9990  case SECOND_VECTOR_DOUBLE_STACK:
9991  /*
9992  Available space is only between end of 1st and top of 2nd.
9993  */
9994  {
9995  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9996  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9997  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9998  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9999  }
10000  break;
10001 
10002  default:
10003  VMA_ASSERT(0);
10004  return 0;
10005  }
10006 }
10007 
10008 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10009 {
10010  const VkDeviceSize size = GetSize();
10011  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10012  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10013  const size_t suballoc1stCount = suballocations1st.size();
10014  const size_t suballoc2ndCount = suballocations2nd.size();
10015 
10016  outInfo.blockCount = 1;
10017  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10018  outInfo.unusedRangeCount = 0;
10019  outInfo.usedBytes = 0;
  outInfo.unusedBytes = 0; // Accumulated below, then recomputed at the end; initialized to avoid += on an indeterminate value.
10020  outInfo.allocationSizeMin = UINT64_MAX;
10021  outInfo.allocationSizeMax = 0;
10022  outInfo.unusedRangeSizeMin = UINT64_MAX;
10023  outInfo.unusedRangeSizeMax = 0;
10024 
10025  VkDeviceSize lastOffset = 0;
10026 
10027  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10028  {
10029  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10030  size_t nextAlloc2ndIndex = 0;
10031  while(lastOffset < freeSpace2ndTo1stEnd)
10032  {
10033  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10034  while(nextAlloc2ndIndex < suballoc2ndCount &&
10035  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10036  {
10037  ++nextAlloc2ndIndex;
10038  }
10039 
10040  // Found non-null allocation.
10041  if(nextAlloc2ndIndex < suballoc2ndCount)
10042  {
10043  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10044 
10045  // 1. Process free space before this allocation.
10046  if(lastOffset < suballoc.offset)
10047  {
10048  // There is free space from lastOffset to suballoc.offset.
10049  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10050  ++outInfo.unusedRangeCount;
10051  outInfo.unusedBytes += unusedRangeSize;
10052  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10053  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10054  }
10055 
10056  // 2. Process this allocation.
10057  // There is allocation with suballoc.offset, suballoc.size.
10058  outInfo.usedBytes += suballoc.size;
10059  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10060  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10061 
10062  // 3. Prepare for next iteration.
10063  lastOffset = suballoc.offset + suballoc.size;
10064  ++nextAlloc2ndIndex;
10065  }
10066  // We are at the end.
10067  else
10068  {
10069  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10070  if(lastOffset < freeSpace2ndTo1stEnd)
10071  {
10072  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10073  ++outInfo.unusedRangeCount;
10074  outInfo.unusedBytes += unusedRangeSize;
10075  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10076  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10077  }
10078 
10079  // End of loop.
10080  lastOffset = freeSpace2ndTo1stEnd;
10081  }
10082  }
10083  }
10084 
10085  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10086  const VkDeviceSize freeSpace1stTo2ndEnd =
10087  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10088  while(lastOffset < freeSpace1stTo2ndEnd)
10089  {
10090  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10091  while(nextAlloc1stIndex < suballoc1stCount &&
10092  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10093  {
10094  ++nextAlloc1stIndex;
10095  }
10096 
10097  // Found non-null allocation.
10098  if(nextAlloc1stIndex < suballoc1stCount)
10099  {
10100  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10101 
10102  // 1. Process free space before this allocation.
10103  if(lastOffset < suballoc.offset)
10104  {
10105  // There is free space from lastOffset to suballoc.offset.
10106  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10107  ++outInfo.unusedRangeCount;
10108  outInfo.unusedBytes += unusedRangeSize;
10109  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10110  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10111  }
10112 
10113  // 2. Process this allocation.
10114  // There is allocation with suballoc.offset, suballoc.size.
10115  outInfo.usedBytes += suballoc.size;
10116  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10117  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10118 
10119  // 3. Prepare for next iteration.
10120  lastOffset = suballoc.offset + suballoc.size;
10121  ++nextAlloc1stIndex;
10122  }
10123  // We are at the end.
10124  else
10125  {
10126  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10127  if(lastOffset < freeSpace1stTo2ndEnd)
10128  {
10129  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10130  ++outInfo.unusedRangeCount;
10131  outInfo.unusedBytes += unusedRangeSize;
10132  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10133  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10134  }
10135 
10136  // End of loop.
10137  lastOffset = freeSpace1stTo2ndEnd;
10138  }
10139  }
10140 
10141  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10142  {
10143  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10144  while(lastOffset < size)
10145  {
10146  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10147  while(nextAlloc2ndIndex != SIZE_MAX &&
10148  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10149  {
10150  --nextAlloc2ndIndex;
10151  }
10152 
10153  // Found non-null allocation.
10154  if(nextAlloc2ndIndex != SIZE_MAX)
10155  {
10156  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10157 
10158  // 1. Process free space before this allocation.
10159  if(lastOffset < suballoc.offset)
10160  {
10161  // There is free space from lastOffset to suballoc.offset.
10162  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10163  ++outInfo.unusedRangeCount;
10164  outInfo.unusedBytes += unusedRangeSize;
10165  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10166  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10167  }
10168 
10169  // 2. Process this allocation.
10170  // There is allocation with suballoc.offset, suballoc.size.
10171  outInfo.usedBytes += suballoc.size;
10172  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10173  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10174 
10175  // 3. Prepare for next iteration.
10176  lastOffset = suballoc.offset + suballoc.size;
10177  --nextAlloc2ndIndex;
10178  }
10179  // We are at the end.
10180  else
10181  {
10182  // There is free space from lastOffset to size.
10183  if(lastOffset < size)
10184  {
10185  const VkDeviceSize unusedRangeSize = size - lastOffset;
10186  ++outInfo.unusedRangeCount;
10187  outInfo.unusedBytes += unusedRangeSize;
10188  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10189  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10190  }
10191 
10192  // End of loop.
10193  lastOffset = size;
10194  }
10195  }
10196  }
10197 
10198  outInfo.unusedBytes = size - outInfo.usedBytes;
10199 }
10200 
10201 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10202 {
10203  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10204  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10205  const VkDeviceSize size = GetSize();
10206  const size_t suballoc1stCount = suballocations1st.size();
10207  const size_t suballoc2ndCount = suballocations2nd.size();
10208 
10209  inoutStats.size += size;
10210 
10211  VkDeviceSize lastOffset = 0;
10212 
10213  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10214  {
10215  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10216  size_t nextAlloc2ndIndex = 0; // Index into suballocations2nd, so it starts at 0; m_1stNullItemsBeginCount applies to the 1st vector.
10217  while(lastOffset < freeSpace2ndTo1stEnd)
10218  {
10219  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10220  while(nextAlloc2ndIndex < suballoc2ndCount &&
10221  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10222  {
10223  ++nextAlloc2ndIndex;
10224  }
10225 
10226  // Found non-null allocation.
10227  if(nextAlloc2ndIndex < suballoc2ndCount)
10228  {
10229  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10230 
10231  // 1. Process free space before this allocation.
10232  if(lastOffset < suballoc.offset)
10233  {
10234  // There is free space from lastOffset to suballoc.offset.
10235  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10236  inoutStats.unusedSize += unusedRangeSize;
10237  ++inoutStats.unusedRangeCount;
10238  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10239  }
10240 
10241  // 2. Process this allocation.
10242  // There is allocation with suballoc.offset, suballoc.size.
10243  ++inoutStats.allocationCount;
10244 
10245  // 3. Prepare for next iteration.
10246  lastOffset = suballoc.offset + suballoc.size;
10247  ++nextAlloc2ndIndex;
10248  }
10249  // We are at the end.
10250  else
10251  {
10252  if(lastOffset < freeSpace2ndTo1stEnd)
10253  {
10254  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10255  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10256  inoutStats.unusedSize += unusedRangeSize;
10257  ++inoutStats.unusedRangeCount;
10258  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10259  }
10260 
10261  // End of loop.
10262  lastOffset = freeSpace2ndTo1stEnd;
10263  }
10264  }
10265  }
10266 
10267  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10268  const VkDeviceSize freeSpace1stTo2ndEnd =
10269  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10270  while(lastOffset < freeSpace1stTo2ndEnd)
10271  {
10272  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10273  while(nextAlloc1stIndex < suballoc1stCount &&
10274  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10275  {
10276  ++nextAlloc1stIndex;
10277  }
10278 
10279  // Found non-null allocation.
10280  if(nextAlloc1stIndex < suballoc1stCount)
10281  {
10282  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10283 
10284  // 1. Process free space before this allocation.
10285  if(lastOffset < suballoc.offset)
10286  {
10287  // There is free space from lastOffset to suballoc.offset.
10288  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10289  inoutStats.unusedSize += unusedRangeSize;
10290  ++inoutStats.unusedRangeCount;
10291  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10292  }
10293 
10294  // 2. Process this allocation.
10295  // There is allocation with suballoc.offset, suballoc.size.
10296  ++inoutStats.allocationCount;
10297 
10298  // 3. Prepare for next iteration.
10299  lastOffset = suballoc.offset + suballoc.size;
10300  ++nextAlloc1stIndex;
10301  }
10302  // We are at the end.
10303  else
10304  {
10305  if(lastOffset < freeSpace1stTo2ndEnd)
10306  {
10307  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10308  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10309  inoutStats.unusedSize += unusedRangeSize;
10310  ++inoutStats.unusedRangeCount;
10311  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10312  }
10313 
10314  // End of loop.
10315  lastOffset = freeSpace1stTo2ndEnd;
10316  }
10317  }
10318 
10319  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10320  {
10321  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10322  while(lastOffset < size)
10323  {
10324  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10325  while(nextAlloc2ndIndex != SIZE_MAX &&
10326  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10327  {
10328  --nextAlloc2ndIndex;
10329  }
10330 
10331  // Found non-null allocation.
10332  if(nextAlloc2ndIndex != SIZE_MAX)
10333  {
10334  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10335 
10336  // 1. Process free space before this allocation.
10337  if(lastOffset < suballoc.offset)
10338  {
10339  // There is free space from lastOffset to suballoc.offset.
10340  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10341  inoutStats.unusedSize += unusedRangeSize;
10342  ++inoutStats.unusedRangeCount;
10343  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10344  }
10345 
10346  // 2. Process this allocation.
10347  // There is allocation with suballoc.offset, suballoc.size.
10348  ++inoutStats.allocationCount;
10349 
10350  // 3. Prepare for next iteration.
10351  lastOffset = suballoc.offset + suballoc.size;
10352  --nextAlloc2ndIndex;
10353  }
10354  // We are at the end.
10355  else
10356  {
10357  if(lastOffset < size)
10358  {
10359  // There is free space from lastOffset to size.
10360  const VkDeviceSize unusedRangeSize = size - lastOffset;
10361  inoutStats.unusedSize += unusedRangeSize;
10362  ++inoutStats.unusedRangeCount;
10363  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10364  }
10365 
10366  // End of loop.
10367  lastOffset = size;
10368  }
10369  }
10370  }
10371 }
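// Note on the statistics functions in this area (CalcAllocationStatInfo,
// AddPoolStats and PrintDetailedMap below): all of them walk the same regions
// of the linear block in increasing-offset order and differ only in the
// per-range action (accumulate min/max stats, bump pool counters, or emit
// JSON):
//
//   SECOND_VECTOR_RING_BUFFER:  2nd vector [0 .. freeSpace2ndTo1stEnd), then 1st vector [.. size)
//   SECOND_VECTOR_DOUBLE_STACK: 1st vector from offset 0, then 2nd vector growing down from size
//
// In double-stack mode the 2nd vector is scanned from back() toward front(),
// because that is its increasing-offset order.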
10372 
10373 #if VMA_STATS_STRING_ENABLED
10374 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10375 {
10376  const VkDeviceSize size = GetSize();
10377  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10378  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10379  const size_t suballoc1stCount = suballocations1st.size();
10380  const size_t suballoc2ndCount = suballocations2nd.size();
10381 
10382  // FIRST PASS
10383 
10384  size_t unusedRangeCount = 0;
10385  VkDeviceSize usedBytes = 0;
10386 
10387  VkDeviceSize lastOffset = 0;
10388 
10389  size_t alloc2ndCount = 0;
10390  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10391  {
10392  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10393  size_t nextAlloc2ndIndex = 0;
10394  while(lastOffset < freeSpace2ndTo1stEnd)
10395  {
10396  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10397  while(nextAlloc2ndIndex < suballoc2ndCount &&
10398  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10399  {
10400  ++nextAlloc2ndIndex;
10401  }
10402 
10403  // Found non-null allocation.
10404  if(nextAlloc2ndIndex < suballoc2ndCount)
10405  {
10406  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10407 
10408  // 1. Process free space before this allocation.
10409  if(lastOffset < suballoc.offset)
10410  {
10411  // There is free space from lastOffset to suballoc.offset.
10412  ++unusedRangeCount;
10413  }
10414 
10415  // 2. Process this allocation.
10416  // There is allocation with suballoc.offset, suballoc.size.
10417  ++alloc2ndCount;
10418  usedBytes += suballoc.size;
10419 
10420  // 3. Prepare for next iteration.
10421  lastOffset = suballoc.offset + suballoc.size;
10422  ++nextAlloc2ndIndex;
10423  }
10424  // We are at the end.
10425  else
10426  {
10427  if(lastOffset < freeSpace2ndTo1stEnd)
10428  {
10429  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10430  ++unusedRangeCount;
10431  }
10432 
10433  // End of loop.
10434  lastOffset = freeSpace2ndTo1stEnd;
10435  }
10436  }
10437  }
10438 
10439  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10440  size_t alloc1stCount = 0;
10441  const VkDeviceSize freeSpace1stTo2ndEnd =
10442  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10443  while(lastOffset < freeSpace1stTo2ndEnd)
10444  {
10445  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10446  while(nextAlloc1stIndex < suballoc1stCount &&
10447  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10448  {
10449  ++nextAlloc1stIndex;
10450  }
10451 
10452  // Found non-null allocation.
10453  if(nextAlloc1stIndex < suballoc1stCount)
10454  {
10455  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10456 
10457  // 1. Process free space before this allocation.
10458  if(lastOffset < suballoc.offset)
10459  {
10460  // There is free space from lastOffset to suballoc.offset.
10461  ++unusedRangeCount;
10462  }
10463 
10464  // 2. Process this allocation.
10465  // There is allocation with suballoc.offset, suballoc.size.
10466  ++alloc1stCount;
10467  usedBytes += suballoc.size;
10468 
10469  // 3. Prepare for next iteration.
10470  lastOffset = suballoc.offset + suballoc.size;
10471  ++nextAlloc1stIndex;
10472  }
10473  // We are at the end.
10474  else
10475  {
10476  if(lastOffset < freeSpace1stTo2ndEnd)
10477  {
10478  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10479  ++unusedRangeCount;
10480  }
10481 
10482  // End of loop.
10483  lastOffset = freeSpace1stTo2ndEnd;
10484  }
10485  }
10486 
10487  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10488  {
10489  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10490  while(lastOffset < size)
10491  {
10492  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10493  while(nextAlloc2ndIndex != SIZE_MAX &&
10494  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10495  {
10496  --nextAlloc2ndIndex;
10497  }
10498 
10499  // Found non-null allocation.
10500  if(nextAlloc2ndIndex != SIZE_MAX)
10501  {
10502  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10503 
10504  // 1. Process free space before this allocation.
10505  if(lastOffset < suballoc.offset)
10506  {
10507  // There is free space from lastOffset to suballoc.offset.
10508  ++unusedRangeCount;
10509  }
10510 
10511  // 2. Process this allocation.
10512  // There is allocation with suballoc.offset, suballoc.size.
10513  ++alloc2ndCount;
10514  usedBytes += suballoc.size;
10515 
10516  // 3. Prepare for next iteration.
10517  lastOffset = suballoc.offset + suballoc.size;
10518  --nextAlloc2ndIndex;
10519  }
10520  // We are at the end.
10521  else
10522  {
10523  if(lastOffset < size)
10524  {
10525  // There is free space from lastOffset to size.
10526  ++unusedRangeCount;
10527  }
10528 
10529  // End of loop.
10530  lastOffset = size;
10531  }
10532  }
10533  }
10534 
10535  const VkDeviceSize unusedBytes = size - usedBytes;
10536  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10537 
10538  // SECOND PASS
10539  lastOffset = 0;
10540 
10541  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10542  {
10543  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10544  size_t nextAlloc2ndIndex = 0;
10545  while(lastOffset < freeSpace2ndTo1stEnd)
10546  {
10547  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10548  while(nextAlloc2ndIndex < suballoc2ndCount &&
10549  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10550  {
10551  ++nextAlloc2ndIndex;
10552  }
10553 
10554  // Found non-null allocation.
10555  if(nextAlloc2ndIndex < suballoc2ndCount)
10556  {
10557  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10558 
10559  // 1. Process free space before this allocation.
10560  if(lastOffset < suballoc.offset)
10561  {
10562  // There is free space from lastOffset to suballoc.offset.
10563  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10564  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10565  }
10566 
10567  // 2. Process this allocation.
10568  // There is allocation with suballoc.offset, suballoc.size.
10569  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10570 
10571  // 3. Prepare for next iteration.
10572  lastOffset = suballoc.offset + suballoc.size;
10573  ++nextAlloc2ndIndex;
10574  }
10575  // We are at the end.
10576  else
10577  {
10578  if(lastOffset < freeSpace2ndTo1stEnd)
10579  {
10580  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10581  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10582  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10583  }
10584 
10585  // End of loop.
10586  lastOffset = freeSpace2ndTo1stEnd;
10587  }
10588  }
10589  }
10590 
10591  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10592  while(lastOffset < freeSpace1stTo2ndEnd)
10593  {
10594  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10595  while(nextAlloc1stIndex < suballoc1stCount &&
10596  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10597  {
10598  ++nextAlloc1stIndex;
10599  }
10600 
10601  // Found non-null allocation.
10602  if(nextAlloc1stIndex < suballoc1stCount)
10603  {
10604  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10605 
10606  // 1. Process free space before this allocation.
10607  if(lastOffset < suballoc.offset)
10608  {
10609  // There is free space from lastOffset to suballoc.offset.
10610  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10611  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10612  }
10613 
10614  // 2. Process this allocation.
10615  // There is allocation with suballoc.offset, suballoc.size.
10616  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10617 
10618  // 3. Prepare for next iteration.
10619  lastOffset = suballoc.offset + suballoc.size;
10620  ++nextAlloc1stIndex;
10621  }
10622  // We are at the end.
10623  else
10624  {
10625  if(lastOffset < freeSpace1stTo2ndEnd)
10626  {
10627  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10628  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10629  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10630  }
10631 
10632  // End of loop.
10633  lastOffset = freeSpace1stTo2ndEnd;
10634  }
10635  }
10636 
10637  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10638  {
10639  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10640  while(lastOffset < size)
10641  {
10642  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10643  while(nextAlloc2ndIndex != SIZE_MAX &&
10644  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10645  {
10646  --nextAlloc2ndIndex;
10647  }
10648 
10649  // Found non-null allocation.
10650  if(nextAlloc2ndIndex != SIZE_MAX)
10651  {
10652  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10653 
10654  // 1. Process free space before this allocation.
10655  if(lastOffset < suballoc.offset)
10656  {
10657  // There is free space from lastOffset to suballoc.offset.
10658  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10659  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10660  }
10661 
10662  // 2. Process this allocation.
10663  // There is allocation with suballoc.offset, suballoc.size.
10664  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10665 
10666  // 3. Prepare for next iteration.
10667  lastOffset = suballoc.offset + suballoc.size;
10668  --nextAlloc2ndIndex;
10669  }
10670  // We are at the end.
10671  else
10672  {
10673  if(lastOffset < size)
10674  {
10675  // There is free space from lastOffset to size.
10676  const VkDeviceSize unusedRangeSize = size - lastOffset;
10677  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10678  }
10679 
10680  // End of loop.
10681  lastOffset = size;
10682  }
10683  }
10684  }
10685 
10686  PrintDetailedMap_End(json);
10687 }
10688 #endif // #if VMA_STATS_STRING_ENABLED
10689 
10690 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10691  uint32_t currentFrameIndex,
10692  uint32_t frameInUseCount,
10693  VkDeviceSize bufferImageGranularity,
10694  VkDeviceSize allocSize,
10695  VkDeviceSize allocAlignment,
10696  bool upperAddress,
10697  VmaSuballocationType allocType,
10698  bool canMakeOtherLost,
10699  uint32_t strategy,
10700  VmaAllocationRequest* pAllocationRequest)
10701 {
10702  VMA_ASSERT(allocSize > 0);
10703  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10704  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10705  VMA_HEAVY_ASSERT(Validate());
10706  return upperAddress ?
10707  CreateAllocationRequest_UpperAddress(
10708  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10709  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10710  CreateAllocationRequest_LowerAddress(
10711  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10712  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10713 }
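/*
How a request reaches the upper-address path - a minimal, illustrative sketch
using the public API (pool sizes and setup values are assumptions, not part of
this file's logic):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex; // chosen via vmaFindMemoryTypeIndex().
    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolInfo.blockSize = 64ull * 1024 * 1024;
    poolInfo.maxBlockCount = 1;
    VmaPool pool;
    vmaCreatePool(allocator, &poolInfo, &pool);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    // Allocate from the upper end of the block (turns the pool into a double stack):
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
    VkBuffer buf;
    VmaAllocation alloc;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);

Requests created with the flag take CreateAllocationRequest_UpperAddress()
below; all others take CreateAllocationRequest_LowerAddress().
*/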
10714 
10715 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10716  uint32_t currentFrameIndex,
10717  uint32_t frameInUseCount,
10718  VkDeviceSize bufferImageGranularity,
10719  VkDeviceSize allocSize,
10720  VkDeviceSize allocAlignment,
10721  VmaSuballocationType allocType,
10722  bool canMakeOtherLost,
10723  uint32_t strategy,
10724  VmaAllocationRequest* pAllocationRequest)
10725 {
10726  const VkDeviceSize size = GetSize();
10727  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10728  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10729 
10730  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10731  {
10732  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10733  return false;
10734  }
10735 
10736  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10737  if(allocSize > size)
10738  {
10739  return false;
10740  }
10741  VkDeviceSize resultBaseOffset = size - allocSize;
10742  if(!suballocations2nd.empty())
10743  {
10744  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10745  resultBaseOffset = lastSuballoc.offset - allocSize;
10746  if(allocSize > lastSuballoc.offset)
10747  {
10748  return false;
10749  }
10750  }
10751 
10752  // Start from offset equal to end of free space.
10753  VkDeviceSize resultOffset = resultBaseOffset;
10754 
10755  // Apply VMA_DEBUG_MARGIN at the end.
10756  if(VMA_DEBUG_MARGIN > 0)
10757  {
10758  if(resultOffset < VMA_DEBUG_MARGIN)
10759  {
10760  return false;
10761  }
10762  resultOffset -= VMA_DEBUG_MARGIN;
10763  }
10764 
10765  // Apply alignment.
10766  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10767 
10768  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10769  // Make bigger alignment if necessary.
10770  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10771  {
10772  bool bufferImageGranularityConflict = false;
10773  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10774  {
10775  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10776  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10777  {
10778  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10779  {
10780  bufferImageGranularityConflict = true;
10781  break;
10782  }
10783  }
10784  else
10785  // Already on previous page.
10786  break;
10787  }
10788  if(bufferImageGranularityConflict)
10789  {
10790  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10791  }
10792  }
10793 
10794  // There is enough free space.
10795  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10796  suballocations1st.back().offset + suballocations1st.back().size :
10797  0;
10798  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10799  {
10800  // Check previous suballocations for BufferImageGranularity conflicts.
10801  // If conflict exists, allocation cannot be made here.
10802  if(bufferImageGranularity > 1)
10803  {
10804  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10805  {
10806  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10807  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10808  {
10809  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10810  {
10811  return false;
10812  }
10813  }
10814  else
10815  {
10816  // Already on next page.
10817  break;
10818  }
10819  }
10820  }
10821 
10822  // All tests passed: Success.
10823  pAllocationRequest->offset = resultOffset;
10824  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10825  pAllocationRequest->sumItemSize = 0;
10826  // pAllocationRequest->item unused.
10827  pAllocationRequest->itemsToMakeLostCount = 0;
10828  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10829  return true;
10830  }
10831 
10832  return false;
10833 }
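/*
Worked example of the offset math above (illustrative numbers): block size
1024, allocSize 100, allocAlignment 64, empty 2nd vector, VMA_DEBUG_MARGIN 0.
Then resultBaseOffset = 1024 - 100 = 924 and VmaAlignDown(924, 64) = 896, so
the allocation occupies [896, 996) and [996, 1024) stays unused at the top of
the stack. The reported sumFreeSize is resultBaseOffset + allocSize - endOf1st
= 924 + 100 - 0 = 1024, i.e. the whole block, since 1st is empty here.
*/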
10834 
10835 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10836  uint32_t currentFrameIndex,
10837  uint32_t frameInUseCount,
10838  VkDeviceSize bufferImageGranularity,
10839  VkDeviceSize allocSize,
10840  VkDeviceSize allocAlignment,
10841  VmaSuballocationType allocType,
10842  bool canMakeOtherLost,
10843  uint32_t strategy,
10844  VmaAllocationRequest* pAllocationRequest)
10845 {
10846  const VkDeviceSize size = GetSize();
10847  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10848  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10849 
10850  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10851  {
10852  // Try to allocate at the end of 1st vector.
10853 
10854  VkDeviceSize resultBaseOffset = 0;
10855  if(!suballocations1st.empty())
10856  {
10857  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10858  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10859  }
10860 
10861  // Start from offset equal to beginning of free space.
10862  VkDeviceSize resultOffset = resultBaseOffset;
10863 
10864  // Apply VMA_DEBUG_MARGIN at the beginning.
10865  if(VMA_DEBUG_MARGIN > 0)
10866  {
10867  resultOffset += VMA_DEBUG_MARGIN;
10868  }
10869 
10870  // Apply alignment.
10871  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10872 
10873  // Check previous suballocations for BufferImageGranularity conflicts.
10874  // Make bigger alignment if necessary.
10875  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10876  {
10877  bool bufferImageGranularityConflict = false;
10878  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10879  {
10880  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10881  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10882  {
10883  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10884  {
10885  bufferImageGranularityConflict = true;
10886  break;
10887  }
10888  }
10889  else
10890  // Already on previous page.
10891  break;
10892  }
10893  if(bufferImageGranularityConflict)
10894  {
10895  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10896  }
10897  }
10898 
10899  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10900  suballocations2nd.back().offset : size;
10901 
10902  // There is enough free space at the end after alignment.
10903  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10904  {
10905  // Check next suballocations for BufferImageGranularity conflicts.
10906  // If conflict exists, allocation cannot be made here.
10907  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10908  {
10909  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10910  {
10911  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10912  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10913  {
10914  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10915  {
10916  return false;
10917  }
10918  }
10919  else
10920  {
10921  // Already on previous page.
10922  break;
10923  }
10924  }
10925  }
10926 
10927  // All tests passed: Success.
10928  pAllocationRequest->offset = resultOffset;
10929  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10930  pAllocationRequest->sumItemSize = 0;
10931  // pAllocationRequest->item, customData unused.
10932  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10933  pAllocationRequest->itemsToMakeLostCount = 0;
10934  return true;
10935  }
10936  }
10937 
10938  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10939  // beginning of 1st vector as the end of free space.
10940  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10941  {
10942  VMA_ASSERT(!suballocations1st.empty());
10943 
10944  VkDeviceSize resultBaseOffset = 0;
10945  if(!suballocations2nd.empty())
10946  {
10947  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10948  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10949  }
10950 
10951  // Start from offset equal to beginning of free space.
10952  VkDeviceSize resultOffset = resultBaseOffset;
10953 
10954  // Apply VMA_DEBUG_MARGIN at the beginning.
10955  if(VMA_DEBUG_MARGIN > 0)
10956  {
10957  resultOffset += VMA_DEBUG_MARGIN;
10958  }
10959 
10960  // Apply alignment.
10961  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10962 
10963  // Check previous suballocations for BufferImageGranularity conflicts.
10964  // Make bigger alignment if necessary.
10965  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10966  {
10967  bool bufferImageGranularityConflict = false;
10968  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10969  {
10970  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10971  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10972  {
10973  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10974  {
10975  bufferImageGranularityConflict = true;
10976  break;
10977  }
10978  }
10979  else
10980  // Already on previous page.
10981  break;
10982  }
10983  if(bufferImageGranularityConflict)
10984  {
10985  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10986  }
10987  }
10988 
10989  pAllocationRequest->itemsToMakeLostCount = 0;
10990  pAllocationRequest->sumItemSize = 0;
10991  size_t index1st = m_1stNullItemsBeginCount;
10992 
10993  if(canMakeOtherLost)
10994  {
10995  while(index1st < suballocations1st.size() &&
10996  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10997  {
10998  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10999  const VmaSuballocation& suballoc = suballocations1st[index1st];
11000  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11001  {
11002  // No problem.
11003  }
11004  else
11005  {
11006  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11007  if(suballoc.hAllocation->CanBecomeLost() &&
11008  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11009  {
11010  ++pAllocationRequest->itemsToMakeLostCount;
11011  pAllocationRequest->sumItemSize += suballoc.size;
11012  }
11013  else
11014  {
11015  return false;
11016  }
11017  }
11018  ++index1st;
11019  }
11020 
11021  // Check next suballocations for BufferImageGranularity conflicts.
11022  // If conflict exists, we must mark more allocations lost or fail.
11023  if(bufferImageGranularity > 1)
11024  {
11025  while(index1st < suballocations1st.size())
11026  {
11027  const VmaSuballocation& suballoc = suballocations1st[index1st];
11028  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11029  {
11030  if(suballoc.hAllocation != VK_NULL_HANDLE)
11031  {
11032  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11033  if(suballoc.hAllocation->CanBecomeLost() &&
11034  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11035  {
11036  ++pAllocationRequest->itemsToMakeLostCount;
11037  pAllocationRequest->sumItemSize += suballoc.size;
11038  }
11039  else
11040  {
11041  return false;
11042  }
11043  }
11044  }
11045  else
11046  {
11047  // Already on next page.
11048  break;
11049  }
11050  ++index1st;
11051  }
11052  }
11053 
11054  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
11055  if(index1st == suballocations1st.size() &&
11056  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11057  {
11058  // TODO: Support for this special case is not implemented yet, so the allocation simply fails here (known limitation).
11059  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11060  }
11061  }
11062 
11063  // There is enough free space at the end after alignment.
11064  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11065  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11066  {
11067  // Check next suballocations for BufferImageGranularity conflicts.
11068  // If conflict exists, allocation cannot be made here.
11069  if(bufferImageGranularity > 1)
11070  {
11071  for(size_t nextSuballocIndex = index1st;
11072  nextSuballocIndex < suballocations1st.size();
11073  nextSuballocIndex++)
11074  {
11075  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11076  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11077  {
11078  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11079  {
11080  return false;
11081  }
11082  }
11083  else
11084  {
11085  // Already on next page.
11086  break;
11087  }
11088  }
11089  }
11090 
11091  // All tests passed: Success.
11092  pAllocationRequest->offset = resultOffset;
11093  pAllocationRequest->sumFreeSize =
11094  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11095  - resultBaseOffset
11096  - pAllocationRequest->sumItemSize;
11097  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11098  // pAllocationRequest->item, customData unused.
11099  return true;
11100  }
11101  }
11102 
11103  return false;
11104 }
11105 
11106 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11107  uint32_t currentFrameIndex,
11108  uint32_t frameInUseCount,
11109  VmaAllocationRequest* pAllocationRequest)
11110 {
11111  if(pAllocationRequest->itemsToMakeLostCount == 0)
11112  {
11113  return true;
11114  }
11115 
11116  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11117 
11118  // We always start from 1st.
11119  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11120  size_t index = m_1stNullItemsBeginCount;
11121  size_t madeLostCount = 0;
11122  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11123  {
11124  if(index == suballocations->size())
11125  {
11126  index = 0;
11127  // If we reach the end of the current vector, wrap around: to the beginning of 2nd in ring-buffer mode, otherwise back to the beginning of 1st.
11128  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11129  {
11130  suballocations = &AccessSuballocations2nd();
11131  }
11132  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11133  // suballocations continues pointing at AccessSuballocations1st().
11134  VMA_ASSERT(!suballocations->empty());
11135  }
11136  VmaSuballocation& suballoc = (*suballocations)[index];
11137  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11138  {
11139  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11140  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11141  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11142  {
11143  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11144  suballoc.hAllocation = VK_NULL_HANDLE;
11145  m_SumFreeSize += suballoc.size;
11146  if(suballocations == &AccessSuballocations1st())
11147  {
11148  ++m_1stNullItemsMiddleCount;
11149  }
11150  else
11151  {
11152  ++m_2ndNullItemsCount;
11153  }
11154  ++madeLostCount;
11155  }
11156  else
11157  {
11158  return false;
11159  }
11160  }
11161  ++index;
11162  }
11163 
11164  CleanupAfterFree();
11165  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11166 
11167  return true;
11168 }
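/*
Context for the frame-index arithmetic used here and in
CreateAllocationRequest_LowerAddress() above, as a hedged sketch: only
allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can return
true from CanBecomeLost(), and the application is expected to advance the
clock once per frame:

    vmaSetCurrentFrameIndex(allocator, frameIndex);

An allocation whose GetLastUseFrameIndex() + frameInUseCount is still below
currentFrameIndex has not been used recently enough to be protected, so it
may be made lost to satisfy the new request.
*/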
11169 
11170 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11171 {
11172  uint32_t lostAllocationCount = 0;
11173 
11174  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11175  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11176  {
11177  VmaSuballocation& suballoc = suballocations1st[i];
11178  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11179  suballoc.hAllocation->CanBecomeLost() &&
11180  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11181  {
11182  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11183  suballoc.hAllocation = VK_NULL_HANDLE;
11184  ++m_1stNullItemsMiddleCount;
11185  m_SumFreeSize += suballoc.size;
11186  ++lostAllocationCount;
11187  }
11188  }
11189 
11190  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11191  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11192  {
11193  VmaSuballocation& suballoc = suballocations2nd[i];
11194  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11195  suballoc.hAllocation->CanBecomeLost() &&
11196  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11197  {
11198  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11199  suballoc.hAllocation = VK_NULL_HANDLE;
11200  ++m_2ndNullItemsCount;
11201  m_SumFreeSize += suballoc.size;
11202  ++lostAllocationCount;
11203  }
11204  }
11205 
11206  if(lostAllocationCount)
11207  {
11208  CleanupAfterFree();
11209  }
11210 
11211  return lostAllocationCount;
11212 }
11213 
11214 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11215 {
11216  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11217  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11218  {
11219  const VmaSuballocation& suballoc = suballocations1st[i];
11220  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11221  {
11222  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11223  {
11224  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11225  return VK_ERROR_VALIDATION_FAILED_EXT;
11226  }
11227  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11228  {
11229  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11230  return VK_ERROR_VALIDATION_FAILED_EXT;
11231  }
11232  }
11233  }
11234 
11235  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11236  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11237  {
11238  const VmaSuballocation& suballoc = suballocations2nd[i];
11239  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11240  {
11241  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11242  {
11243  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11244  return VK_ERROR_VALIDATION_FAILED_EXT;
11245  }
11246  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11247  {
11248  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11249  return VK_ERROR_VALIDATION_FAILED_EXT;
11250  }
11251  }
11252  }
11253 
11254  return VK_SUCCESS;
11255 }
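/*
Usage sketch (hedged): the magic-value checks above are only active when the
library is compiled with a non-zero margin and corruption detection enabled,
e.g.:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #include "vk_mem_alloc.h"

They are then reachable through the public entry points vmaCheckCorruption()
and vmaCheckPoolCorruption(), which propagate the
VK_ERROR_VALIDATION_FAILED_EXT returned here when a margin has been
overwritten.
*/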
11256 
11257 void VmaBlockMetadata_Linear::Alloc(
11258  const VmaAllocationRequest& request,
11259  VmaSuballocationType type,
11260  VkDeviceSize allocSize,
11261  VmaAllocation hAllocation)
11262 {
11263  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11264 
11265  switch(request.type)
11266  {
11267  case VmaAllocationRequestType::UpperAddress:
11268  {
11269  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11270  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11271  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11272  suballocations2nd.push_back(newSuballoc);
11273  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11274  }
11275  break;
11276  case VmaAllocationRequestType::EndOf1st:
11277  {
11278  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11279 
11280  VMA_ASSERT(suballocations1st.empty() ||
11281  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11282  // Check if it fits before the end of the block.
11283  VMA_ASSERT(request.offset + allocSize <= GetSize());
11284 
11285  suballocations1st.push_back(newSuballoc);
11286  }
11287  break;
11288  case VmaAllocationRequestType::EndOf2nd:
11289  {
11290  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11291  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11292  VMA_ASSERT(!suballocations1st.empty() &&
11293  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11294  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11295 
11296  switch(m_2ndVectorMode)
11297  {
11298  case SECOND_VECTOR_EMPTY:
11299  // First allocation from second part ring buffer.
11300  VMA_ASSERT(suballocations2nd.empty());
11301  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11302  break;
11303  case SECOND_VECTOR_RING_BUFFER:
11304  // 2-part ring buffer is already started.
11305  VMA_ASSERT(!suballocations2nd.empty());
11306  break;
11307  case SECOND_VECTOR_DOUBLE_STACK:
11308  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11309  break;
11310  default:
11311  VMA_ASSERT(0);
11312  }
11313 
11314  suballocations2nd.push_back(newSuballoc);
11315  }
11316  break;
11317  default:
11318  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11319  }
11320 
11321  m_SumFreeSize -= newSuballoc.size;
11322 }
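// In summary: UpperAddress pushes onto the 2nd vector and switches it to
// double-stack mode, EndOf1st appends to the 1st vector, and EndOf2nd appends
// to the 2nd vector in ring-buffer mode - exactly the three request types
// produced by the CreateAllocationRequest_* functions above.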
11323 
11324 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11325 {
11326  FreeAtOffset(allocation->GetOffset());
11327 }
11328 
11329 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11330 {
11331  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11332  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11333 
11334  if(!suballocations1st.empty())
11335  {
11336  // Check if it is the first live allocation in 1st vector. If so, mark it free at the beginning.
11337  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11338  if(firstSuballoc.offset == offset)
11339  {
11340  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11341  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11342  m_SumFreeSize += firstSuballoc.size;
11343  ++m_1stNullItemsBeginCount;
11344  CleanupAfterFree();
11345  return;
11346  }
11347  }
11348 
11349  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11350  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11351  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11352  {
11353  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11354  if(lastSuballoc.offset == offset)
11355  {
11356  m_SumFreeSize += lastSuballoc.size;
11357  suballocations2nd.pop_back();
11358  CleanupAfterFree();
11359  return;
11360  }
11361  }
11362  // Last allocation in 1st vector.
11363  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11364  {
11365  VmaSuballocation& lastSuballoc = suballocations1st.back();
11366  if(lastSuballoc.offset == offset)
11367  {
11368  m_SumFreeSize += lastSuballoc.size;
11369  suballocations1st.pop_back();
11370  CleanupAfterFree();
11371  return;
11372  }
11373  }
11374 
11375  // Item from the middle of 1st vector.
11376  {
11377  VmaSuballocation refSuballoc;
11378  refSuballoc.offset = offset;
11379  // Rest of members stays uninitialized intentionally for better performance.
11380  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11381  suballocations1st.begin() + m_1stNullItemsBeginCount,
11382  suballocations1st.end(),
11383  refSuballoc,
11384  VmaSuballocationOffsetLess());
11385  if(it != suballocations1st.end())
11386  {
11387  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11388  it->hAllocation = VK_NULL_HANDLE;
11389  ++m_1stNullItemsMiddleCount;
11390  m_SumFreeSize += it->size;
11391  CleanupAfterFree();
11392  return;
11393  }
11394  }
11395 
11396  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11397  {
11398  // Item from the middle of 2nd vector.
11399  VmaSuballocation refSuballoc;
11400  refSuballoc.offset = offset;
11401  // Rest of members stays uninitialized intentionally for better performance.
11402  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11403  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11404  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11405  if(it != suballocations2nd.end())
11406  {
11407  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11408  it->hAllocation = VK_NULL_HANDLE;
11409  ++m_2ndNullItemsCount;
11410  m_SumFreeSize += it->size;
11411  CleanupAfterFree();
11412  return;
11413  }
11414  }
11415 
11416  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11417 }
11418 
11419 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11420 {
11421  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11422  const size_t suballocCount = AccessSuballocations1st().size();
11423  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11424 }
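/*
Worked example of the heuristic above: with suballocCount = 100 and
nullItemCount = 60 the test is 100 > 32 and 60*2 = 120 >= (100-60)*3 = 120,
so compaction runs; with nullItemCount = 50 it is 100 >= 150, so it does not.
In words: compact once free (null) items outnumber live ones at least 3:2,
but never bother for vectors of 32 items or fewer.
*/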
11425 
11426 void VmaBlockMetadata_Linear::CleanupAfterFree()
11427 {
11428  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11429  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11430 
11431  if(IsEmpty())
11432  {
11433  suballocations1st.clear();
11434  suballocations2nd.clear();
11435  m_1stNullItemsBeginCount = 0;
11436  m_1stNullItemsMiddleCount = 0;
11437  m_2ndNullItemsCount = 0;
11438  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11439  }
11440  else
11441  {
11442  const size_t suballoc1stCount = suballocations1st.size();
11443  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11444  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11445 
11446  // Find more null items at the beginning of 1st vector.
11447  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11448  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11449  {
11450  ++m_1stNullItemsBeginCount;
11451  --m_1stNullItemsMiddleCount;
11452  }
11453 
11454  // Find more null items at the end of 1st vector.
11455  while(m_1stNullItemsMiddleCount > 0 &&
11456  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11457  {
11458  --m_1stNullItemsMiddleCount;
11459  suballocations1st.pop_back();
11460  }
11461 
11462  // Find more null items at the end of 2nd vector.
11463  while(m_2ndNullItemsCount > 0 &&
11464  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11465  {
11466  --m_2ndNullItemsCount;
11467  suballocations2nd.pop_back();
11468  }
11469 
11470  // Find more null items at the beginning of 2nd vector.
11471  while(m_2ndNullItemsCount > 0 &&
11472  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11473  {
11474  --m_2ndNullItemsCount;
11475  VmaVectorRemove(suballocations2nd, 0);
11476  }
11477 
11478  if(ShouldCompact1st())
11479  {
11480  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11481  size_t srcIndex = m_1stNullItemsBeginCount;
11482  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11483  {
11484  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11485  {
11486  ++srcIndex;
11487  }
11488  if(dstIndex != srcIndex)
11489  {
11490  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11491  }
11492  ++srcIndex;
11493  }
11494  suballocations1st.resize(nonNullItemCount);
11495  m_1stNullItemsBeginCount = 0;
11496  m_1stNullItemsMiddleCount = 0;
11497  }
11498 
11499  // 2nd vector became empty.
11500  if(suballocations2nd.empty())
11501  {
11502  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11503  }
11504 
11505  // 1st vector became empty.
11506  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11507  {
11508  suballocations1st.clear();
11509  m_1stNullItemsBeginCount = 0;
11510 
11511  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11512  {
11513  // Swap 1st with 2nd. Now 2nd is empty.
11514  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11515  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11516  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11517  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11518  {
11519  ++m_1stNullItemsBeginCount;
11520  --m_1stNullItemsMiddleCount;
11521  }
11522  m_2ndNullItemsCount = 0;
11523  m_1stVectorIndex ^= 1;
11524  }
11525  }
11526  }
11527 
11528  VMA_HEAVY_ASSERT(Validate());
11529 }
11530 
11531 
11532 ////////////////////////////////////////////////////////////////////////////////
11533 // class VmaBlockMetadata_Buddy
11534 
11535 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11536  VmaBlockMetadata(hAllocator),
11537  m_Root(VMA_NULL),
11538  m_AllocationCount(0),
11539  m_FreeCount(1),
11540  m_SumFreeSize(0)
11541 {
11542  memset(m_FreeList, 0, sizeof(m_FreeList));
11543 }
11544 
11545 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11546 {
11547  DeleteNode(m_Root);
11548 }
11549 
11550 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11551 {
11552  VmaBlockMetadata::Init(size);
11553 
11554  m_UsableSize = VmaPrevPow2(size);
11555  m_SumFreeSize = m_UsableSize;
11556 
11557  // Calculate m_LevelCount.
11558  m_LevelCount = 1;
11559  while(m_LevelCount < MAX_LEVELS &&
11560  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11561  {
11562  ++m_LevelCount;
11563  }
11564 
11565  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11566  rootNode->offset = 0;
11567  rootNode->type = Node::TYPE_FREE;
11568  rootNode->parent = VMA_NULL;
11569  rootNode->buddy = VMA_NULL;
11570 
11571  m_Root = rootNode;
11572  AddToFreeListFront(0, rootNode);
11573 }
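/*
Worked example (assuming MIN_NODE_SIZE is 32, per the class constants): for
Init(1000), VmaPrevPow2(1000) = 512, so m_UsableSize = 512 and the remaining
488 bytes become the block's "unusable size". LevelToNodeSize() then yields
512, 256, 128, 64, 32 for levels 0..4, and LevelToNodeSize(5) = 16 falls
below MIN_NODE_SIZE, giving m_LevelCount = 5.
*/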
11574 
11575 bool VmaBlockMetadata_Buddy::Validate() const
11576 {
11577  // Validate tree.
11578  ValidationContext ctx;
11579  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11580  {
11581  VMA_VALIDATE(false && "ValidateNode failed.");
11582  }
11583  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11584  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11585 
11586  // Validate free node lists.
11587  for(uint32_t level = 0; level < m_LevelCount; ++level)
11588  {
11589  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11590  m_FreeList[level].front->free.prev == VMA_NULL);
11591 
11592  for(Node* node = m_FreeList[level].front;
11593  node != VMA_NULL;
11594  node = node->free.next)
11595  {
11596  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11597 
11598  if(node->free.next == VMA_NULL)
11599  {
11600  VMA_VALIDATE(m_FreeList[level].back == node);
11601  }
11602  else
11603  {
11604  VMA_VALIDATE(node->free.next->free.prev == node);
11605  }
11606  }
11607  }
11608 
11609  // Validate that free lists at higher levels are empty.
11610  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11611  {
11612  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11613  }
11614 
11615  return true;
11616 }
11617 
11618 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11619 {
11620  for(uint32_t level = 0; level < m_LevelCount; ++level)
11621  {
11622  if(m_FreeList[level].front != VMA_NULL)
11623  {
11624  return LevelToNodeSize(level);
11625  }
11626  }
11627  return 0;
11628 }
11629 
11630 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11631 {
11632  const VkDeviceSize unusableSize = GetUnusableSize();
11633 
11634  outInfo.blockCount = 1;
11635 
11636  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11637  outInfo.usedBytes = outInfo.unusedBytes = 0;
11638 
11639  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11640  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11641  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11642 
11643  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11644 
11645  if(unusableSize > 0)
11646  {
11647  ++outInfo.unusedRangeCount;
11648  outInfo.unusedBytes += unusableSize;
11649  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11650  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11651  }
11652 }
11653 
11654 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11655 {
11656  const VkDeviceSize unusableSize = GetUnusableSize();
11657 
11658  inoutStats.size += GetSize();
11659  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11660  inoutStats.allocationCount += m_AllocationCount;
11661  inoutStats.unusedRangeCount += m_FreeCount;
11662  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11663 
11664  if(unusableSize > 0)
11665  {
11666  ++inoutStats.unusedRangeCount;
11667  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11668  }
11669 }
11670 
11671 #if VMA_STATS_STRING_ENABLED
11672 
11673 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11674 {
11675  // TODO optimize
11676  VmaStatInfo stat;
11677  CalcAllocationStatInfo(stat);
11678 
11679  PrintDetailedMap_Begin(
11680  json,
11681  stat.unusedBytes,
11682  stat.allocationCount,
11683  stat.unusedRangeCount);
11684 
11685  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11686 
11687  const VkDeviceSize unusableSize = GetUnusableSize();
11688  if(unusableSize > 0)
11689  {
11690  PrintDetailedMap_UnusedRange(json,
11691  m_UsableSize, // offset
11692  unusableSize); // size
11693  }
11694 
11695  PrintDetailedMap_End(json);
11696 }
11697 
11698 #endif // #if VMA_STATS_STRING_ENABLED
11699 
11700 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11701  uint32_t currentFrameIndex,
11702  uint32_t frameInUseCount,
11703  VkDeviceSize bufferImageGranularity,
11704  VkDeviceSize allocSize,
11705  VkDeviceSize allocAlignment,
11706  bool upperAddress,
11707  VmaSuballocationType allocType,
11708  bool canMakeOtherLost,
11709  uint32_t strategy,
11710  VmaAllocationRequest* pAllocationRequest)
11711 {
11712  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11713 
11714  // Simple way to respect bufferImageGranularity. May be optimized some day.
11715  // Whenever it might be an OPTIMAL image...
11716  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11717  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11718  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11719  {
11720  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11721  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11722  }
11723 
11724  if(allocSize > m_UsableSize)
11725  {
11726  return false;
11727  }
11728 
11729  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11730  for(uint32_t level = targetLevel + 1; level--; )
11731  {
11732  for(Node* freeNode = m_FreeList[level].front;
11733  freeNode != VMA_NULL;
11734  freeNode = freeNode->free.next)
11735  {
11736  if(freeNode->offset % allocAlignment == 0)
11737  {
11738  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11739  pAllocationRequest->offset = freeNode->offset;
11740  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11741  pAllocationRequest->sumItemSize = 0;
11742  pAllocationRequest->itemsToMakeLostCount = 0;
11743  pAllocationRequest->customData = (void*)(uintptr_t)level;
11744  return true;
11745  }
11746  }
11747  }
11748 
11749  return false;
11750 }
11751 
11752 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11753  uint32_t currentFrameIndex,
11754  uint32_t frameInUseCount,
11755  VmaAllocationRequest* pAllocationRequest)
11756 {
11757  /*
11758  Lost allocations are not supported in buddy allocator at the moment.
11759  Support might be added in the future.
11760  */
11761  return pAllocationRequest->itemsToMakeLostCount == 0;
11762 }
11763 
11764 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11765 {
11766  /*
11767  Lost allocations are not supported in buddy allocator at the moment.
11768  Support might be added in the future.
11769  */
11770  return 0;
11771 }
11772 
11773 void VmaBlockMetadata_Buddy::Alloc(
11774  const VmaAllocationRequest& request,
11775  VmaSuballocationType type,
11776  VkDeviceSize allocSize,
11777  VmaAllocation hAllocation)
11778 {
11779  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11780 
11781  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11782  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11783 
11784  Node* currNode = m_FreeList[currLevel].front;
11785  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11786  while(currNode->offset != request.offset)
11787  {
11788  currNode = currNode->free.next;
11789  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11790  }
11791 
11792  // Go down, splitting free nodes.
11793  while(currLevel < targetLevel)
11794  {
11795  // currNode is already first free node at currLevel.
11796  // Remove it from list of free nodes at this currLevel.
11797  RemoveFromFreeList(currLevel, currNode);
11798 
11799  const uint32_t childrenLevel = currLevel + 1;
11800 
11801  // Create two free sub-nodes.
11802  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11803  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11804 
11805  leftChild->offset = currNode->offset;
11806  leftChild->type = Node::TYPE_FREE;
11807  leftChild->parent = currNode;
11808  leftChild->buddy = rightChild;
11809 
11810  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11811  rightChild->type = Node::TYPE_FREE;
11812  rightChild->parent = currNode;
11813  rightChild->buddy = leftChild;
11814 
11815  // Convert current currNode to split type.
11816  currNode->type = Node::TYPE_SPLIT;
11817  currNode->split.leftChild = leftChild;
11818 
11819  // Add child nodes to free list. Order is important!
11820  AddToFreeListFront(childrenLevel, rightChild);
11821  AddToFreeListFront(childrenLevel, leftChild);
11822 
11823  ++m_FreeCount;
11824  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11825  ++currLevel;
11826  currNode = m_FreeList[currLevel].front;
11827 
11828  /*
11829  We can be sure that currNode, as left child of node previously split,
11830  also fulfills the alignment requirement.
11831  */
11832  }
11833 
11834  // Remove from free list.
11835  VMA_ASSERT(currLevel == targetLevel &&
11836  currNode != VMA_NULL &&
11837  currNode->type == Node::TYPE_FREE);
11838  RemoveFromFreeList(currLevel, currNode);
11839 
11840  // Convert to allocation node.
11841  currNode->type = Node::TYPE_ALLOCATION;
11842  currNode->allocation.alloc = hAllocation;
11843 
11844  ++m_AllocationCount;
11845  --m_FreeCount;
11846  m_SumFreeSize -= allocSize;
11847 }
11848 
11849 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11850 {
11851  if(node->type == Node::TYPE_SPLIT)
11852  {
11853  DeleteNode(node->split.leftChild->buddy);
11854  DeleteNode(node->split.leftChild);
11855  }
11856 
11857  vma_delete(GetAllocationCallbacks(), node);
11858 }
11859 
11860 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11861 {
11862  VMA_VALIDATE(level < m_LevelCount);
11863  VMA_VALIDATE(curr->parent == parent);
11864  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11865  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11866  switch(curr->type)
11867  {
11868  case Node::TYPE_FREE:
11869  // curr->free.prev, next are validated separately.
11870  ctx.calculatedSumFreeSize += levelNodeSize;
11871  ++ctx.calculatedFreeCount;
11872  break;
11873  case Node::TYPE_ALLOCATION:
11874  ++ctx.calculatedAllocationCount;
11875  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11876  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11877  break;
11878  case Node::TYPE_SPLIT:
11879  {
11880  const uint32_t childrenLevel = level + 1;
11881  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11882  const Node* const leftChild = curr->split.leftChild;
11883  VMA_VALIDATE(leftChild != VMA_NULL);
11884  VMA_VALIDATE(leftChild->offset == curr->offset);
11885  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11886  {
11887  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11888  }
11889  const Node* const rightChild = leftChild->buddy;
11890  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11891  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11892  {
11893  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11894  }
11895  }
11896  break;
11897  default:
11898  return false;
11899  }
11900 
11901  return true;
11902 }
11903 
11904 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11905 {
11906  // I know this could be optimized somehow, e.g. by using std::log2p1 from C++20.
11907  uint32_t level = 0;
11908  VkDeviceSize currLevelNodeSize = m_UsableSize;
11909  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11910  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11911  {
11912  ++level;
11913  currLevelNodeSize = nextLevelNodeSize;
11914  nextLevelNodeSize = currLevelNodeSize >> 1;
11915  }
11916  return level;
11917 }
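// A standalone sketch of the optimization the comment above alludes to (std::log2p1
// was standardized as std::bit_width in C++20). It assumes, as the buddy metadata
// guarantees, that usableSize is a power of two and that allocSize >= 1:
//
//   #include <algorithm>
//   #include <bit>
//   #include <cstdint>
//
//   static uint32_t AllocSizeToLevelNoLoop(uint64_t allocSize, uint64_t usableSize, uint32_t levelCount)
//   {
//       // floor(log2(usableSize)) - ceil(log2(allocSize)), clamped to the deepest level.
//       const uint32_t maxFit = static_cast<uint32_t>(std::bit_width(usableSize)) - 1
//           - static_cast<uint32_t>(std::bit_width(allocSize - 1));
//       return std::min(maxFit, levelCount - 1);
//   }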
11918 
11919 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11920 {
11921  // Find node and level.
11922  Node* node = m_Root;
11923  VkDeviceSize nodeOffset = 0;
11924  uint32_t level = 0;
11925  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11926  while(node->type == Node::TYPE_SPLIT)
11927  {
11928  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11929  if(offset < nodeOffset + nextLevelSize)
11930  {
11931  node = node->split.leftChild;
11932  }
11933  else
11934  {
11935  node = node->split.leftChild->buddy;
11936  nodeOffset += nextLevelSize;
11937  }
11938  ++level;
11939  levelNodeSize = nextLevelSize;
11940  }
11941 
11942  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11943  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11944 
11945  ++m_FreeCount;
11946  --m_AllocationCount;
11947  m_SumFreeSize += alloc->GetSize();
11948 
11949  node->type = Node::TYPE_FREE;
11950 
11951  // Join free nodes if possible.
11952  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11953  {
11954  RemoveFromFreeList(level, node->buddy);
11955  Node* const parent = node->parent;
11956 
11957  vma_delete(GetAllocationCallbacks(), node->buddy);
11958  vma_delete(GetAllocationCallbacks(), node);
11959  parent->type = Node::TYPE_FREE;
11960 
11961  node = parent;
11962  --level;
11963  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11964  --m_FreeCount;
11965  }
11966 
11967  AddToFreeListFront(level, node);
11968 }
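// A standalone sketch of the invariant the merge loop above relies on: with
// power-of-two node sizes, the buddy of the node at `offset` lies at
// offset XOR nodeSize, which is why checking node->buddy is enough to decide a merge.
//
//   #include <cassert>
//   #include <cstdint>
//
//   static uint64_t BuddyOffset(uint64_t offset, uint64_t nodeSize)
//   {
//       assert(nodeSize != 0 && (nodeSize & (nodeSize - 1)) == 0); // power of two
//       assert(offset % nodeSize == 0);                            // node-aligned
//       return offset ^ nodeSize;
//   }
//
//   // e.g. BuddyOffset(0, 256) == 256 and BuddyOffset(256, 256) == 0 - the pair that
//   // joins back into a single 512-byte free node once both halves are free.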
11969 
11970 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11971 {
11972  switch(node->type)
11973  {
11974  case Node::TYPE_FREE:
11975  ++outInfo.unusedRangeCount;
11976  outInfo.unusedBytes += levelNodeSize;
11977  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11978  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11979  break;
11980  case Node::TYPE_ALLOCATION:
11981  {
11982  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11983  ++outInfo.allocationCount;
11984  outInfo.usedBytes += allocSize;
11985  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11986  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11987 
11988  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11989  if(unusedRangeSize > 0)
11990  {
11991  ++outInfo.unusedRangeCount;
11992  outInfo.unusedBytes += unusedRangeSize;
11993  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11994  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11995  }
11996  }
11997  break;
11998  case Node::TYPE_SPLIT:
11999  {
12000  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12001  const Node* const leftChild = node->split.leftChild;
12002  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12003  const Node* const rightChild = leftChild->buddy;
12004  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12005  }
12006  break;
12007  default:
12008  VMA_ASSERT(0);
12009  }
12010 }
12011 
12012 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12013 {
12014  VMA_ASSERT(node->type == Node::TYPE_FREE);
12015 
12016  // List is empty.
12017  Node* const frontNode = m_FreeList[level].front;
12018  if(frontNode == VMA_NULL)
12019  {
12020  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12021  node->free.prev = node->free.next = VMA_NULL;
12022  m_FreeList[level].front = m_FreeList[level].back = node;
12023  }
12024  else
12025  {
12026  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12027  node->free.prev = VMA_NULL;
12028  node->free.next = frontNode;
12029  frontNode->free.prev = node;
12030  m_FreeList[level].front = node;
12031  }
12032 }
12033 
12034 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12035 {
12036  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12037 
12038  // It is at the front.
12039  if(node->free.prev == VMA_NULL)
12040  {
12041  VMA_ASSERT(m_FreeList[level].front == node);
12042  m_FreeList[level].front = node->free.next;
12043  }
12044  else
12045  {
12046  Node* const prevFreeNode = node->free.prev;
12047  VMA_ASSERT(prevFreeNode->free.next == node);
12048  prevFreeNode->free.next = node->free.next;
12049  }
12050 
12051  // It is at the back.
12052  if(node->free.next == VMA_NULL)
12053  {
12054  VMA_ASSERT(m_FreeList[level].back == node);
12055  m_FreeList[level].back = node->free.prev;
12056  }
12057  else
12058  {
12059  Node* const nextFreeNode = node->free.next;
12060  VMA_ASSERT(nextFreeNode->free.prev == node);
12061  nextFreeNode->free.prev = node->free.prev;
12062  }
12063 }
12064 
12065 #if VMA_STATS_STRING_ENABLED
12066 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12067 {
12068  switch(node->type)
12069  {
12070  case Node::TYPE_FREE:
12071  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12072  break;
12073  case Node::TYPE_ALLOCATION:
12074  {
12075  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12076  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12077  if(allocSize < levelNodeSize)
12078  {
12079  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12080  }
12081  }
12082  break;
12083  case Node::TYPE_SPLIT:
12084  {
12085  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12086  const Node* const leftChild = node->split.leftChild;
12087  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12088  const Node* const rightChild = leftChild->buddy;
12089  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12090  }
12091  break;
12092  default:
12093  VMA_ASSERT(0);
12094  }
12095 }
12096 #endif // #if VMA_STATS_STRING_ENABLED
12097 
12098 
12099 ////////////////////////////////////////////////////////////////////////////////
12100 // class VmaDeviceMemoryBlock
12101 
12102 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12103  m_pMetadata(VMA_NULL),
12104  m_MemoryTypeIndex(UINT32_MAX),
12105  m_Id(0),
12106  m_hMemory(VK_NULL_HANDLE),
12107  m_MapCount(0),
12108  m_pMappedData(VMA_NULL)
12109 {
12110 }
12111 
12112 void VmaDeviceMemoryBlock::Init(
12113  VmaAllocator hAllocator,
12114  VmaPool hParentPool,
12115  uint32_t newMemoryTypeIndex,
12116  VkDeviceMemory newMemory,
12117  VkDeviceSize newSize,
12118  uint32_t id,
12119  uint32_t algorithm)
12120 {
12121  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12122 
12123  m_hParentPool = hParentPool;
12124  m_MemoryTypeIndex = newMemoryTypeIndex;
12125  m_Id = id;
12126  m_hMemory = newMemory;
12127 
12128  switch(algorithm)
12129  {
12130  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12131  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12132  break;
12133  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12134  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12135  break;
12136  default:
12137  VMA_ASSERT(0);
12138  // Fall-through.
12139  case 0:
12140  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12141  }
12142  m_pMetadata->Init(newSize);
12143 }
12144 
12145 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12146 {
12147  // This is the most important assert in the entire library.
12148  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12149  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12150 
12151  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12152  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12153  m_hMemory = VK_NULL_HANDLE;
12154 
12155  vma_delete(allocator, m_pMetadata);
12156  m_pMetadata = VMA_NULL;
12157 }
12158 
12159 bool VmaDeviceMemoryBlock::Validate() const
12160 {
12161  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12162  (m_pMetadata->GetSize() != 0));
12163 
12164  return m_pMetadata->Validate();
12165 }
12166 
12167 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12168 {
12169  void* pData = nullptr;
12170  VkResult res = Map(hAllocator, 1, &pData);
12171  if(res != VK_SUCCESS)
12172  {
12173  return res;
12174  }
12175 
12176  res = m_pMetadata->CheckCorruption(pData);
12177 
12178  Unmap(hAllocator, 1);
12179 
12180  return res;
12181 }
12182 
12183 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12184 {
12185  if(count == 0)
12186  {
12187  return VK_SUCCESS;
12188  }
12189 
12190  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12191  if(m_MapCount != 0)
12192  {
12193  m_MapCount += count;
12194  VMA_ASSERT(m_pMappedData != VMA_NULL);
12195  if(ppData != VMA_NULL)
12196  {
12197  *ppData = m_pMappedData;
12198  }
12199  return VK_SUCCESS;
12200  }
12201  else
12202  {
12203  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12204  hAllocator->m_hDevice,
12205  m_hMemory,
12206  0, // offset
12207  VK_WHOLE_SIZE,
12208  0, // flags
12209  &m_pMappedData);
12210  if(result == VK_SUCCESS)
12211  {
12212  if(ppData != VMA_NULL)
12213  {
12214  *ppData = m_pMappedData;
12215  }
12216  m_MapCount = count;
12217  }
12218  return result;
12219  }
12220 }
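// Usage sketch: Map() reference-counts mappings per block, so nested Map/Unmap pairs
// are cheap and vkMapMemory runs only for the first one. Assuming a valid block and
// allocator (variable names here are hypothetical):
//
//   void* p1 = VMA_NULL; void* p2 = VMA_NULL;
//   pBlock->Map(hAllocator, 1, &p1);   // m_MapCount 0 -> 1, calls vkMapMemory
//   pBlock->Map(hAllocator, 1, &p2);   // m_MapCount 1 -> 2, reuses m_pMappedData
//   VMA_ASSERT(p1 == p2);
//   pBlock->Unmap(hAllocator, 1);      // m_MapCount 2 -> 1, memory stays mapped
//   pBlock->Unmap(hAllocator, 1);      // m_MapCount 1 -> 0, calls vkUnmapMemory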
12221 
12222 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12223 {
12224  if(count == 0)
12225  {
12226  return;
12227  }
12228 
12229  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12230  if(m_MapCount >= count)
12231  {
12232  m_MapCount -= count;
12233  if(m_MapCount == 0)
12234  {
12235  m_pMappedData = VMA_NULL;
12236  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12237  }
12238  }
12239  else
12240  {
12241  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12242  }
12243 }
12244 
12245 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12246 {
12247  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12248  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12249 
12250  void* pData;
12251  VkResult res = Map(hAllocator, 1, &pData);
12252  if(res != VK_SUCCESS)
12253  {
12254  return res;
12255  }
12256 
12257  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12258  VmaWriteMagicValue(pData, allocOffset + allocSize);
12259 
12260  Unmap(hAllocator, 1);
12261 
12262  return VK_SUCCESS;
12263 }
12264 
12265 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12266 {
12267  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12268  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12269 
12270  void* pData;
12271  VkResult res = Map(hAllocator, 1, &pData);
12272  if(res != VK_SUCCESS)
12273  {
12274  return res;
12275  }
12276 
12277  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12278  {
12279  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12280  }
12281  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12282  {
12283  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12284  }
12285 
12286  Unmap(hAllocator, 1);
12287 
12288  return VK_SUCCESS;
12289 }
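// A standalone sketch of the magic-value scheme used by the two functions above: a
// sentinel word is written into the debug margin just before and just after the user
// range, and the corruption check re-reads it. The helper names below are
// illustrative, and the real code fills the whole VMA_DEBUG_MARGIN, not a single word.
//
//   #include <cstdint>
//   #include <cstring>
//
//   static const uint32_t MAGIC = 0x7F84E666u; // stand-in for VMA_CORRUPTION_DETECTION_MAGIC_VALUE
//
//   static void WriteMagicSketch(void* pBlockData, uint64_t offset)
//   {
//       memcpy(static_cast<char*>(pBlockData) + offset, &MAGIC, sizeof(MAGIC));
//   }
//
//   static bool ValidateMagicSketch(const void* pBlockData, uint64_t offset)
//   {
//       uint32_t v = 0;
//       memcpy(&v, static_cast<const char*>(pBlockData) + offset, sizeof(v));
//       return v == MAGIC;
//   }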
12290 
12291 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12292  const VmaAllocator hAllocator,
12293  const VmaAllocation hAllocation,
12294  VkDeviceSize allocationLocalOffset,
12295  VkBuffer hBuffer,
12296  const void* pNext)
12297 {
12298  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12299  hAllocation->GetBlock() == this);
12300  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12301  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12302  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12303  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12304  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12305  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12306 }
12307 
12308 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12309  const VmaAllocator hAllocator,
12310  const VmaAllocation hAllocation,
12311  VkDeviceSize allocationLocalOffset,
12312  VkImage hImage,
12313  const void* pNext)
12314 {
12315  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12316  hAllocation->GetBlock() == this);
12317  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12318  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12319  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12320  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12321  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12322  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12323 }
12324 
12325 static void InitStatInfo(VmaStatInfo& outInfo)
12326 {
12327  memset(&outInfo, 0, sizeof(outInfo));
12328  outInfo.allocationSizeMin = UINT64_MAX;
12329  outInfo.unusedRangeSizeMin = UINT64_MAX;
12330 }
12331 
12332 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12333 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12334 {
12335  inoutInfo.blockCount += srcInfo.blockCount;
12336  inoutInfo.allocationCount += srcInfo.allocationCount;
12337  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12338  inoutInfo.usedBytes += srcInfo.usedBytes;
12339  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12340  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12341  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12342  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12343  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12344 }
12345 
12346 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12347 {
12348  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12349  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12350  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12351  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12352 }
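// Usage sketch: the three helpers above form an init -> accumulate -> postprocess
// pipeline when folding per-block statistics into a total (container hypothetical):
//
//   VmaStatInfo total;
//   InitStatInfo(total);                        // zeros, mins start at UINT64_MAX
//   for(const VmaStatInfo& s : perBlockStats)
//       VmaAddStatInfo(total, s);
//   VmaPostprocessCalcStatInfo(total);          // computes the *Avg fields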
12353 
12354 VmaPool_T::VmaPool_T(
12355  VmaAllocator hAllocator,
12356  const VmaPoolCreateInfo& createInfo,
12357  VkDeviceSize preferredBlockSize) :
12358  m_BlockVector(
12359  hAllocator,
12360  this, // hParentPool
12361  createInfo.memoryTypeIndex,
12362  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12363  createInfo.minBlockCount,
12364  createInfo.maxBlockCount,
12365  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12366  createInfo.frameInUseCount,
12367  createInfo.blockSize != 0, // explicitBlockSize
12368  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
12369  m_Id(0),
12370  m_Name(VMA_NULL)
12371 {
12372 }
12373 
12374 VmaPool_T::~VmaPool_T()
12375 {
12376 }
12377 
12378 void VmaPool_T::SetName(const char* pName)
12379 {
12380  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12381  VmaFreeString(allocs, m_Name);
12382 
12383  if(pName != VMA_NULL)
12384  {
12385  m_Name = VmaCreateStringCopy(allocs, pName);
12386  }
12387  else
12388  {
12389  m_Name = VMA_NULL;
12390  }
12391 }
12392 
12393 #if VMA_STATS_STRING_ENABLED
12394 
12395 #endif // #if VMA_STATS_STRING_ENABLED
12396 
12397 VmaBlockVector::VmaBlockVector(
12398  VmaAllocator hAllocator,
12399  VmaPool hParentPool,
12400  uint32_t memoryTypeIndex,
12401  VkDeviceSize preferredBlockSize,
12402  size_t minBlockCount,
12403  size_t maxBlockCount,
12404  VkDeviceSize bufferImageGranularity,
12405  uint32_t frameInUseCount,
12406  bool explicitBlockSize,
12407  uint32_t algorithm) :
12408  m_hAllocator(hAllocator),
12409  m_hParentPool(hParentPool),
12410  m_MemoryTypeIndex(memoryTypeIndex),
12411  m_PreferredBlockSize(preferredBlockSize),
12412  m_MinBlockCount(minBlockCount),
12413  m_MaxBlockCount(maxBlockCount),
12414  m_BufferImageGranularity(bufferImageGranularity),
12415  m_FrameInUseCount(frameInUseCount),
12416  m_ExplicitBlockSize(explicitBlockSize),
12417  m_Algorithm(algorithm),
12418  m_HasEmptyBlock(false),
12419  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12420  m_NextBlockId(0)
12421 {
12422 }
12423 
12424 VmaBlockVector::~VmaBlockVector()
12425 {
12426  for(size_t i = m_Blocks.size(); i--; )
12427  {
12428  m_Blocks[i]->Destroy(m_hAllocator);
12429  vma_delete(m_hAllocator, m_Blocks[i]);
12430  }
12431 }
12432 
12433 VkResult VmaBlockVector::CreateMinBlocks()
12434 {
12435  for(size_t i = 0; i < m_MinBlockCount; ++i)
12436  {
12437  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12438  if(res != VK_SUCCESS)
12439  {
12440  return res;
12441  }
12442  }
12443  return VK_SUCCESS;
12444 }
12445 
12446 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12447 {
12448  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12449 
12450  const size_t blockCount = m_Blocks.size();
12451 
12452  pStats->size = 0;
12453  pStats->unusedSize = 0;
12454  pStats->allocationCount = 0;
12455  pStats->unusedRangeCount = 0;
12456  pStats->unusedRangeSizeMax = 0;
12457  pStats->blockCount = blockCount;
12458 
12459  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12460  {
12461  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12462  VMA_ASSERT(pBlock);
12463  VMA_HEAVY_ASSERT(pBlock->Validate());
12464  pBlock->m_pMetadata->AddPoolStats(*pStats);
12465  }
12466 }
12467 
12468 bool VmaBlockVector::IsEmpty()
12469 {
12470  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12471  return m_Blocks.empty();
12472 }
12473 
12474 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12475 {
12476  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12477  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12478  (VMA_DEBUG_MARGIN > 0) &&
12479  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12480  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12481 }
12482 
12483 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12484 
12485 VkResult VmaBlockVector::Allocate(
12486  uint32_t currentFrameIndex,
12487  VkDeviceSize size,
12488  VkDeviceSize alignment,
12489  const VmaAllocationCreateInfo& createInfo,
12490  VmaSuballocationType suballocType,
12491  size_t allocationCount,
12492  VmaAllocation* pAllocations)
12493 {
12494  size_t allocIndex;
12495  VkResult res = VK_SUCCESS;
12496 
12497  if(IsCorruptionDetectionEnabled())
12498  {
12499  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12500  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12501  }
12502 
12503  {
12504  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12505  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12506  {
12507  res = AllocatePage(
12508  currentFrameIndex,
12509  size,
12510  alignment,
12511  createInfo,
12512  suballocType,
12513  pAllocations + allocIndex);
12514  if(res != VK_SUCCESS)
12515  {
12516  break;
12517  }
12518  }
12519  }
12520 
12521  if(res != VK_SUCCESS)
12522  {
12523  // Free all already created allocations.
12524  while(allocIndex--)
12525  {
12526  Free(pAllocations[allocIndex]);
12527  }
12528  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12529  }
12530 
12531  return res;
12532 }
12533 
12534 VkResult VmaBlockVector::AllocatePage(
12535  uint32_t currentFrameIndex,
12536  VkDeviceSize size,
12537  VkDeviceSize alignment,
12538  const VmaAllocationCreateInfo& createInfo,
12539  VmaSuballocationType suballocType,
12540  VmaAllocation* pAllocation)
12541 {
12542  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12543  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12544  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12545  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12546 
12547  VkDeviceSize freeMemory;
12548  {
12549  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12550  VmaBudget heapBudget = {};
12551  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12552  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12553  }
12554 
12555  const bool canFallbackToDedicated = !IsCustomPool();
12556  const bool canCreateNewBlock =
12557  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12558  (m_Blocks.size() < m_MaxBlockCount) &&
12559  (freeMemory >= size || !canFallbackToDedicated);
12560  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12561 
12562  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
12563  // Which in turn is available only when maxBlockCount = 1.
12564  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12565  {
12566  canMakeOtherLost = false;
12567  }
12568 
12569  // Upper address can only be used with linear allocator and within single memory block.
12570  if(isUpperAddress &&
12571  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12572  {
12573  return VK_ERROR_FEATURE_NOT_PRESENT;
12574  }
12575 
12576  // Validate strategy.
12577  switch(strategy)
12578  {
12579  case 0:
12580  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12581  break;
12582  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12583  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12584  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12585  break;
12586  default:
12587  return VK_ERROR_FEATURE_NOT_PRESENT;
12588  }
12589 
12590  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12591  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12592  {
12593  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12594  }
12595 
12596  /*
12597  Under certain conditions, this whole section can be skipped for optimization, so
12598  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12599  e.g. for custom pools with linear algorithm.
12600  */
12601  if(!canMakeOtherLost || canCreateNewBlock)
12602  {
12603  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12604  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12605  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12606 
12607  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12608  {
12609  // Use only last block.
12610  if(!m_Blocks.empty())
12611  {
12612  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12613  VMA_ASSERT(pCurrBlock);
12614  VkResult res = AllocateFromBlock(
12615  pCurrBlock,
12616  currentFrameIndex,
12617  size,
12618  alignment,
12619  allocFlagsCopy,
12620  createInfo.pUserData,
12621  suballocType,
12622  strategy,
12623  pAllocation);
12624  if(res == VK_SUCCESS)
12625  {
12626  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12627  return VK_SUCCESS;
12628  }
12629  }
12630  }
12631  else
12632  {
12634  {
12635  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12636  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12637  {
12638  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12639  VMA_ASSERT(pCurrBlock);
12640  VkResult res = AllocateFromBlock(
12641  pCurrBlock,
12642  currentFrameIndex,
12643  size,
12644  alignment,
12645  allocFlagsCopy,
12646  createInfo.pUserData,
12647  suballocType,
12648  strategy,
12649  pAllocation);
12650  if(res == VK_SUCCESS)
12651  {
12652  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12653  return VK_SUCCESS;
12654  }
12655  }
12656  }
12657  else // WORST_FIT, FIRST_FIT
12658  {
12659  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12660  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12661  {
12662  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12663  VMA_ASSERT(pCurrBlock);
12664  VkResult res = AllocateFromBlock(
12665  pCurrBlock,
12666  currentFrameIndex,
12667  size,
12668  alignment,
12669  allocFlagsCopy,
12670  createInfo.pUserData,
12671  suballocType,
12672  strategy,
12673  pAllocation);
12674  if(res == VK_SUCCESS)
12675  {
12676  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12677  return VK_SUCCESS;
12678  }
12679  }
12680  }
12681  }
12682 
12683  // 2. Try to create new block.
12684  if(canCreateNewBlock)
12685  {
12686  // Calculate optimal size for new block.
12687  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12688  uint32_t newBlockSizeShift = 0;
12689  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12690 
12691  if(!m_ExplicitBlockSize)
12692  {
12693  // Allocate 1/8, 1/4, 1/2 as first blocks.
12694  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12695  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12696  {
12697  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12698  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12699  {
12700  newBlockSize = smallerNewBlockSize;
12701  ++newBlockSizeShift;
12702  }
12703  else
12704  {
12705  break;
12706  }
12707  }
12708  }
12709 
12710  size_t newBlockIndex = 0;
12711  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12712  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12713  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12714  if(!m_ExplicitBlockSize)
12715  {
12716  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12717  {
12718  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12719  if(smallerNewBlockSize >= size)
12720  {
12721  newBlockSize = smallerNewBlockSize;
12722  ++newBlockSizeShift;
12723  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12724  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12725  }
12726  else
12727  {
12728  break;
12729  }
12730  }
12731  }
12732 
12733  if(res == VK_SUCCESS)
12734  {
12735  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12736  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12737 
12738  res = AllocateFromBlock(
12739  pBlock,
12740  currentFrameIndex,
12741  size,
12742  alignment,
12743  allocFlagsCopy,
12744  createInfo.pUserData,
12745  suballocType,
12746  strategy,
12747  pAllocation);
12748  if(res == VK_SUCCESS)
12749  {
12750  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12751  return VK_SUCCESS;
12752  }
12753  else
12754  {
12755  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12756  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12757  }
12758  }
12759  }
12760  }
12761 
12762  // 3. Try to allocate from existing blocks while making other allocations lost.
12763  if(canMakeOtherLost)
12764  {
12765  uint32_t tryIndex = 0;
12766  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12767  {
12768  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12769  VmaAllocationRequest bestRequest = {};
12770  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12771 
12772  // 1. Search existing allocations.
12773  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12774  {
12775  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12776  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12777  {
12778  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12779  VMA_ASSERT(pCurrBlock);
12780  VmaAllocationRequest currRequest = {};
12781  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12782  currentFrameIndex,
12783  m_FrameInUseCount,
12784  m_BufferImageGranularity,
12785  size,
12786  alignment,
12787  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12788  suballocType,
12789  canMakeOtherLost,
12790  strategy,
12791  &currRequest))
12792  {
12793  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12794  if(pBestRequestBlock == VMA_NULL ||
12795  currRequestCost < bestRequestCost)
12796  {
12797  pBestRequestBlock = pCurrBlock;
12798  bestRequest = currRequest;
12799  bestRequestCost = currRequestCost;
12800 
12801  if(bestRequestCost == 0)
12802  {
12803  break;
12804  }
12805  }
12806  }
12807  }
12808  }
12809  else // WORST_FIT, FIRST_FIT
12810  {
12811  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12812  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12813  {
12814  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12815  VMA_ASSERT(pCurrBlock);
12816  VmaAllocationRequest currRequest = {};
12817  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12818  currentFrameIndex,
12819  m_FrameInUseCount,
12820  m_BufferImageGranularity,
12821  size,
12822  alignment,
12823  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12824  suballocType,
12825  canMakeOtherLost,
12826  strategy,
12827  &currRequest))
12828  {
12829  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12830  if(pBestRequestBlock == VMA_NULL ||
12831  currRequestCost < bestRequestCost ||
12832  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12833  {
12834  pBestRequestBlock = pCurrBlock;
12835  bestRequest = currRequest;
12836  bestRequestCost = currRequestCost;
12837 
12838  if(bestRequestCost == 0 ||
12839  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12840  {
12841  break;
12842  }
12843  }
12844  }
12845  }
12846  }
12847 
12848  if(pBestRequestBlock != VMA_NULL)
12849  {
12850  if(mapped)
12851  {
12852  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12853  if(res != VK_SUCCESS)
12854  {
12855  return res;
12856  }
12857  }
12858 
12859  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12860  currentFrameIndex,
12861  m_FrameInUseCount,
12862  &bestRequest))
12863  {
12864  // Allocate from this pBlock.
12865  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12866  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12867  UpdateHasEmptyBlock();
12868  (*pAllocation)->InitBlockAllocation(
12869  pBestRequestBlock,
12870  bestRequest.offset,
12871  alignment,
12872  size,
12873  m_MemoryTypeIndex,
12874  suballocType,
12875  mapped,
12876  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12877  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12878  VMA_DEBUG_LOG(" Returned from existing block");
12879  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12880  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12881  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12882  {
12883  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12884  }
12885  if(IsCorruptionDetectionEnabled())
12886  {
12887  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12888  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12889  }
12890  return VK_SUCCESS;
12891  }
12892  // else: Some allocations must have been touched in the meantime - next try.
12893  }
12894  else
12895  {
12896  // Could not find place in any of the blocks - break outer loop.
12897  break;
12898  }
12899  }
12900  /* Maximum number of tries exceeded - a very unlikely event when many other
12901  threads are simultaneously touching allocations, making it impossible to make
12902  them lost at the same time as we try to allocate. */
12903  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12904  {
12905  return VK_ERROR_TOO_MANY_OBJECTS;
12906  }
12907  }
12908 
12909  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12910 }
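// Worked example - a sketch of the sizing heuristic in step 2 above: with
// m_PreferredBlockSize = 256 MiB, no existing blocks, and a 10 MiB request, the first
// loop halves 256 -> 128 -> 64 -> 32 MiB (each candidate still exceeds the largest
// existing block and is >= 2 * 10 MiB), so the first block created is 32 MiB, not
// 256 MiB. Only when CreateBlock() fails and the shift budget
// (NEW_BLOCK_SIZE_SHIFT_MAX) is not yet exhausted does the retry loop halve further,
// and never below `size` itself.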
12911 
12912 void VmaBlockVector::Free(
12913  const VmaAllocation hAllocation)
12914 {
12915  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12916 
12917  bool budgetExceeded = false;
12918  {
12919  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12920  VmaBudget heapBudget = {};
12921  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12922  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12923  }
12924 
12925  // Scope for lock.
12926  {
12927  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12928 
12929  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12930 
12931  if(IsCorruptionDetectionEnabled())
12932  {
12933  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12934  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12935  }
12936 
12937  if(hAllocation->IsPersistentMap())
12938  {
12939  pBlock->Unmap(m_hAllocator, 1);
12940  }
12941 
12942  pBlock->m_pMetadata->Free(hAllocation);
12943  VMA_HEAVY_ASSERT(pBlock->Validate());
12944 
12945  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12946 
12947  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12948  // pBlock became empty after this deallocation.
12949  if(pBlock->m_pMetadata->IsEmpty())
12950  {
12951  // Already has empty block. We don't want to have two, so delete this one.
12952  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12953  {
12954  pBlockToDelete = pBlock;
12955  Remove(pBlock);
12956  }
12957  // else: We now have an empty block - leave it.
12958  }
12959  // pBlock didn't become empty, but we have another empty block - find and free that one.
12960  // (This is optional, heuristics.)
12961  else if(m_HasEmptyBlock && canDeleteBlock)
12962  {
12963  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12964  if(pLastBlock->m_pMetadata->IsEmpty())
12965  {
12966  pBlockToDelete = pLastBlock;
12967  m_Blocks.pop_back();
12968  }
12969  }
12970 
12971  UpdateHasEmptyBlock();
12972  IncrementallySortBlocks();
12973  }
12974 
12975  // Destruction of a free block. Deferred until this point, outside of mutex
12976  // lock, for performance reason.
12977  if(pBlockToDelete != VMA_NULL)
12978  {
12979  VMA_DEBUG_LOG(" Deleted empty block");
12980  pBlockToDelete->Destroy(m_hAllocator);
12981  vma_delete(m_hAllocator, pBlockToDelete);
12982  }
12983 }
12984 
12985 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12986 {
12987  VkDeviceSize result = 0;
12988  for(size_t i = m_Blocks.size(); i--; )
12989  {
12990  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12991  if(result >= m_PreferredBlockSize)
12992  {
12993  break;
12994  }
12995  }
12996  return result;
12997 }
12998 
12999 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13000 {
13001  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13002  {
13003  if(m_Blocks[blockIndex] == pBlock)
13004  {
13005  VmaVectorRemove(m_Blocks, blockIndex);
13006  return;
13007  }
13008  }
13009  VMA_ASSERT(0);
13010 }
13011 
13012 void VmaBlockVector::IncrementallySortBlocks()
13013 {
13014  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13015  {
13016  // Bubble sort only until first swap.
13017  for(size_t i = 1; i < m_Blocks.size(); ++i)
13018  {
13019  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13020  {
13021  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13022  return;
13023  }
13024  }
13025  }
13026 }
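// A standalone sketch of the "bubble sort only until first swap" idea above: one
// adjacent swap per call keeps the vector of blocks nearly sorted by free space at
// O(n) per call, amortizing the sort across the many Free() calls that invoke it.
//
//   #include <cstddef>
//   #include <utility>
//   #include <vector>
//
//   template<typename T, typename LessFn>
//   static void IncrementalSortStep(std::vector<T>& v, LessFn less)
//   {
//       for(size_t i = 1; i < v.size(); ++i)
//       {
//           if(less(v[i], v[i - 1]))
//           {
//               std::swap(v[i - 1], v[i]);
//               return; // at most one swap per call
//           }
//       }
//   }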
13027 
13028 VkResult VmaBlockVector::AllocateFromBlock(
13029  VmaDeviceMemoryBlock* pBlock,
13030  uint32_t currentFrameIndex,
13031  VkDeviceSize size,
13032  VkDeviceSize alignment,
13033  VmaAllocationCreateFlags allocFlags,
13034  void* pUserData,
13035  VmaSuballocationType suballocType,
13036  uint32_t strategy,
13037  VmaAllocation* pAllocation)
13038 {
13039  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13040  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13041  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13042  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13043 
13044  VmaAllocationRequest currRequest = {};
13045  if(pBlock->m_pMetadata->CreateAllocationRequest(
13046  currentFrameIndex,
13047  m_FrameInUseCount,
13048  m_BufferImageGranularity,
13049  size,
13050  alignment,
13051  isUpperAddress,
13052  suballocType,
13053  false, // canMakeOtherLost
13054  strategy,
13055  &currRequest))
13056  {
13057  // Allocate from pCurrBlock.
13058  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13059 
13060  if(mapped)
13061  {
13062  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13063  if(res != VK_SUCCESS)
13064  {
13065  return res;
13066  }
13067  }
13068 
13069  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13070  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13071  UpdateHasEmptyBlock();
13072  (*pAllocation)->InitBlockAllocation(
13073  pBlock,
13074  currRequest.offset,
13075  alignment,
13076  size,
13077  m_MemoryTypeIndex,
13078  suballocType,
13079  mapped,
13080  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13081  VMA_HEAVY_ASSERT(pBlock->Validate());
13082  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13083  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13084  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13085  {
13086  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13087  }
13088  if(IsCorruptionDetectionEnabled())
13089  {
13090  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13091  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13092  }
13093  return VK_SUCCESS;
13094  }
13095  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13096 }
13097 
13098 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13099 {
13100  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13101  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13102  allocInfo.allocationSize = blockSize;
13103 
13104 #if VMA_BUFFER_DEVICE_ADDRESS
13105  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13106  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13107  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13108  {
13109  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13110  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13111  }
13112 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13113 
13114  VkDeviceMemory mem = VK_NULL_HANDLE;
13115  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13116  if(res < 0)
13117  {
13118  return res;
13119  }
13120 
13121  // New VkDeviceMemory successfully created.
13122 
13123  // Create new Allocation for it.
13124  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13125  pBlock->Init(
13126  m_hAllocator,
13127  m_hParentPool,
13128  m_MemoryTypeIndex,
13129  mem,
13130  allocInfo.allocationSize,
13131  m_NextBlockId++,
13132  m_Algorithm);
13133 
13134  m_Blocks.push_back(pBlock);
13135  if(pNewBlockIndex != VMA_NULL)
13136  {
13137  *pNewBlockIndex = m_Blocks.size() - 1;
13138  }
13139 
13140  return VK_SUCCESS;
13141 }
13142 
13143 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13144  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13145  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13146 {
13147  const size_t blockCount = m_Blocks.size();
13148  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13149 
13150  enum BLOCK_FLAG
13151  {
13152  BLOCK_FLAG_USED = 0x00000001,
13153  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13154  };
13155 
13156  struct BlockInfo
13157  {
13158  uint32_t flags;
13159  void* pMappedData;
13160  };
13161  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13162  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13163  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13164 
13165  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13166  const size_t moveCount = moves.size();
13167  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13168  {
13169  const VmaDefragmentationMove& move = moves[moveIndex];
13170  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13171  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13172  }
13173 
13174  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13175 
13176  // Go over all blocks. Get mapped pointer or map if necessary.
13177  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13178  {
13179  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13180  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13181  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13182  {
13183  currBlockInfo.pMappedData = pBlock->GetMappedData();
13184  // It is not originally mapped - map it.
13185  if(currBlockInfo.pMappedData == VMA_NULL)
13186  {
13187  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13188  if(pDefragCtx->res == VK_SUCCESS)
13189  {
13190  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13191  }
13192  }
13193  }
13194  }
13195 
13196  // Go over all moves. Do actual data transfer.
13197  if(pDefragCtx->res == VK_SUCCESS)
13198  {
13199  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13200  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13201 
13202  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13203  {
13204  const VmaDefragmentationMove& move = moves[moveIndex];
13205 
13206  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13207  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13208 
13209  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13210 
13211  // Invalidate source.
13212  if(isNonCoherent)
13213  {
13214  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13215  memRange.memory = pSrcBlock->GetDeviceMemory();
13216  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13217  memRange.size = VMA_MIN(
13218  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13219  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13220  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13221  }
13222 
13223  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13224  memmove(
13225  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13226  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13227  static_cast<size_t>(move.size));
13228 
13229  if(IsCorruptionDetectionEnabled())
13230  {
13231  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13232  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13233  }
13234 
13235  // Flush destination.
13236  if(isNonCoherent)
13237  {
13238  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13239  memRange.memory = pDstBlock->GetDeviceMemory();
13240  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13241  memRange.size = VMA_MIN(
13242  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13243  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13244  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13245  }
13246  }
13247  }
13248 
13249  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13250  // Regardless of pCtx->res == VK_SUCCESS.
13251  for(size_t blockIndex = blockCount; blockIndex--; )
13252  {
13253  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13254  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13255  {
13256  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13257  pBlock->Unmap(m_hAllocator, 1);
13258  }
13259  }
13260 }
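// A standalone sketch of the range rounding used above: for non-coherent memory,
// VkMappedMemoryRange offset and size must be multiples of nonCoherentAtomSize (or
// run to the end of the allocation), so the moved byte range is expanded outward and
// clamped to the block size. Assumes atomSize is a power of two:
//
//   #include <algorithm>
//   #include <cstdint>
//
//   struct AtomRange { uint64_t offset, size; };
//
//   static AtomRange AlignRangeToAtom(uint64_t moveOffset, uint64_t moveSize,
//       uint64_t atomSize, uint64_t blockSize)
//   {
//       const uint64_t offset = moveOffset & ~(atomSize - 1);                          // align down
//       const uint64_t end = (moveOffset + moveSize + atomSize - 1) & ~(atomSize - 1); // align up
//       return { offset, std::min(end, blockSize) - offset };
//   }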
13261 
13262 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13263  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13264  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13265  VkCommandBuffer commandBuffer)
13266 {
13267  const size_t blockCount = m_Blocks.size();
13268 
13269  pDefragCtx->blockContexts.resize(blockCount);
13270  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13271 
13272  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13273  const size_t moveCount = moves.size();
13274  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13275  {
13276  const VmaDefragmentationMove& move = moves[moveIndex];
13277 
13278  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13279  {
13280  // Old-school moves still require us to map the whole block.
13281  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13282  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13283  }
13284  }
13285 
13286  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13287 
13288  // Go over all blocks. Create and bind buffer for whole block if necessary.
13289  {
13290  VkBufferCreateInfo bufCreateInfo;
13291  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13292 
13293  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13294  {
13295  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13296  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13297  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13298  {
13299  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13300  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13301  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13302  if(pDefragCtx->res == VK_SUCCESS)
13303  {
13304  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13305  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13306  }
13307  }
13308  }
13309  }
13310 
13311  // Go over all moves. Post data transfer commands to command buffer.
13312  if(pDefragCtx->res == VK_SUCCESS)
13313  {
13314  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13315  {
13316  const VmaDefragmentationMove& move = moves[moveIndex];
13317 
13318  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13319  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13320 
13321  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13322 
13323  VkBufferCopy region = {
13324  move.srcOffset,
13325  move.dstOffset,
13326  move.size };
13327  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13328  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13329  }
13330  }
13331 
13332  // Save buffers to defrag context for later destruction.
13333  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13334  {
13335  pDefragCtx->res = VK_NOT_READY;
13336  }
13337 }
13338 
13339 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13340 {
13341  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13342  {
13343  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13344  if(pBlock->m_pMetadata->IsEmpty())
13345  {
13346  if(m_Blocks.size() > m_MinBlockCount)
13347  {
13348  if(pDefragmentationStats != VMA_NULL)
13349  {
13350  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13351  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13352  }
13353 
13354  VmaVectorRemove(m_Blocks, blockIndex);
13355  pBlock->Destroy(m_hAllocator);
13356  vma_delete(m_hAllocator, pBlock);
13357  }
13358  else
13359  {
13360  break;
13361  }
13362  }
13363  }
13364  UpdateHasEmptyBlock();
13365 }
13366 
13367 void VmaBlockVector::UpdateHasEmptyBlock()
13368 {
13369  m_HasEmptyBlock = false;
13370  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13371  {
13372  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13373  if(pBlock->m_pMetadata->IsEmpty())
13374  {
13375  m_HasEmptyBlock = true;
13376  break;
13377  }
13378  }
13379 }
13380 
13381 #if VMA_STATS_STRING_ENABLED
13382 
13383 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13384 {
13385  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13386 
13387  json.BeginObject();
13388 
13389  if(IsCustomPool())
13390  {
13391  const char* poolName = m_hParentPool->GetName();
13392  if(poolName != VMA_NULL && poolName[0] != '\0')
13393  {
13394  json.WriteString("Name");
13395  json.WriteString(poolName);
13396  }
13397 
13398  json.WriteString("MemoryTypeIndex");
13399  json.WriteNumber(m_MemoryTypeIndex);
13400 
13401  json.WriteString("BlockSize");
13402  json.WriteNumber(m_PreferredBlockSize);
13403 
13404  json.WriteString("BlockCount");
13405  json.BeginObject(true);
13406  if(m_MinBlockCount > 0)
13407  {
13408  json.WriteString("Min");
13409  json.WriteNumber((uint64_t)m_MinBlockCount);
13410  }
13411  if(m_MaxBlockCount < SIZE_MAX)
13412  {
13413  json.WriteString("Max");
13414  json.WriteNumber((uint64_t)m_MaxBlockCount);
13415  }
13416  json.WriteString("Cur");
13417  json.WriteNumber((uint64_t)m_Blocks.size());
13418  json.EndObject();
13419 
13420  if(m_FrameInUseCount > 0)
13421  {
13422  json.WriteString("FrameInUseCount");
13423  json.WriteNumber(m_FrameInUseCount);
13424  }
13425 
13426  if(m_Algorithm != 0)
13427  {
13428  json.WriteString("Algorithm");
13429  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13430  }
13431  }
13432  else
13433  {
13434  json.WriteString("PreferredBlockSize");
13435  json.WriteNumber(m_PreferredBlockSize);
13436  }
13437 
13438  json.WriteString("Blocks");
13439  json.BeginObject();
13440  for(size_t i = 0; i < m_Blocks.size(); ++i)
13441  {
13442  json.BeginString();
13443  json.ContinueString(m_Blocks[i]->GetId());
13444  json.EndString();
13445 
13446  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13447  }
13448  json.EndObject();
13449 
13450  json.EndObject();
13451 }
13452 
13453 #endif // #if VMA_STATS_STRING_ENABLED
13454 
13455 void VmaBlockVector::Defragment(
13456  class VmaBlockVectorDefragmentationContext* pCtx,
13457  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13458  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13459  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13460  VkCommandBuffer commandBuffer)
13461 {
13462  pCtx->res = VK_SUCCESS;
13463 
13464  const VkMemoryPropertyFlags memPropFlags =
13465  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13466  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13467 
13468  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13469  isHostVisible;
13470  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13471  !IsCorruptionDetectionEnabled() &&
13472  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13473 
13474  // There are options to defragment this memory type.
13475  if(canDefragmentOnCpu || canDefragmentOnGpu)
13476  {
13477  bool defragmentOnGpu;
13478  // There is only one option to defragment this memory type.
13479  if(canDefragmentOnGpu != canDefragmentOnCpu)
13480  {
13481  defragmentOnGpu = canDefragmentOnGpu;
13482  }
13483  // Both options are available: Heuristics to choose the best one.
13484  else
13485  {
13486  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13487  m_hAllocator->IsIntegratedGpu();
13488  }
13489 
13490  bool overlappingMoveSupported = !defragmentOnGpu;
13491 
13492  if(m_hAllocator->m_UseMutex)
13493  {
13494  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13495  {
13496  if(!m_Mutex.TryLockWrite())
13497  {
13498  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13499  return;
13500  }
13501  }
13502  else
13503  {
13504  m_Mutex.LockWrite();
13505  pCtx->mutexLocked = true;
13506  }
13507  }
13508 
13509  pCtx->Begin(overlappingMoveSupported, flags);
13510 
13511  // Defragment.
13512 
13513  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13514  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13515  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13516 
13517  // Accumulate statistics.
13518  if(pStats != VMA_NULL)
13519  {
13520  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13521  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13522  pStats->bytesMoved += bytesMoved;
13523  pStats->allocationsMoved += allocationsMoved;
13524  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13525  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13526  if(defragmentOnGpu)
13527  {
13528  maxGpuBytesToMove -= bytesMoved;
13529  maxGpuAllocationsToMove -= allocationsMoved;
13530  }
13531  else
13532  {
13533  maxCpuBytesToMove -= bytesMoved;
13534  maxCpuAllocationsToMove -= allocationsMoved;
13535  }
13536  }
13537 
13538  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13539  {
13540  if(m_hAllocator->m_UseMutex)
13541  m_Mutex.UnlockWrite();
13542 
13543  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13544  pCtx->res = VK_NOT_READY;
13545 
13546  return;
13547  }
13548 
13549  if(pCtx->res >= VK_SUCCESS)
13550  {
13551  if(defragmentOnGpu)
13552  {
13553  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13554  }
13555  else
13556  {
13557  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13558  }
13559  }
13560  }
13561 }
13562 
13563 void VmaBlockVector::DefragmentationEnd(
13564  class VmaBlockVectorDefragmentationContext* pCtx,
13565  uint32_t flags,
13566  VmaDefragmentationStats* pStats)
13567 {
13568  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
13569  {
13570  VMA_ASSERT(pCtx->mutexLocked == false);
13571 
13572  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
13573  // lock protecting us. Since we mutate state here, we have to acquire the lock now.
13574  m_Mutex.LockWrite();
13575  pCtx->mutexLocked = true;
13576  }
13577 
13578  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
13579  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
13580  {
13581  // Destroy buffers.
13582  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
13583  {
13584  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
13585  if(blockCtx.hBuffer)
13586  {
13587  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13588  }
13589  }
13590 
13591  if(pCtx->res >= VK_SUCCESS)
13592  {
13593  FreeEmptyBlocks(pStats);
13594  }
13595  }
13596 
13597  if(pCtx->mutexLocked)
13598  {
13599  VMA_ASSERT(m_hAllocator->m_UseMutex);
13600  m_Mutex.UnlockWrite();
13601  }
13602 }
13603 
13604 uint32_t VmaBlockVector::ProcessDefragmentations(
13605  class VmaBlockVectorDefragmentationContext *pCtx,
13606  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13607 {
13608  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13609 
13610  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13611 
13612  for(uint32_t i = 0; i < moveCount; ++ i)
13613  {
13614  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13615 
13616  pMove->allocation = move.hAllocation;
13617  pMove->memory = move.pDstBlock->GetDeviceMemory();
13618  pMove->offset = move.dstOffset;
13619 
13620  ++ pMove;
13621  }
13622 
13623  pCtx->defragmentationMovesProcessed += moveCount;
13624 
13625  return moveCount;
13626 }
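/*
Illustrative sketch (assumes the public incremental API drives this code):
each VmaDefragmentationPassMoveInfo filled in above tells the application
where an allocation is headed; the copy itself is the application's job, e.g.:

    for(uint32_t i = 0; i < passInfo.moveCount; ++i)
    {
        const VmaDefragmentationPassMoveInfo& m = passInfo.pMoves[i];
        // m.allocation is moving to m.memory at m.offset: recreate the
        // buffer/image there, copy the contents, destroy the old resource.
    }

Here `passInfo` is a hypothetical VmaDefragmentationPassInfo previously filled
by vmaBeginDefragmentationPass.
*/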
13627 
13628 void VmaBlockVector::CommitDefragmentations(
13629  class VmaBlockVectorDefragmentationContext *pCtx,
13630  VmaDefragmentationStats* pStats)
13631 {
13632  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13633 
13634  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13635  {
13636  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13637 
13638  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13639  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13640  }
13641 
13642  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13643  FreeEmptyBlocks(pStats);
13644 }
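/*
Worked trace (illustrative): if a pass processed moves [0..9] while nothing was
committed yet (processed == 10, committed == 0), CommitDefragmentations frees
the 10 source regions, rebinds the allocations, and leaves committed ==
processed == 10. If the full plan holds 12 moves, DefragmentPassEnd (further
below) then reports VK_NOT_READY because 12 != 10, i.e. another pass is needed.
*/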
13645 
13646 size_t VmaBlockVector::CalcAllocationCount() const
13647 {
13648  size_t result = 0;
13649  for(size_t i = 0; i < m_Blocks.size(); ++i)
13650  {
13651  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13652  }
13653  return result;
13654 }
13655 
13656 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13657 {
13658  if(m_BufferImageGranularity == 1)
13659  {
13660  return false;
13661  }
13662  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13663  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13664  {
13665  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13666  VMA_ASSERT(m_Algorithm == 0);
13667  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13668  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13669  {
13670  return true;
13671  }
13672  }
13673  return false;
13674 }
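/*
Worked example: with m_BufferImageGranularity == 4096, a linear buffer ending
at offset 5000 and an optimal-tiling image starting at offset 6000 share the
page [4096, 8192), so a conflict is possible and this returns true. That in
turn rules out the fast defragmentation algorithm (see
VmaBlockVectorDefragmentationContext::Begin below).
*/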
13675 
13676 void VmaBlockVector::MakePoolAllocationsLost(
13677  uint32_t currentFrameIndex,
13678  size_t* pLostAllocationCount)
13679 {
13680  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13681  size_t lostAllocationCount = 0;
13682  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13683  {
13684  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13685  VMA_ASSERT(pBlock);
13686  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13687  }
13688  if(pLostAllocationCount != VMA_NULL)
13689  {
13690  *pLostAllocationCount = lostAllocationCount;
13691  }
13692 }
13693 
13694 VkResult VmaBlockVector::CheckCorruption()
13695 {
13696  if(!IsCorruptionDetectionEnabled())
13697  {
13698  return VK_ERROR_FEATURE_NOT_PRESENT;
13699  }
13700 
13701  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13702  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13703  {
13704  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13705  VMA_ASSERT(pBlock);
13706  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13707  if(res != VK_SUCCESS)
13708  {
13709  return res;
13710  }
13711  }
13712  return VK_SUCCESS;
13713 }
13714 
13715 void VmaBlockVector::AddStats(VmaStats* pStats)
13716 {
13717  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13718  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13719 
13720  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13721 
13722  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13723  {
13724  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13725  VMA_ASSERT(pBlock);
13726  VMA_HEAVY_ASSERT(pBlock->Validate());
13727  VmaStatInfo allocationStatInfo;
13728  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13729  VmaAddStatInfo(pStats->total, allocationStatInfo);
13730  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13731  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13732  }
13733 }
13734 
13735 ////////////////////////////////////////////////////////////////////////////////
13736 // VmaDefragmentationAlgorithm_Generic members definition
13737 
13738 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13739  VmaAllocator hAllocator,
13740  VmaBlockVector* pBlockVector,
13741  uint32_t currentFrameIndex,
13742  bool overlappingMoveSupported) :
13743  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13744  m_AllocationCount(0),
13745  m_AllAllocations(false),
13746  m_BytesMoved(0),
13747  m_AllocationsMoved(0),
13748  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13749 {
13750  // Create block info for each block.
13751  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13752  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13753  {
13754  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13755  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13756  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13757  m_Blocks.push_back(pBlockInfo);
13758  }
13759 
13760  // Sort them by m_pBlock pointer value.
13761  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13762 }
13763 
13764 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13765 {
13766  for(size_t i = m_Blocks.size(); i--; )
13767  {
13768  vma_delete(m_hAllocator, m_Blocks[i]);
13769  }
13770 }
13771 
13772 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13773 {
13774  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
13775  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13776  {
13777  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13778  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13779  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13780  {
13781  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13782  (*it)->m_Allocations.push_back(allocInfo);
13783  }
13784  else
13785  {
13786  VMA_ASSERT(0);
13787  }
13788 
13789  ++m_AllocationCount;
13790  }
13791 }
13792 
13793 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13794  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13795  VkDeviceSize maxBytesToMove,
13796  uint32_t maxAllocationsToMove,
13797  bool freeOldAllocations)
13798 {
13799  if(m_Blocks.empty())
13800  {
13801  return VK_SUCCESS;
13802  }
13803 
13804  // This is a choice based on research.
13805  // Option 1:
13806  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13807  // Option 2:
13808  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13809  // Option 3:
13810  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13811 
13812  size_t srcBlockMinIndex = 0;
13813  // With FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
13814  /*
13815  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13816  {
13817  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13818  if(blocksWithNonMovableCount > 0)
13819  {
13820  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13821  }
13822  }
13823  */
13824 
13825  size_t srcBlockIndex = m_Blocks.size() - 1;
13826  size_t srcAllocIndex = SIZE_MAX;
13827  for(;;)
13828  {
13829  // 1. Find next allocation to move.
13830  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13831  // 1.2. Then start from last to first m_Allocations.
13832  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13833  {
13834  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13835  {
13836  // Finished: no more allocations to process.
13837  if(srcBlockIndex == srcBlockMinIndex)
13838  {
13839  return VK_SUCCESS;
13840  }
13841  else
13842  {
13843  --srcBlockIndex;
13844  srcAllocIndex = SIZE_MAX;
13845  }
13846  }
13847  else
13848  {
13849  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13850  }
13851  }
13852 
13853  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13854  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13855 
13856  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13857  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13858  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13859  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13860 
13861  // 2. Try to find new place for this allocation in preceding or current block.
13862  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13863  {
13864  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13865  VmaAllocationRequest dstAllocRequest;
13866  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13867  m_CurrentFrameIndex,
13868  m_pBlockVector->GetFrameInUseCount(),
13869  m_pBlockVector->GetBufferImageGranularity(),
13870  size,
13871  alignment,
13872  false, // upperAddress
13873  suballocType,
13874  false, // canMakeOtherLost
13875  strategy,
13876  &dstAllocRequest) &&
13877  MoveMakesSense(
13878  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13879  {
13880  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13881 
13882  // Reached limit on number of allocations or bytes to move.
13883  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13884  (m_BytesMoved + size > maxBytesToMove))
13885  {
13886  return VK_SUCCESS;
13887  }
13888 
13889  VmaDefragmentationMove move = {};
13890  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13891  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13892  move.srcOffset = srcOffset;
13893  move.dstOffset = dstAllocRequest.offset;
13894  move.size = size;
13895  move.hAllocation = allocInfo.m_hAllocation;
13896  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
13897  move.pDstBlock = pDstBlockInfo->m_pBlock;
13898 
13899  moves.push_back(move);
13900 
13901  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13902  dstAllocRequest,
13903  suballocType,
13904  size,
13905  allocInfo.m_hAllocation);
13906 
13907  if(freeOldAllocations)
13908  {
13909  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13910  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13911  }
13912 
13913  if(allocInfo.m_pChanged != VMA_NULL)
13914  {
13915  *allocInfo.m_pChanged = VK_TRUE;
13916  }
13917 
13918  ++m_AllocationsMoved;
13919  m_BytesMoved += size;
13920 
13921  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13922 
13923  break;
13924  }
13925  }
13926 
13927  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13928 
13929  if(srcAllocIndex > 0)
13930  {
13931  --srcAllocIndex;
13932  }
13933  else
13934  {
13935  if(srcBlockIndex > 0)
13936  {
13937  --srcBlockIndex;
13938  srcAllocIndex = SIZE_MAX;
13939  }
13940  else
13941  {
13942  return VK_SUCCESS;
13943  }
13944  }
13945  }
13946 }
13947 
13948 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13949 {
13950  size_t result = 0;
13951  for(size_t i = 0; i < m_Blocks.size(); ++i)
13952  {
13953  if(m_Blocks[i]->m_HasNonMovableAllocations)
13954  {
13955  ++result;
13956  }
13957  }
13958  return result;
13959 }
13960 
13961 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13962  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13963  VkDeviceSize maxBytesToMove,
13964  uint32_t maxAllocationsToMove,
13965  VmaDefragmentationFlags flags)
13966 {
13967  if(!m_AllAllocations && m_AllocationCount == 0)
13968  {
13969  return VK_SUCCESS;
13970  }
13971 
13972  const size_t blockCount = m_Blocks.size();
13973  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13974  {
13975  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13976 
13977  if(m_AllAllocations)
13978  {
13979  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13980  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13981  it != pMetadata->m_Suballocations.end();
13982  ++it)
13983  {
13984  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13985  {
13986  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13987  pBlockInfo->m_Allocations.push_back(allocInfo);
13988  }
13989  }
13990  }
13991 
13992  pBlockInfo->CalcHasNonMovableAllocations();
13993 
13994  // This is a choice based on research.
13995  // Option 1:
13996  pBlockInfo->SortAllocationsByOffsetDescending();
13997  // Option 2:
13998  //pBlockInfo->SortAllocationsBySizeDescending();
13999  }
14000 
14001  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
14002  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14003 
14004  // This is a choice based on research.
14005  const uint32_t roundCount = 2;
14006 
14007  // Execute defragmentation rounds (the main part).
14008  VkResult result = VK_SUCCESS;
14009  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14010  {
14011  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14012  }
14013 
14014  return result;
14015 }
14016 
14017 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14018  size_t dstBlockIndex, VkDeviceSize dstOffset,
14019  size_t srcBlockIndex, VkDeviceSize srcOffset)
14020 {
14021  if(dstBlockIndex < srcBlockIndex)
14022  {
14023  return true;
14024  }
14025  if(dstBlockIndex > srcBlockIndex)
14026  {
14027  return false;
14028  }
14029  if(dstOffset < srcOffset)
14030  {
14031  return true;
14032  }
14033  return false;
14034 }
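/*
Worked examples of the ordering this predicate imposes:
    MoveMakesSense(dst=1, dstOff=256, src=2, srcOff=0)   -> true  (earlier block wins)
    MoveMakesSense(dst=1, dstOff=512, src=1, srcOff=256) -> false (same block, higher offset)
    MoveMakesSense(dst=1, dstOff=0,   src=1, srcOff=256) -> true  (same block, lower offset)
The net effect is strict compaction toward the front on (blockIndex, offset) pairs.
*/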
14035 
14036 ////////////////////////////////////////////////////////////////////////////////
14037 // VmaDefragmentationAlgorithm_Fast
14038 
14039 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14040  VmaAllocator hAllocator,
14041  VmaBlockVector* pBlockVector,
14042  uint32_t currentFrameIndex,
14043  bool overlappingMoveSupported) :
14044  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14045  m_OverlappingMoveSupported(overlappingMoveSupported),
14046  m_AllocationCount(0),
14047  m_AllAllocations(false),
14048  m_BytesMoved(0),
14049  m_AllocationsMoved(0),
14050  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14051 {
14052  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14053 
14054 }
14055 
14056 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14057 {
14058 }
14059 
14060 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14061  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14062  VkDeviceSize maxBytesToMove,
14063  uint32_t maxAllocationsToMove,
14064  VmaDefragmentationFlags flags)
14065 {
14066  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14067 
14068  const size_t blockCount = m_pBlockVector->GetBlockCount();
14069  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14070  {
14071  return VK_SUCCESS;
14072  }
14073 
14074  PreprocessMetadata();
14075 
14076  // Sort blocks in order from most "destination" to most "source".
14077 
14078  m_BlockInfos.resize(blockCount);
14079  for(size_t i = 0; i < blockCount; ++i)
14080  {
14081  m_BlockInfos[i].origBlockIndex = i;
14082  }
14083 
14084  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14085  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14086  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14087  });
14088 
14089  // THE MAIN ALGORITHM
14090 
14091  FreeSpaceDatabase freeSpaceDb;
14092 
14093  size_t dstBlockInfoIndex = 0;
14094  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14095  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14096  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14097  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14098  VkDeviceSize dstOffset = 0;
14099 
14100  bool end = false;
14101  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14102  {
14103  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14104  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14105  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14106  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14107  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14108  {
14109  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14110  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14111  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14112  if(m_AllocationsMoved == maxAllocationsToMove ||
14113  m_BytesMoved + srcAllocSize > maxBytesToMove)
14114  {
14115  end = true;
14116  break;
14117  }
14118  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14119 
14120  VmaDefragmentationMove move = {};
14121  // Try to place it in one of free spaces from the database.
14122  size_t freeSpaceInfoIndex;
14123  VkDeviceSize dstAllocOffset;
14124  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14125  freeSpaceInfoIndex, dstAllocOffset))
14126  {
14127  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14128  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14129  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14130 
14131  // Same block
14132  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14133  {
14134  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14135 
14136  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14137 
14138  VmaSuballocation suballoc = *srcSuballocIt;
14139  suballoc.offset = dstAllocOffset;
14140  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14141  m_BytesMoved += srcAllocSize;
14142  ++m_AllocationsMoved;
14143 
14144  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14145  ++nextSuballocIt;
14146  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14147  srcSuballocIt = nextSuballocIt;
14148 
14149  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14150 
14151  move.srcBlockIndex = srcOrigBlockIndex;
14152  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14153  move.srcOffset = srcAllocOffset;
14154  move.dstOffset = dstAllocOffset;
14155  move.size = srcAllocSize;
14156 
14157  moves.push_back(move);
14158  }
14159  // Different block
14160  else
14161  {
14162  // MOVE OPTION 2: Move the allocation to a different block.
14163 
14164  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14165 
14166  VmaSuballocation suballoc = *srcSuballocIt;
14167  suballoc.offset = dstAllocOffset;
14168  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14169  m_BytesMoved += srcAllocSize;
14170  ++m_AllocationsMoved;
14171 
14172  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14173  ++nextSuballocIt;
14174  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14175  srcSuballocIt = nextSuballocIt;
14176 
14177  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14178 
14179  move.srcBlockIndex = srcOrigBlockIndex;
14180  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14181  move.srcOffset = srcAllocOffset;
14182  move.dstOffset = dstAllocOffset;
14183  move.size = srcAllocSize;
14184 
14185  moves.push_back(move);
14186  }
14187  }
14188  else
14189  {
14190  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14191 
14192  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
14193  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14194  dstAllocOffset + srcAllocSize > dstBlockSize)
14195  {
14196  // But before that, register remaining free space at the end of dst block.
14197  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14198 
14199  ++dstBlockInfoIndex;
14200  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14201  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14202  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14203  dstBlockSize = pDstMetadata->GetSize();
14204  dstOffset = 0;
14205  dstAllocOffset = 0;
14206  }
14207 
14208  // Same block
14209  if(dstBlockInfoIndex == srcBlockInfoIndex)
14210  {
14211  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14212 
14213  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14214 
14215  bool skipOver = overlap;
14216  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14217  {
14218  // If destination and source places overlap, skip the move if it would
14219  // shift the allocation by less than 1/64 of its size.
14220  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14221  }
14222 
14223  if(skipOver)
14224  {
14225  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14226 
14227  dstOffset = srcAllocOffset + srcAllocSize;
14228  ++srcSuballocIt;
14229  }
14230  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14231  else
14232  {
14233  srcSuballocIt->offset = dstAllocOffset;
14234  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14235  dstOffset = dstAllocOffset + srcAllocSize;
14236  m_BytesMoved += srcAllocSize;
14237  ++m_AllocationsMoved;
14238  ++srcSuballocIt;
14239 
14240  move.srcBlockIndex = srcOrigBlockIndex;
14241  move.dstBlockIndex = dstOrigBlockIndex;
14242  move.srcOffset = srcAllocOffset;
14243  move.dstOffset = dstAllocOffset;
14244  move.size = srcAllocSize;
14245 
14246  moves.push_back(move);
14247  }
14248  }
14249  // Different block
14250  else
14251  {
14252  // MOVE OPTION 2: Move the allocation to a different block.
14253 
14254  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14255  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14256 
14257  VmaSuballocation suballoc = *srcSuballocIt;
14258  suballoc.offset = dstAllocOffset;
14259  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14260  dstOffset = dstAllocOffset + srcAllocSize;
14261  m_BytesMoved += srcAllocSize;
14262  ++m_AllocationsMoved;
14263 
14264  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14265  ++nextSuballocIt;
14266  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14267  srcSuballocIt = nextSuballocIt;
14268 
14269  pDstMetadata->m_Suballocations.push_back(suballoc);
14270 
14271  move.srcBlockIndex = srcOrigBlockIndex;
14272  move.dstBlockIndex = dstOrigBlockIndex;
14273  move.srcOffset = srcAllocOffset;
14274  move.dstOffset = dstAllocOffset;
14275  move.size = srcAllocSize;
14276 
14277  moves.push_back(move);
14278  }
14279  }
14280  }
14281  }
14282 
14283  m_BlockInfos.clear();
14284 
14285  PostprocessMetadata();
14286 
14287  return VK_SUCCESS;
14288 }
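/*
Illustrative picture of this algorithm on a single block (offsets in KiB):

    before:  [A: 0..64] [free: 64..128] [B: 128..192] [free: 192..256]
    after:   [A: 0..64] [B: 64..128] [free: 128..256]

B slides down into the preceding free space (MOVE OPTION 1 above), the
per-block metadata is rebuilt by PostprocessMetadata below, and the single
trailing free range is re-registered. No cross-block copy is needed in this
toy case.
*/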
14289 
14290 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14291 {
14292  const size_t blockCount = m_pBlockVector->GetBlockCount();
14293  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14294  {
14295  VmaBlockMetadata_Generic* const pMetadata =
14296  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14297  pMetadata->m_FreeCount = 0;
14298  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14299  pMetadata->m_FreeSuballocationsBySize.clear();
14300  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14301  it != pMetadata->m_Suballocations.end(); )
14302  {
14303  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14304  {
14305  VmaSuballocationList::iterator nextIt = it;
14306  ++nextIt;
14307  pMetadata->m_Suballocations.erase(it);
14308  it = nextIt;
14309  }
14310  else
14311  {
14312  ++it;
14313  }
14314  }
14315  }
14316 }
14317 
14318 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14319 {
14320  const size_t blockCount = m_pBlockVector->GetBlockCount();
14321  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14322  {
14323  VmaBlockMetadata_Generic* const pMetadata =
14324  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14325  const VkDeviceSize blockSize = pMetadata->GetSize();
14326 
14327  // No allocations in this block - entire area is free.
14328  if(pMetadata->m_Suballocations.empty())
14329  {
14330  pMetadata->m_FreeCount = 1;
14331  //pMetadata->m_SumFreeSize is already set to blockSize.
14332  VmaSuballocation suballoc = {
14333  0, // offset
14334  blockSize, // size
14335  VMA_NULL, // hAllocation
14336  VMA_SUBALLOCATION_TYPE_FREE };
14337  pMetadata->m_Suballocations.push_back(suballoc);
14338  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14339  }
14340  // There are some allocations in this block.
14341  else
14342  {
14343  VkDeviceSize offset = 0;
14344  VmaSuballocationList::iterator it;
14345  for(it = pMetadata->m_Suballocations.begin();
14346  it != pMetadata->m_Suballocations.end();
14347  ++it)
14348  {
14349  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14350  VMA_ASSERT(it->offset >= offset);
14351 
14352  // Need to insert preceding free space.
14353  if(it->offset > offset)
14354  {
14355  ++pMetadata->m_FreeCount;
14356  const VkDeviceSize freeSize = it->offset - offset;
14357  VmaSuballocation suballoc = {
14358  offset, // offset
14359  freeSize, // size
14360  VMA_NULL, // hAllocation
14361  VMA_SUBALLOCATION_TYPE_FREE };
14362  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14363  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14364  {
14365  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14366  }
14367  }
14368 
14369  pMetadata->m_SumFreeSize -= it->size;
14370  offset = it->offset + it->size;
14371  }
14372 
14373  // Need to insert trailing free space.
14374  if(offset < blockSize)
14375  {
14376  ++pMetadata->m_FreeCount;
14377  const VkDeviceSize freeSize = blockSize - offset;
14378  VmaSuballocation suballoc = {
14379  offset, // offset
14380  freeSize, // size
14381  VMA_NULL, // hAllocation
14382  VMA_SUBALLOCATION_TYPE_FREE };
14383  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14384  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14385  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14386  {
14387  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14388  }
14389  }
14390 
14391  VMA_SORT(
14392  pMetadata->m_FreeSuballocationsBySize.begin(),
14393  pMetadata->m_FreeSuballocationsBySize.end(),
14394  VmaSuballocationItemSizeLess());
14395  }
14396 
14397  VMA_HEAVY_ASSERT(pMetadata->Validate());
14398  }
14399 }
14400 
14401 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14402 {
14403  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14404  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14405  // Advance while existing suballocations start before the new one. Keeping the
14406  // offset test in the loop condition guarantees the iterator always advances,
14407  // so the loop terminates (the original form could spin forever).
14408  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
14409  {
14410  ++it;
14411  }
14412  pMetadata->m_Suballocations.insert(it, suballoc);
14413 }
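/*
Design note: m_Suballocations stays sorted by offset, so the insertion point is
the first element with offset >= suballoc.offset. Inserting before it preserves
the front-to-back ordering that PreprocessMetadata and PostprocessMetadata rely
on when they walk the list.
*/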
14414 
14415 ////////////////////////////////////////////////////////////////////////////////
14416 // VmaBlockVectorDefragmentationContext
14417 
14418 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14419  VmaAllocator hAllocator,
14420  VmaPool hCustomPool,
14421  VmaBlockVector* pBlockVector,
14422  uint32_t currFrameIndex) :
14423  res(VK_SUCCESS),
14424  mutexLocked(false),
14425  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14426  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14427  defragmentationMovesProcessed(0),
14428  defragmentationMovesCommitted(0),
14429  hasDefragmentationPlan(0),
14430  m_hAllocator(hAllocator),
14431  m_hCustomPool(hCustomPool),
14432  m_pBlockVector(pBlockVector),
14433  m_CurrFrameIndex(currFrameIndex),
14434  m_pAlgorithm(VMA_NULL),
14435  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14436  m_AllAllocations(false)
14437 {
14438 }
14439 
14440 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14441 {
14442  vma_delete(m_hAllocator, m_pAlgorithm);
14443 }
14444 
14445 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14446 {
14447  AllocInfo info = { hAlloc, pChanged };
14448  m_Allocations.push_back(info);
14449 }
14450 
14451 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14452 {
14453  const bool allAllocations = m_AllAllocations ||
14454  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14455 
14456  /********************************
14457  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14458  ********************************/
14459 
14460  /*
14461  Fast algorithm is supported only when certain criteria are met:
14462  - VMA_DEBUG_MARGIN is 0.
14463  - All allocations in this block vector are moveable.
14464  - There is no possibility of image/buffer granularity conflict.
14465  - The defragmentation is not incremental.
14466  */
14467  if(VMA_DEBUG_MARGIN == 0 &&
14468  allAllocations &&
14469  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14470  (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) == 0)
14471  {
14472  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14473  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14474  }
14475  else
14476  {
14477  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14478  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14479  }
14480 
14481  if(allAllocations)
14482  {
14483  m_pAlgorithm->AddAll();
14484  }
14485  else
14486  {
14487  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14488  {
14489  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14490  }
14491  }
14492 }
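/*
Illustrative consequence of the choice above: any defragmentation requested
with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL always gets the generic algorithm:

    VmaDefragmentationInfo2 info = {};
    info.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL; // -> _Generic

whereas a one-shot run over a fully movable block vector with
VMA_DEBUG_MARGIN == 0 and no granularity conflicts gets _Fast.
*/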
14493 
14494 ////////////////////////////////////////////////////////////////////////////////
14495 // VmaDefragmentationContext
14496 
14497 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14498  VmaAllocator hAllocator,
14499  uint32_t currFrameIndex,
14500  uint32_t flags,
14501  VmaDefragmentationStats* pStats) :
14502  m_hAllocator(hAllocator),
14503  m_CurrFrameIndex(currFrameIndex),
14504  m_Flags(flags),
14505  m_pStats(pStats),
14506  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14507 {
14508  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14509 }
14510 
14511 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14512 {
14513  for(size_t i = m_CustomPoolContexts.size(); i--; )
14514  {
14515  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14516  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14517  vma_delete(m_hAllocator, pBlockVectorCtx);
14518  }
14519  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14520  {
14521  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14522  if(pBlockVectorCtx)
14523  {
14524  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14525  vma_delete(m_hAllocator, pBlockVectorCtx);
14526  }
14527  }
14528 }
14529 
14530 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
14531 {
14532  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14533  {
14534  VmaPool pool = pPools[poolIndex];
14535  VMA_ASSERT(pool);
14536  // Pools with algorithm other than default are not defragmented.
14537  if(pool->m_BlockVector.GetAlgorithm() == 0)
14538  {
14539  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14540 
14541  for(size_t i = m_CustomPoolContexts.size(); i--; )
14542  {
14543  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14544  {
14545  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14546  break;
14547  }
14548  }
14549 
14550  if(!pBlockVectorDefragCtx)
14551  {
14552  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14553  m_hAllocator,
14554  pool,
14555  &pool->m_BlockVector,
14556  m_CurrFrameIndex);
14557  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14558  }
14559 
14560  pBlockVectorDefragCtx->AddAll();
14561  }
14562  }
14563 }
14564 
14565 void VmaDefragmentationContext_T::AddAllocations(
14566  uint32_t allocationCount,
14567  const VmaAllocation* pAllocations,
14568  VkBool32* pAllocationsChanged)
14569 {
14570  // Dispatch pAllocations among defragmentators. Create them when necessary.
14571  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14572  {
14573  const VmaAllocation hAlloc = pAllocations[allocIndex];
14574  VMA_ASSERT(hAlloc);
14575  // DedicatedAlloc cannot be defragmented.
14576  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14577  // Lost allocation cannot be defragmented.
14578  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14579  {
14580  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14581 
14582  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14583  // This allocation belongs to custom pool.
14584  if(hAllocPool != VK_NULL_HANDLE)
14585  {
14586  // Pools with algorithm other than default are not defragmented.
14587  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14588  {
14589  for(size_t i = m_CustomPoolContexts.size(); i--; )
14590  {
14591  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14592  {
14593  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14594  break;
14595  }
14596  }
14597  if(!pBlockVectorDefragCtx)
14598  {
14599  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14600  m_hAllocator,
14601  hAllocPool,
14602  &hAllocPool->m_BlockVector,
14603  m_CurrFrameIndex);
14604  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14605  }
14606  }
14607  }
14608  // This allocation belongs to default pool.
14609  else
14610  {
14611  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14612  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14613  if(!pBlockVectorDefragCtx)
14614  {
14615  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14616  m_hAllocator,
14617  VMA_NULL, // hCustomPool
14618  m_hAllocator->m_pBlockVectors[memTypeIndex],
14619  m_CurrFrameIndex);
14620  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14621  }
14622  }
14623 
14624  if(pBlockVectorDefragCtx)
14625  {
14626  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14627  &pAllocationsChanged[allocIndex] : VMA_NULL;
14628  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14629  }
14630  }
14631  }
14632 }
14633 
14634 VkResult VmaDefragmentationContext_T::Defragment(
14635  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14636  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14637  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14638 {
14639  if(pStats)
14640  {
14641  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14642  }
14643 
14644  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14645  {
14646  // For incremental defragmentations, we just earmark how much we can move.
14647  // The real meat is in the defragmentation steps.
14648  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14649  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14650 
14651  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14652  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14653 
14654  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14655  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14656  return VK_SUCCESS;
14657 
14658  return VK_NOT_READY;
14659  }
14660 
14661  if(commandBuffer == VK_NULL_HANDLE)
14662  {
14663  maxGpuBytesToMove = 0;
14664  maxGpuAllocationsToMove = 0;
14665  }
14666 
14667  VkResult res = VK_SUCCESS;
14668 
14669  // Process default pools.
14670  for(uint32_t memTypeIndex = 0;
14671  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14672  ++memTypeIndex)
14673  {
14674  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14675  if(pBlockVectorCtx)
14676  {
14677  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14678  pBlockVectorCtx->GetBlockVector()->Defragment(
14679  pBlockVectorCtx,
14680  pStats, flags,
14681  maxCpuBytesToMove, maxCpuAllocationsToMove,
14682  maxGpuBytesToMove, maxGpuAllocationsToMove,
14683  commandBuffer);
14684  if(pBlockVectorCtx->res != VK_SUCCESS)
14685  {
14686  res = pBlockVectorCtx->res;
14687  }
14688  }
14689  }
14690 
14691  // Process custom pools.
14692  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14693  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14694  ++customCtxIndex)
14695  {
14696  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14697  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14698  pBlockVectorCtx->GetBlockVector()->Defragment(
14699  pBlockVectorCtx,
14700  pStats, flags,
14701  maxCpuBytesToMove, maxCpuAllocationsToMove,
14702  maxGpuBytesToMove, maxGpuAllocationsToMove,
14703  commandBuffer);
14704  if(pBlockVectorCtx->res != VK_SUCCESS)
14705  {
14706  res = pBlockVectorCtx->res;
14707  }
14708  }
14709 
14710  return res;
14711 }
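/*
Minimal usage sketch of the one-shot path (illustrative; error handling
omitted; `allocator`, `allocs`, and `cmdBuf` are assumptions of this example):

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocs.size();
    info.pAllocations = allocs.data();
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    info.maxGpuBytesToMove = VK_WHOLE_SIZE;
    info.maxGpuAllocationsToMove = UINT32_MAX;
    info.commandBuffer = cmdBuf; // recording command buffer for GPU copies

    VmaDefragmentationContext ctx;
    vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    // ...submit cmdBuf and wait for it to complete...
    vmaDefragmentationEnd(allocator, ctx);
*/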
14712 
14713 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14714 {
14715  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14716  uint32_t movesLeft = pInfo->moveCount;
14717 
14718  // Process default pools.
14719  for(uint32_t memTypeIndex = 0;
14720  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14721  ++memTypeIndex)
14722  {
14723  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14724  if(pBlockVectorCtx)
14725  {
14726  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14727 
14728  if(!pBlockVectorCtx->hasDefragmentationPlan)
14729  {
14730  pBlockVectorCtx->GetBlockVector()->Defragment(
14731  pBlockVectorCtx,
14732  m_pStats, m_Flags,
14733  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14734  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14735  VK_NULL_HANDLE);
14736 
14737  if(pBlockVectorCtx->res < VK_SUCCESS)
14738  continue;
14739 
14740  pBlockVectorCtx->hasDefragmentationPlan = true;
14741  }
14742 
14743  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14744  pBlockVectorCtx,
14745  pCurrentMove, movesLeft);
14746 
14747  movesLeft -= processed;
14748  pCurrentMove += processed;
14749  }
14750  }
14751 
14752  // Process custom pools.
14753  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14754  customCtxIndex < customCtxCount;
14755  ++customCtxIndex)
14756  {
14757  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14758  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14759 
14760  if(!pBlockVectorCtx->hasDefragmentationPlan)
14761  {
14762  pBlockVectorCtx->GetBlockVector()->Defragment(
14763  pBlockVectorCtx,
14764  m_pStats, m_Flags,
14765  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14766  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14767  VK_NULL_HANDLE);
14768 
14769  if(pBlockVectorCtx->res < VK_SUCCESS)
14770  continue;
14771 
14772  pBlockVectorCtx->hasDefragmentationPlan = true;
14773  }
14774 
14775  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14776  pBlockVectorCtx,
14777  pCurrentMove, movesLeft);
14778 
14779  movesLeft -= processed;
14780  pCurrentMove += processed;
14781  }
14782 
14783  pInfo->moveCount = pInfo->moveCount - movesLeft;
14784 
14785  return VK_SUCCESS;
14786 }
14787 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14788 {
14789  VkResult res = VK_SUCCESS;
14790 
14791  // Process default pools.
14792  for(uint32_t memTypeIndex = 0;
14793  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14794  ++memTypeIndex)
14795  {
14796  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14797  if(pBlockVectorCtx)
14798  {
14799  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14800 
14801  if(!pBlockVectorCtx->hasDefragmentationPlan)
14802  {
14803  res = VK_NOT_READY;
14804  continue;
14805  }
14806 
14807  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14808  pBlockVectorCtx, m_pStats);
14809 
14810  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14811  res = VK_NOT_READY;
14812  }
14813  }
14814 
14815  // Process custom pools.
14816  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14817  customCtxIndex < customCtxCount;
14818  ++customCtxIndex)
14819  {
14820  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14821  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14822 
14823  if(!pBlockVectorCtx->hasDefragmentationPlan)
14824  {
14825  res = VK_NOT_READY;
14826  continue;
14827  }
14828 
14829  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14830  pBlockVectorCtx, m_pStats);
14831 
14832  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14833  res = VK_NOT_READY;
14834  }
14835 
14836  return res;
14837 }
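/*
Minimal usage sketch of the incremental path (illustrative; `allocator` and
`movesBuffer` are assumptions of this example):

    VmaDefragmentationInfo2 info = {};
    info.flags = VMA_DEFRAGMENTATION_FLAG_INCREMENTAL;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    while(res == VK_NOT_READY)
    {
        VmaDefragmentationPassInfo pass = {};
        pass.moveCount = (uint32_t)movesBuffer.size();
        pass.pMoves = movesBuffer.data();
        vmaBeginDefragmentationPass(allocator, ctx, &pass);
        // Recreate/copy the pass.moveCount resources reported in pass.pMoves.
        res = vmaEndDefragmentationPass(allocator, ctx); // VK_SUCCESS when done
    }
    vmaDefragmentationEnd(allocator, ctx);
*/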
14838 
14839 ////////////////////////////////////////////////////////////////////////////////
14840 // VmaRecorder
14841 
14842 #if VMA_RECORDING_ENABLED
14843 
14844 VmaRecorder::VmaRecorder() :
14845  m_UseMutex(true),
14846  m_Flags(0),
14847  m_File(VMA_NULL),
14848  m_Freq(INT64_MAX),
14849  m_StartCounter(INT64_MAX)
14850 {
14851 }
14852 
14853 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
14854 {
14855  m_UseMutex = useMutex;
14856  m_Flags = settings.flags;
14857 
14858  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
14859  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
14860 
14861  // Open file for writing.
14862  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
14863  if(err != 0)
14864  {
14865  return VK_ERROR_INITIALIZATION_FAILED;
14866  }
14867 
14868  // Write header.
14869  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
14870  fprintf(m_File, "%s\n", "1,8");
14871 
14872  return VK_SUCCESS;
14873 }
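/*
Illustrative sketch: this recorder is enabled by passing VmaRecordSettings at
allocator creation (requires VMA_RECORDING_ENABLED and, given the fopen_s /
QueryPerformanceCounter calls above, a Windows build):

    VmaRecordSettings record = {};
    record.pFilePath = "vma_calls.csv"; // CSV stream written by the fprintf calls below
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pRecordSettings = &record;
*/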
14874 
14875 VmaRecorder::~VmaRecorder()
14876 {
14877  if(m_File != VMA_NULL)
14878  {
14879  fclose(m_File);
14880  }
14881 }
14882 
14883 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14884 {
14885  CallParams callParams;
14886  GetBasicParams(callParams);
14887 
14888  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14889  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14890  Flush();
14891 }
14892 
14893 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14894 {
14895  CallParams callParams;
14896  GetBasicParams(callParams);
14897 
14898  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14899  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14900  Flush();
14901 }
14902 
14903 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14904 {
14905  CallParams callParams;
14906  GetBasicParams(callParams);
14907 
14908  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14909  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14910  createInfo.memoryTypeIndex,
14911  createInfo.flags,
14912  createInfo.blockSize,
14913  (uint64_t)createInfo.minBlockCount,
14914  (uint64_t)createInfo.maxBlockCount,
14915  createInfo.frameInUseCount,
14916  pool);
14917  Flush();
14918 }
14919 
14920 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14921 {
14922  CallParams callParams;
14923  GetBasicParams(callParams);
14924 
14925  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14926  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14927  pool);
14928  Flush();
14929 }
14930 
14931 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14932  const VkMemoryRequirements& vkMemReq,
14933  const VmaAllocationCreateInfo& createInfo,
14934  VmaAllocation allocation)
14935 {
14936  CallParams callParams;
14937  GetBasicParams(callParams);
14938 
14939  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14940  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14941  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14942  vkMemReq.size,
14943  vkMemReq.alignment,
14944  vkMemReq.memoryTypeBits,
14945  createInfo.flags,
14946  createInfo.usage,
14947  createInfo.requiredFlags,
14948  createInfo.preferredFlags,
14949  createInfo.memoryTypeBits,
14950  createInfo.pool,
14951  allocation,
14952  userDataStr.GetString());
14953  Flush();
14954 }
14955 
14956 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14957  const VkMemoryRequirements& vkMemReq,
14958  const VmaAllocationCreateInfo& createInfo,
14959  uint64_t allocationCount,
14960  const VmaAllocation* pAllocations)
14961 {
14962  CallParams callParams;
14963  GetBasicParams(callParams);
14964 
14965  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14966  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14967  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14968  vkMemReq.size,
14969  vkMemReq.alignment,
14970  vkMemReq.memoryTypeBits,
14971  createInfo.flags,
14972  createInfo.usage,
14973  createInfo.requiredFlags,
14974  createInfo.preferredFlags,
14975  createInfo.memoryTypeBits,
14976  createInfo.pool);
14977  PrintPointerList(allocationCount, pAllocations);
14978  fprintf(m_File, ",%s\n", userDataStr.GetString());
14979  Flush();
14980 }
14981 
14982 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14983  const VkMemoryRequirements& vkMemReq,
14984  bool requiresDedicatedAllocation,
14985  bool prefersDedicatedAllocation,
14986  const VmaAllocationCreateInfo& createInfo,
14987  VmaAllocation allocation)
14988 {
14989  CallParams callParams;
14990  GetBasicParams(callParams);
14991 
14992  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14993  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14994  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14995  vkMemReq.size,
14996  vkMemReq.alignment,
14997  vkMemReq.memoryTypeBits,
14998  requiresDedicatedAllocation ? 1 : 0,
14999  prefersDedicatedAllocation ? 1 : 0,
15000  createInfo.flags,
15001  createInfo.usage,
15002  createInfo.requiredFlags,
15003  createInfo.preferredFlags,
15004  createInfo.memoryTypeBits,
15005  createInfo.pool,
15006  allocation,
15007  userDataStr.GetString());
15008  Flush();
15009 }
15010 
15011 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15012  const VkMemoryRequirements& vkMemReq,
15013  bool requiresDedicatedAllocation,
15014  bool prefersDedicatedAllocation,
15015  const VmaAllocationCreateInfo& createInfo,
15016  VmaAllocation allocation)
15017 {
15018  CallParams callParams;
15019  GetBasicParams(callParams);
15020 
15021  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15022  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15023  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15024  vkMemReq.size,
15025  vkMemReq.alignment,
15026  vkMemReq.memoryTypeBits,
15027  requiresDedicatedAllocation ? 1 : 0,
15028  prefersDedicatedAllocation ? 1 : 0,
15029  createInfo.flags,
15030  createInfo.usage,
15031  createInfo.requiredFlags,
15032  createInfo.preferredFlags,
15033  createInfo.memoryTypeBits,
15034  createInfo.pool,
15035  allocation,
15036  userDataStr.GetString());
15037  Flush();
15038 }
15039 
15040 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15041  VmaAllocation allocation)
15042 {
15043  CallParams callParams;
15044  GetBasicParams(callParams);
15045 
15046  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15047  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15048  allocation);
15049  Flush();
15050 }
15051 
15052 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15053  uint64_t allocationCount,
15054  const VmaAllocation* pAllocations)
15055 {
15056  CallParams callParams;
15057  GetBasicParams(callParams);
15058 
15059  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15060  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15061  PrintPointerList(allocationCount, pAllocations);
15062  fprintf(m_File, "\n");
15063  Flush();
15064 }
15065 
15066 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15067  VmaAllocation allocation,
15068  const void* pUserData)
15069 {
15070  CallParams callParams;
15071  GetBasicParams(callParams);
15072 
15073  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15074  UserDataString userDataStr(
15075  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15076  pUserData);
15077  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15078  allocation,
15079  userDataStr.GetString());
15080  Flush();
15081 }
15082 
15083 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15084  VmaAllocation allocation)
15085 {
15086  CallParams callParams;
15087  GetBasicParams(callParams);
15088 
15089  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15090  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15091  allocation);
15092  Flush();
15093 }
15094 
15095 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15096  VmaAllocation allocation)
15097 {
15098  CallParams callParams;
15099  GetBasicParams(callParams);
15100 
15101  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15102  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15103  allocation);
15104  Flush();
15105 }
15106 
15107 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15108  VmaAllocation allocation)
15109 {
15110  CallParams callParams;
15111  GetBasicParams(callParams);
15112 
15113  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15114  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15115  allocation);
15116  Flush();
15117 }
15118 
15119 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15120  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15121 {
15122  CallParams callParams;
15123  GetBasicParams(callParams);
15124 
15125  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15126  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15127  allocation,
15128  offset,
15129  size);
15130  Flush();
15131 }
15132 
15133 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15134  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15135 {
15136  CallParams callParams;
15137  GetBasicParams(callParams);
15138 
15139  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15140  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15141  allocation,
15142  offset,
15143  size);
15144  Flush();
15145 }
15146 
15147 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15148  const VkBufferCreateInfo& bufCreateInfo,
15149  const VmaAllocationCreateInfo& allocCreateInfo,
15150  VmaAllocation allocation)
15151 {
15152  CallParams callParams;
15153  GetBasicParams(callParams);
15154 
15155  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15156  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15157  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15158  bufCreateInfo.flags,
15159  bufCreateInfo.size,
15160  bufCreateInfo.usage,
15161  bufCreateInfo.sharingMode,
15162  allocCreateInfo.flags,
15163  allocCreateInfo.usage,
15164  allocCreateInfo.requiredFlags,
15165  allocCreateInfo.preferredFlags,
15166  allocCreateInfo.memoryTypeBits,
15167  allocCreateInfo.pool,
15168  allocation,
15169  userDataStr.GetString());
15170  Flush();
15171 }
15172 
15173 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15174  const VkImageCreateInfo& imageCreateInfo,
15175  const VmaAllocationCreateInfo& allocCreateInfo,
15176  VmaAllocation allocation)
15177 {
15178  CallParams callParams;
15179  GetBasicParams(callParams);
15180 
15181  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15182  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15183  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15184  imageCreateInfo.flags,
15185  imageCreateInfo.imageType,
15186  imageCreateInfo.format,
15187  imageCreateInfo.extent.width,
15188  imageCreateInfo.extent.height,
15189  imageCreateInfo.extent.depth,
15190  imageCreateInfo.mipLevels,
15191  imageCreateInfo.arrayLayers,
15192  imageCreateInfo.samples,
15193  imageCreateInfo.tiling,
15194  imageCreateInfo.usage,
15195  imageCreateInfo.sharingMode,
15196  imageCreateInfo.initialLayout,
15197  allocCreateInfo.flags,
15198  allocCreateInfo.usage,
15199  allocCreateInfo.requiredFlags,
15200  allocCreateInfo.preferredFlags,
15201  allocCreateInfo.memoryTypeBits,
15202  allocCreateInfo.pool,
15203  allocation,
15204  userDataStr.GetString());
15205  Flush();
15206 }
15207 
15208 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15209  VmaAllocation allocation)
15210 {
15211  CallParams callParams;
15212  GetBasicParams(callParams);
15213 
15214  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15215  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15216  allocation);
15217  Flush();
15218 }
15219 
15220 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15221  VmaAllocation allocation)
15222 {
15223  CallParams callParams;
15224  GetBasicParams(callParams);
15225 
15226  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15227  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15228  allocation);
15229  Flush();
15230 }
15231 
15232 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15233  VmaAllocation allocation)
15234 {
15235  CallParams callParams;
15236  GetBasicParams(callParams);
15237 
15238  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15239  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15240  allocation);
15241  Flush();
15242 }
15243 
15244 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15245  VmaAllocation allocation)
15246 {
15247  CallParams callParams;
15248  GetBasicParams(callParams);
15249 
15250  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15251  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15252  allocation);
15253  Flush();
15254 }
15255 
15256 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15257  VmaPool pool)
15258 {
15259  CallParams callParams;
15260  GetBasicParams(callParams);
15261 
15262  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15263  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15264  pool);
15265  Flush();
15266 }
15267 
15268 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15269  const VmaDefragmentationInfo2& info,
15270  VmaDefragmentationContext ctx)
15271 {
15272  CallParams callParams;
15273  GetBasicParams(callParams);
15274 
15275  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15276  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15277  info.flags);
15278  PrintPointerList(info.allocationCount, info.pAllocations);
15279  fprintf(m_File, ",");
15280  PrintPointerList(info.poolCount, info.pPools);
15281  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15282  info.maxCpuBytesToMove,
15283  info.maxCpuAllocationsToMove,
15284  info.maxGpuBytesToMove,
15285  info.maxGpuAllocationsToMove,
15286  info.commandBuffer,
15287  ctx);
15288  Flush();
15289 }
15290 
15291 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15292  VmaDefragmentationContext ctx)
15293 {
15294  CallParams callParams;
15295  GetBasicParams(callParams);
15296 
15297  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15298  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15299  ctx);
15300  Flush();
15301 }
15302 
15303 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15304  VmaPool pool,
15305  const char* name)
15306 {
15307  CallParams callParams;
15308  GetBasicParams(callParams);
15309 
15310  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15311  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15312  pool, name != VMA_NULL ? name : "");
15313  Flush();
15314 }
15315 
15316 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15317 {
15318  if(pUserData != VMA_NULL)
15319  {
15320  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15321  {
15322  m_Str = (const char*)pUserData;
15323  }
15324  else
15325  {
15326  sprintf_s(m_PtrStr, "%p", pUserData);
15327  m_Str = m_PtrStr;
15328  }
15329  }
15330  else
15331  {
15332  m_Str = "";
15333  }
15334 }
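// Illustrative usage sketch for the string/pointer distinction above: with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set, pUserData is treated as
// a null-terminated string and recorded verbatim; otherwise only the pointer
// value is printed. The string below is hypothetical.
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
//   allocCreateInfo.pUserData = (void*)"MyVertexBuffer"; // shows up as text in the recording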
15335 
15336 void VmaRecorder::WriteConfiguration(
15337  const VkPhysicalDeviceProperties& devProps,
15338  const VkPhysicalDeviceMemoryProperties& memProps,
15339  uint32_t vulkanApiVersion,
15340  bool dedicatedAllocationExtensionEnabled,
15341  bool bindMemory2ExtensionEnabled,
15342  bool memoryBudgetExtensionEnabled,
15343  bool deviceCoherentMemoryExtensionEnabled)
15344 {
15345  fprintf(m_File, "Config,Begin\n");
15346 
15347  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15348 
15349  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15350  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15351  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15352  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15353  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15354  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15355 
15356  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15357  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15358  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15359 
15360  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15361  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15362  {
15363  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15364  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15365  }
15366  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15367  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15368  {
15369  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15370  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15371  }
15372 
15373  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15374  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15375  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15376  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15377 
15378  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15379  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15380  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15381  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15382  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15383  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15384  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15385  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15386  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15387 
15388  fprintf(m_File, "Config,End\n");
15389 }
15390 
15391 void VmaRecorder::GetBasicParams(CallParams& outParams)
15392 {
15393  outParams.threadId = GetCurrentThreadId();
15394 
15395  LARGE_INTEGER counter;
15396  QueryPerformanceCounter(&counter);
15397  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
15398 }
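// Worked example for the timestamp above (values hypothetical): with
// m_Freq = 10,000,000 ticks per second and
// counter.QuadPart - m_StartCounter = 3,270,000 ticks, the recorded time
// is 3,270,000 / 10,000,000 = 0.327 seconds since recording started.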
15399 
15400 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15401 {
15402  if(count)
15403  {
15404  fprintf(m_File, "%p", pItems[0]);
15405  for(uint64_t i = 1; i < count; ++i)
15406  {
15407  fprintf(m_File, " %p", pItems[i]);
15408  }
15409  }
15410 }
15411 
15412 void VmaRecorder::Flush()
15413 {
15414  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15415  {
15416  fflush(m_File);
15417  }
15418 }
15419 
15420 #endif // #if VMA_RECORDING_ENABLED
15421 
15422 ////////////////////////////////////////////////////////////////////////////////
15423 // VmaAllocationObjectAllocator
15424 
15425 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15426  m_Allocator(pAllocationCallbacks, 1024)
15427 {
15428 }
15429 
15430 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15431 {
15432  VmaMutexLock mutexLock(m_Mutex);
15433  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15434 }
15435 
15436 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15437 {
15438  VmaMutexLock mutexLock(m_Mutex);
15439  m_Allocator.Free(hAlloc);
15440 }
15441 
15442 ////////////////////////////////////////////////////////////////////////////////
15443 // VmaAllocator_T
15444 
15445 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15446  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15447  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15448  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15449  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15450  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15451  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15452  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15453  m_hDevice(pCreateInfo->device),
15454  m_hInstance(pCreateInfo->instance),
15455  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15456  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15457  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15458  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15459  m_HeapSizeLimitMask(0),
15460  m_PreferredLargeHeapBlockSize(0),
15461  m_PhysicalDevice(pCreateInfo->physicalDevice),
15462  m_CurrentFrameIndex(0),
15463  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15464  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15465  m_NextPoolId(0),
15466  m_GlobalMemoryTypeBits(UINT32_MAX)
15467 #if VMA_RECORDING_ENABLED
15468  ,m_pRecorder(VMA_NULL)
15469 #endif
15470 {
15471  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15472  {
15473  m_UseKhrDedicatedAllocation = false;
15474  m_UseKhrBindMemory2 = false;
15475  }
15476 
15477  if(VMA_DEBUG_DETECT_CORRUPTION)
15478  {
15479  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15480  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15481  }
15482 
15483  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15484 
15485  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15486  {
15487 #if !(VMA_DEDICATED_ALLOCATION)
15488  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15489  {
15490  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15491  }
15492 #endif
15493 #if !(VMA_BIND_MEMORY2)
15494  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15495  {
15496  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15497  }
15498 #endif
15499  }
15500 #if !(VMA_MEMORY_BUDGET)
15501  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15502  {
15503  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15504  }
15505 #endif
15506 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15507  if(m_UseKhrBufferDeviceAddress)
15508  {
15509  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15510  }
15511 #endif
15512 #if VMA_VULKAN_VERSION < 1002000
15513  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15514  {
15515  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15516  }
15517 #endif
15518 #if VMA_VULKAN_VERSION < 1001000
15519  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15520  {
15521  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15522  }
15523 #endif
15524 
15525  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
15526  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15527  memset(&m_MemProps, 0, sizeof(m_MemProps));
15528 
15529  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15530  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15531  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15532 
15533  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15534  {
15535  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15536  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15537  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15538  }
15539 
15540  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15541 
15542  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15543  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15544 
15545  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15546  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15547  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15548  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15549 
15550  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15551  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15552 
15553  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15554 
15555  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15556  {
15557  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15558  {
15559  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15560  if(limit != VK_WHOLE_SIZE)
15561  {
15562  m_HeapSizeLimitMask |= 1u << heapIndex;
15563  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15564  {
15565  m_MemProps.memoryHeaps[heapIndex].size = limit;
15566  }
15567  }
15568  }
15569  }
15570 
15571  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15572  {
15573  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15574 
15575  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15576  this,
15577  VK_NULL_HANDLE, // hParentPool
15578  memTypeIndex,
15579  preferredBlockSize,
15580  0,
15581  SIZE_MAX,
15582  GetBufferImageGranularity(),
15583  pCreateInfo->frameInUseCount,
15584  false, // explicitBlockSize
15585  false); // linearAlgorithm
15586  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15587  // because minBlockCount is 0.
15588  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15589 
15590  }
15591 }
15592 
15593 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15594 {
15595  VkResult res = VK_SUCCESS;
15596 
15597  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15598  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15599  {
15600 #if VMA_RECORDING_ENABLED
15601  m_pRecorder = vma_new(this, VmaRecorder)();
15602  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15603  if(res != VK_SUCCESS)
15604  {
15605  return res;
15606  }
15607  m_pRecorder->WriteConfiguration(
15608  m_PhysicalDeviceProperties,
15609  m_MemProps,
15610  m_VulkanApiVersion,
15611  m_UseKhrDedicatedAllocation,
15612  m_UseKhrBindMemory2,
15613  m_UseExtMemoryBudget,
15614  m_UseAmdDeviceCoherentMemory);
15615  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15616 #else
15617  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15618  return VK_ERROR_FEATURE_NOT_PRESENT;
15619 #endif
15620  }
15621 
15622 #if VMA_MEMORY_BUDGET
15623  if(m_UseExtMemoryBudget)
15624  {
15625  UpdateVulkanBudget();
15626  }
15627 #endif // #if VMA_MEMORY_BUDGET
15628 
15629  return res;
15630 }
15631 
15632 VmaAllocator_T::~VmaAllocator_T()
15633 {
15634 #if VMA_RECORDING_ENABLED
15635  if(m_pRecorder != VMA_NULL)
15636  {
15637  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15638  vma_delete(this, m_pRecorder);
15639  }
15640 #endif
15641 
15642  VMA_ASSERT(m_Pools.empty());
15643 
15644  for(size_t i = GetMemoryTypeCount(); i--; )
15645  {
15646  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15647  {
15648  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15649  }
15650 
15651  vma_delete(this, m_pDedicatedAllocations[i]);
15652  vma_delete(this, m_pBlockVectors[i]);
15653  }
15654 }
15655 
15656 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15657 {
15658 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15659  ImportVulkanFunctions_Static();
15660 #endif
15661 
15662  if(pVulkanFunctions != VMA_NULL)
15663  {
15664  ImportVulkanFunctions_Custom(pVulkanFunctions);
15665  }
15666 
15667 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15668  ImportVulkanFunctions_Dynamic();
15669 #endif
15670 
15671  ValidateVulkanFunctions();
15672 }
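// Illustrative usage sketch for the import order above: statically linked
// entry points (if enabled) are taken first, then any user-provided pointers
// override them, then vkGet*ProcAddr fills whatever is still VMA_NULL (if
// dynamic fetching is enabled). A caller loading Vulkan manually could
// therefore pass a partial set (names below are hypothetical):
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory; // e.g. from a custom loader
//   VmaAllocatorCreateInfo allocatorCreateInfo = {};
//   allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;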
15673 
15674 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15675 
15676 void VmaAllocator_T::ImportVulkanFunctions_Static()
15677 {
15678  // Vulkan 1.0
15679  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15680  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15681  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15682  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15683  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15684  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15685  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15686  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15687  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15688  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15689  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15690  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15691  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15692  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15693  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15694  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15695  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15696 
15697  // Vulkan 1.1
15698 #if VMA_VULKAN_VERSION >= 1001000
15699  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15700  {
15701  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
15702  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
15703  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
15704  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
15705  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
15706  }
15707 #endif
15708 }
15709 
15710 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15711 
15712 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
15713 {
15714  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
15715 
15716 #define VMA_COPY_IF_NOT_NULL(funcName) \
15717  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15718 
15719  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15720  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15721  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15722  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15723  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15724  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15725  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15726  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15727  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15728  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15729  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15730  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15731  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15732  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15733  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15734  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15735  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15736 
15737 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15738  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15739  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15740 #endif
15741 
15742 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15743  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15744  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15745 #endif
15746 
15747 #if VMA_MEMORY_BUDGET
15748  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15749 #endif
15750 
15751 #undef VMA_COPY_IF_NOT_NULL
15752 }
15753 
15754 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15755 
15756 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
15757 {
15758 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
15759  if(m_VulkanFunctions.memberName == VMA_NULL) \
15760  m_VulkanFunctions.memberName = \
15761  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
15762 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
15763  if(m_VulkanFunctions.memberName == VMA_NULL) \
15764  m_VulkanFunctions.memberName = \
15765  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
15766 
15767  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
15768  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
15769  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
15770  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
15771  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
15772  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
15773  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
15774  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
15775  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
15776  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
15777  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
15778  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
15779  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
15780  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
15781  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
15782  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
15783  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
15784 
15785 #if VMA_DEDICATED_ALLOCATION
15786  if(m_UseKhrDedicatedAllocation)
15787  {
15788  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
15789  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
15790  }
15791 #endif
15792 
15793 #if VMA_BIND_MEMORY2
15794  if(m_UseKhrBindMemory2)
15795  {
15796  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
15797  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
15798  }
15799 #endif // #if VMA_BIND_MEMORY2
15800 
15801 #if VMA_MEMORY_BUDGET
15802  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15803  {
15804  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
15805  }
15806 #endif // #if VMA_MEMORY_BUDGET
15807 
15808 #undef VMA_FETCH_DEVICE_FUNC
15809 #undef VMA_FETCH_INSTANCE_FUNC
15810 }
15811 
15812 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15813 
15814 void VmaAllocator_T::ValidateVulkanFunctions()
15815 {
15816  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15817  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15818  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15819  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15820  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15821  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15822  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15823  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15824  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15825  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15826  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15827  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15828  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15829  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15830  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15831  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15832  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15833 
15834 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15835  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15836  {
15837  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15838  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15839  }
15840 #endif
15841 
15842 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15843  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15844  {
15845  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15846  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15847  }
15848 #endif
15849 
15850 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15851  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15852  {
15853  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15854  }
15855 #endif
15856 }
15857 
15858 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15859 {
15860  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15861  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15862  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15863  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15864 }
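// Worked example for the heuristic above, assuming the library defaults of
// VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB:
// a 512 MiB heap counts as small and gets 512 MiB / 8 = 64 MiB blocks, while
// an 8 GiB heap gets the 256 MiB default; both values are then rounded up to
// a multiple of 32 bytes by VmaAlignUp.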
15865 
15866 VkResult VmaAllocator_T::AllocateMemoryOfType(
15867  VkDeviceSize size,
15868  VkDeviceSize alignment,
15869  bool dedicatedAllocation,
15870  VkBuffer dedicatedBuffer,
15871  VkBufferUsageFlags dedicatedBufferUsage,
15872  VkImage dedicatedImage,
15873  const VmaAllocationCreateInfo& createInfo,
15874  uint32_t memTypeIndex,
15875  VmaSuballocationType suballocType,
15876  size_t allocationCount,
15877  VmaAllocation* pAllocations)
15878 {
15879  VMA_ASSERT(pAllocations != VMA_NULL);
15880  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
15881 
15882  VmaAllocationCreateInfo finalCreateInfo = createInfo;
15883 
15884  // If memory type is not HOST_VISIBLE, disable MAPPED.
15885  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15886  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15887  {
15888  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15889  }
15890  // If memory is lazily allocated, it should be always dedicated.
15891  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15892  {
15893  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15894  }
15895 
15896  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
15897  VMA_ASSERT(blockVector);
15898 
15899  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
15900  bool preferDedicatedMemory =
15901  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
15902  dedicatedAllocation ||
15903  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
15904  size > preferredBlockSize / 2;
15905 
15906  if(preferDedicatedMemory &&
15907  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
15908  finalCreateInfo.pool == VK_NULL_HANDLE)
15909  {
15910  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15911  }
15912 
15913  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15914  {
15915  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15916  {
15917  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15918  }
15919  else
15920  {
15921  return AllocateDedicatedMemory(
15922  size,
15923  suballocType,
15924  memTypeIndex,
15925  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15926  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15927  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15928  finalCreateInfo.pUserData,
15929  dedicatedBuffer,
15930  dedicatedBufferUsage,
15931  dedicatedImage,
15932  allocationCount,
15933  pAllocations);
15934  }
15935  }
15936  else
15937  {
15938  VkResult res = blockVector->Allocate(
15939  m_CurrentFrameIndex.load(),
15940  size,
15941  alignment,
15942  finalCreateInfo,
15943  suballocType,
15944  allocationCount,
15945  pAllocations);
15946  if(res == VK_SUCCESS)
15947  {
15948  return res;
15949  }
15950 
15951  // Block allocation failed: Try dedicated memory.
15952  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15953  {
15954  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15955  }
15956  else
15957  {
15958  res = AllocateDedicatedMemory(
15959  size,
15960  suballocType,
15961  memTypeIndex,
15962  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15963  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15964  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15965  finalCreateInfo.pUserData,
15966  dedicatedBuffer,
15967  dedicatedBufferUsage,
15968  dedicatedImage,
15969  allocationCount,
15970  pAllocations);
15971  if(res == VK_SUCCESS)
15972  {
15973  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
15974  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
15975  return VK_SUCCESS;
15976  }
15977  else
15978  {
15979  // Everything failed: Return error code.
15980  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15981  return res;
15982  }
15983  }
15984  }
15985 }
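// Worked example for the dedicated-memory heuristic above: with the default
// 256 MiB preferred block size, a 200 MiB request exceeds half the block size
// (128 MiB), so VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is added and the
// request goes straight to vkAllocateMemory instead of the block vector,
// unless VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is set or a custom pool is used.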
15986 
15987 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15988  VkDeviceSize size,
15989  VmaSuballocationType suballocType,
15990  uint32_t memTypeIndex,
15991  bool withinBudget,
15992  bool map,
15993  bool isUserDataString,
15994  void* pUserData,
15995  VkBuffer dedicatedBuffer,
15996  VkBufferUsageFlags dedicatedBufferUsage,
15997  VkImage dedicatedImage,
15998  size_t allocationCount,
15999  VmaAllocation* pAllocations)
16000 {
16001  VMA_ASSERT(allocationCount > 0 && pAllocations);
16002 
16003  if(withinBudget)
16004  {
16005  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16006  VmaBudget heapBudget = {};
16007  GetBudget(&heapBudget, heapIndex, 1);
16008  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16009  {
16010  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16011  }
16012  }
16013 
16014  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16015  allocInfo.memoryTypeIndex = memTypeIndex;
16016  allocInfo.allocationSize = size;
16017 
16018 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16019  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16020  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16021  {
16022  if(dedicatedBuffer != VK_NULL_HANDLE)
16023  {
16024  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16025  dedicatedAllocInfo.buffer = dedicatedBuffer;
16026  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16027  }
16028  else if(dedicatedImage != VK_NULL_HANDLE)
16029  {
16030  dedicatedAllocInfo.image = dedicatedImage;
16031  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16032  }
16033  }
16034 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16035 
16036 #if VMA_BUFFER_DEVICE_ADDRESS
16037  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16038  if(m_UseKhrBufferDeviceAddress)
16039  {
16040  bool canContainBufferWithDeviceAddress = true;
16041  if(dedicatedBuffer != VK_NULL_HANDLE)
16042  {
16043  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16044  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16045  }
16046  else if(dedicatedImage != VK_NULL_HANDLE)
16047  {
16048  canContainBufferWithDeviceAddress = false;
16049  }
16050  if(canContainBufferWithDeviceAddress)
16051  {
16052  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16053  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16054  }
16055  }
16056 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16057 
16058  size_t allocIndex;
16059  VkResult res = VK_SUCCESS;
16060  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16061  {
16062  res = AllocateDedicatedMemoryPage(
16063  size,
16064  suballocType,
16065  memTypeIndex,
16066  allocInfo,
16067  map,
16068  isUserDataString,
16069  pUserData,
16070  pAllocations + allocIndex);
16071  if(res != VK_SUCCESS)
16072  {
16073  break;
16074  }
16075  }
16076 
16077  if(res == VK_SUCCESS)
16078  {
16079  // Register them in m_pDedicatedAllocations.
16080  {
16081  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16082  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16083  VMA_ASSERT(pDedicatedAllocations);
16084  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16085  {
16086  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
16087  }
16088  }
16089 
16090  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16091  }
16092  else
16093  {
16094  // Free all already created allocations.
16095  while(allocIndex--)
16096  {
16097  VmaAllocation currAlloc = pAllocations[allocIndex];
16098  VkDeviceMemory hMemory = currAlloc->GetMemory();
16099 
16100  /*
16101  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16102  before vkFreeMemory.
16103 
16104  if(currAlloc->GetMappedData() != VMA_NULL)
16105  {
16106  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16107  }
16108  */
16109 
16110  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16111  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16112  currAlloc->SetUserData(this, VMA_NULL);
16113  m_AllocationObjectAllocator.Free(currAlloc);
16114  }
16115 
16116  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16117  }
16118 
16119  return res;
16120 }
16121 
16122 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16123  VkDeviceSize size,
16124  VmaSuballocationType suballocType,
16125  uint32_t memTypeIndex,
16126  const VkMemoryAllocateInfo& allocInfo,
16127  bool map,
16128  bool isUserDataString,
16129  void* pUserData,
16130  VmaAllocation* pAllocation)
16131 {
16132  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16133  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16134  if(res < 0)
16135  {
16136  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16137  return res;
16138  }
16139 
16140  void* pMappedData = VMA_NULL;
16141  if(map)
16142  {
16143  res = (*m_VulkanFunctions.vkMapMemory)(
16144  m_hDevice,
16145  hMemory,
16146  0,
16147  VK_WHOLE_SIZE,
16148  0,
16149  &pMappedData);
16150  if(res < 0)
16151  {
16152  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16153  FreeVulkanMemory(memTypeIndex, size, hMemory);
16154  return res;
16155  }
16156  }
16157 
16158  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16159  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16160  (*pAllocation)->SetUserData(this, pUserData);
16161  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16162  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16163  {
16164  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16165  }
16166 
16167  return VK_SUCCESS;
16168 }
16169 
16170 void VmaAllocator_T::GetBufferMemoryRequirements(
16171  VkBuffer hBuffer,
16172  VkMemoryRequirements& memReq,
16173  bool& requiresDedicatedAllocation,
16174  bool& prefersDedicatedAllocation) const
16175 {
16176 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16177  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16178  {
16179  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16180  memReqInfo.buffer = hBuffer;
16181 
16182  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16183 
16184  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16185  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16186 
16187  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16188 
16189  memReq = memReq2.memoryRequirements;
16190  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16191  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16192  }
16193  else
16194 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16195  {
16196  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16197  requiresDedicatedAllocation = false;
16198  prefersDedicatedAllocation = false;
16199  }
16200 }
16201 
16202 void VmaAllocator_T::GetImageMemoryRequirements(
16203  VkImage hImage,
16204  VkMemoryRequirements& memReq,
16205  bool& requiresDedicatedAllocation,
16206  bool& prefersDedicatedAllocation) const
16207 {
16208 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16209  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16210  {
16211  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16212  memReqInfo.image = hImage;
16213 
16214  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16215 
16216  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16217  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16218 
16219  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16220 
16221  memReq = memReq2.memoryRequirements;
16222  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16223  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16224  }
16225  else
16226 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16227  {
16228  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16229  requiresDedicatedAllocation = false;
16230  prefersDedicatedAllocation = false;
16231  }
16232 }
16233 
16234 VkResult VmaAllocator_T::AllocateMemory(
16235  const VkMemoryRequirements& vkMemReq,
16236  bool requiresDedicatedAllocation,
16237  bool prefersDedicatedAllocation,
16238  VkBuffer dedicatedBuffer,
16239  VkBufferUsageFlags dedicatedBufferUsage,
16240  VkImage dedicatedImage,
16241  const VmaAllocationCreateInfo& createInfo,
16242  VmaSuballocationType suballocType,
16243  size_t allocationCount,
16244  VmaAllocation* pAllocations)
16245 {
16246  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16247 
16248  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16249 
16250  if(vkMemReq.size == 0)
16251  {
16252  return VK_ERROR_VALIDATION_FAILED_EXT;
16253  }
16254  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16255  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16256  {
16257  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16258  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16259  }
16260  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16261  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16262  {
16263  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16264  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16265  }
16266  if(requiresDedicatedAllocation)
16267  {
16268  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16269  {
16270  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16271  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16272  }
16273  if(createInfo.pool != VK_NULL_HANDLE)
16274  {
16275  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16276  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16277  }
16278  }
16279  if((createInfo.pool != VK_NULL_HANDLE) &&
16280  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16281  {
16282  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16283  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16284  }
16285 
16286  if(createInfo.pool != VK_NULL_HANDLE)
16287  {
16288  const VkDeviceSize alignmentForPool = VMA_MAX(
16289  vkMemReq.alignment,
16290  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
16291 
16292  VmaAllocationCreateInfo createInfoForPool = createInfo;
16293  // If memory type is not HOST_VISIBLE, disable MAPPED.
16294  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16295  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16296  {
16297  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16298  }
16299 
16300  return createInfo.pool->m_BlockVector.Allocate(
16301  m_CurrentFrameIndex.load(),
16302  vkMemReq.size,
16303  alignmentForPool,
16304  createInfoForPool,
16305  suballocType,
16306  allocationCount,
16307  pAllocations);
16308  }
16309  else
16310  {
16311  // Bit mask of Vulkan memory types acceptable for this allocation.
16312  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16313  uint32_t memTypeIndex = UINT32_MAX;
16314  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16315  if(res == VK_SUCCESS)
16316  {
16317  VkDeviceSize alignmentForMemType = VMA_MAX(
16318  vkMemReq.alignment,
16319  GetMemoryTypeMinAlignment(memTypeIndex));
16320 
16321  res = AllocateMemoryOfType(
16322  vkMemReq.size,
16323  alignmentForMemType,
16324  requiresDedicatedAllocation || prefersDedicatedAllocation,
16325  dedicatedBuffer,
16326  dedicatedBufferUsage,
16327  dedicatedImage,
16328  createInfo,
16329  memTypeIndex,
16330  suballocType,
16331  allocationCount,
16332  pAllocations);
16333  // Succeeded on first try.
16334  if(res == VK_SUCCESS)
16335  {
16336  return res;
16337  }
16338  // Allocation from this memory type failed. Try other compatible memory types.
16339  else
16340  {
16341  for(;;)
16342  {
16343  // Remove old memTypeIndex from list of possibilities.
16344  memoryTypeBits &= ~(1u << memTypeIndex);
16345  // Find alternative memTypeIndex.
16346  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16347  if(res == VK_SUCCESS)
16348  {
16349  alignmentForMemType = VMA_MAX(
16350  vkMemReq.alignment,
16351  GetMemoryTypeMinAlignment(memTypeIndex));
16352 
16353  res = AllocateMemoryOfType(
16354  vkMemReq.size,
16355  alignmentForMemType,
16356  requiresDedicatedAllocation || prefersDedicatedAllocation,
16357  dedicatedBuffer,
16358  dedicatedBufferUsage,
16359  dedicatedImage,
16360  createInfo,
16361  memTypeIndex,
16362  suballocType,
16363  allocationCount,
16364  pAllocations);
16365  // Allocation from this alternative memory type succeeded.
16366  if(res == VK_SUCCESS)
16367  {
16368  return res;
16369  }
16370  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16371  }
16372  // No other matching memory type index could be found.
16373  else
16374  {
16375  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16376  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16377  }
16378  }
16379  }
16380  }
16381  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16382  else
16383  return res;
16384  }
16385 }
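// Worked example for the fallback loop above: if vkMemReq.memoryTypeBits is
// 0b0111 and vmaFindMemoryTypeIndex first picks index 0, a failed allocation
// clears that bit (the mask becomes 0b0110) and the search retries with the
// next best index, and so on, until an allocation succeeds or no compatible
// memory type remains and VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.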
16386 
16387 void VmaAllocator_T::FreeMemory(
16388  size_t allocationCount,
16389  const VmaAllocation* pAllocations)
16390 {
16391  VMA_ASSERT(pAllocations);
16392 
16393  for(size_t allocIndex = allocationCount; allocIndex--; )
16394  {
16395  VmaAllocation allocation = pAllocations[allocIndex];
16396 
16397  if(allocation != VK_NULL_HANDLE)
16398  {
16399  if(TouchAllocation(allocation))
16400  {
16401  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16402  {
16403  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16404  }
16405 
16406  switch(allocation->GetType())
16407  {
16408  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16409  {
16410  VmaBlockVector* pBlockVector = VMA_NULL;
16411  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16412  if(hPool != VK_NULL_HANDLE)
16413  {
16414  pBlockVector = &hPool->m_BlockVector;
16415  }
16416  else
16417  {
16418  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16419  pBlockVector = m_pBlockVectors[memTypeIndex];
16420  }
16421  pBlockVector->Free(allocation);
16422  }
16423  break;
16424  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16425  FreeDedicatedMemory(allocation);
16426  break;
16427  default:
16428  VMA_ASSERT(0);
16429  }
16430  }
16431 
16432  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
16433  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16434  allocation->SetUserData(this, VMA_NULL);
16435  m_AllocationObjectAllocator.Free(allocation);
16436  }
16437  }
16438 }
16439 
16440 VkResult VmaAllocator_T::ResizeAllocation(
16441  const VmaAllocation alloc,
16442  VkDeviceSize newSize)
16443 {
16444  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16445  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16446  {
16447  return VK_ERROR_VALIDATION_FAILED_EXT;
16448  }
16449  if(newSize == alloc->GetSize())
16450  {
16451  return VK_SUCCESS;
16452  }
16453  return VK_ERROR_OUT_OF_POOL_MEMORY;
16454 }
16455 
16456 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16457 {
16458  // Initialize.
16459  InitStatInfo(pStats->total);
16460  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16461  InitStatInfo(pStats->memoryType[i]);
16462  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16463  InitStatInfo(pStats->memoryHeap[i]);
16464 
16465  // Process default pools.
16466  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16467  {
16468  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16469  VMA_ASSERT(pBlockVector);
16470  pBlockVector->AddStats(pStats);
16471  }
16472 
16473  // Process custom pools.
16474  {
16475  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16476  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16477  {
16478  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16479  }
16480  }
16481 
16482  // Process dedicated allocations.
16483  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16484  {
16485  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16486  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16487  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16488  VMA_ASSERT(pDedicatedAllocVector);
16489  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16490  {
16491  VmaStatInfo allocationStatInfo;
16492  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16493  VmaAddStatInfo(pStats->total, allocationStatInfo);
16494  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16495  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16496  }
16497  }
16498 
16499  // Postprocess.
16500  VmaPostprocessCalcStatInfo(pStats->total);
16501  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16502  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16503  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16504  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16505 }
16506 
16507 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16508 {
16509 #if VMA_MEMORY_BUDGET
16510  if(m_UseExtMemoryBudget)
16511  {
16512  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16513  {
16514  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16515  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16516  {
16517  const uint32_t heapIndex = firstHeap + i;
16518 
16519  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16520  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16521 
16522  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16523  {
16524  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16525  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16526  }
16527  else
16528  {
16529  outBudget->usage = 0;
16530  }
16531 
16532  // Have to take MIN with heap size because the explicit HeapSizeLimit is already included in it.
16533  outBudget->budget = VMA_MIN(
16534  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16535  }
16536  }
16537  else
16538  {
16539  UpdateVulkanBudget(); // Outside of mutex lock
16540  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16541  }
16542  }
16543  else
16544 #endif
16545  {
16546  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16547  {
16548  const uint32_t heapIndex = firstHeap + i;
16549 
16550  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16551  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16552 
16553  outBudget->usage = outBudget->blockBytes;
16554  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16555  }
16556  }
16557 }
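// Worked example for the usage estimate above: if the last budget fetch saw
// Vulkan usage of 100 MiB while our blocks totalled 80 MiB
// (m_BlockBytesAtBudgetFetch), and our blocks have since grown to 90 MiB,
// the estimate is 100 + 90 - 80 = 110 MiB. Without VK_EXT_memory_budget the
// fallback assumes usage equals our own blockBytes and budgets 80% of the
// heap size.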
16558 
16559 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16560 
16561 VkResult VmaAllocator_T::DefragmentationBegin(
16562  const VmaDefragmentationInfo2& info,
16563  VmaDefragmentationStats* pStats,
16564  VmaDefragmentationContext* pContext)
16565 {
16566  if(info.pAllocationsChanged != VMA_NULL)
16567  {
16568  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16569  }
16570 
16571  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16572  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16573 
16574  (*pContext)->AddPools(info.poolCount, info.pPools);
16575  (*pContext)->AddAllocations(
16576  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16577 
16578  VkResult res = (*pContext)->Defragment(
16579  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16580  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16581  info.commandBuffer, pStats, info.flags);
16582 
16583  if(res != VK_NOT_READY)
16584  {
16585  vma_delete(this, *pContext);
16586  *pContext = VMA_NULL;
16587  }
16588 
16589  return res;
16590 }
16591 
16592 VkResult VmaAllocator_T::DefragmentationEnd(
16593  VmaDefragmentationContext context)
16594 {
16595  vma_delete(this, context);
16596  return VK_SUCCESS;
16597 }
16598 
16599 VkResult VmaAllocator_T::DefragmentationPassBegin(
16600  VmaDefragmentationPassInfo* pInfo,
16601  VmaDefragmentationContext context)
16602 {
16603  return context->DefragmentPassBegin(pInfo);
16604 }
16605 VkResult VmaAllocator_T::DefragmentationPassEnd(
16606  VmaDefragmentationContext context)
16607 {
16608  return context->DefragmentPassEnd();
16609 
16610 }
16611 
16612 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16613 {
16614  if(hAllocation->CanBecomeLost())
16615  {
16616  /*
16617  Warning: This is a carefully designed algorithm.
16618  Do not modify unless you really know what you're doing :)
16619  */
16620  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16621  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16622  for(;;)
16623  {
16624  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16625  {
16626  pAllocationInfo->memoryType = UINT32_MAX;
16627  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16628  pAllocationInfo->offset = 0;
16629  pAllocationInfo->size = hAllocation->GetSize();
16630  pAllocationInfo->pMappedData = VMA_NULL;
16631  pAllocationInfo->pUserData = hAllocation->GetUserData();
16632  return;
16633  }
16634  else if(localLastUseFrameIndex == localCurrFrameIndex)
16635  {
16636  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16637  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16638  pAllocationInfo->offset = hAllocation->GetOffset();
16639  pAllocationInfo->size = hAllocation->GetSize();
16640  pAllocationInfo->pMappedData = VMA_NULL;
16641  pAllocationInfo->pUserData = hAllocation->GetUserData();
16642  return;
16643  }
16644  else // Last use time earlier than current time.
16645  {
16646  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16647  {
16648  localLastUseFrameIndex = localCurrFrameIndex;
16649  }
16650  }
16651  }
16652  }
16653  else
16654  {
16655 #if VMA_STATS_STRING_ENABLED
16656  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16657  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16658  for(;;)
16659  {
16660  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16661  if(localLastUseFrameIndex == localCurrFrameIndex)
16662  {
16663  break;
16664  }
16665  else // Last use time earlier than current time.
16666  {
16667  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16668  {
16669  localLastUseFrameIndex = localCurrFrameIndex;
16670  }
16671  }
16672  }
16673 #endif
16674 
16675  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16676  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16677  pAllocationInfo->offset = hAllocation->GetOffset();
16678  pAllocationInfo->size = hAllocation->GetSize();
16679  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16680  pAllocationInfo->pUserData = hAllocation->GetUserData();
16681  }
16682 }
16683 
16684 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16685 {
16686  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16687  if(hAllocation->CanBecomeLost())
16688  {
16689  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16690  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16691  for(;;)
16692  {
16693  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16694  {
16695  return false;
16696  }
16697  else if(localLastUseFrameIndex == localCurrFrameIndex)
16698  {
16699  return true;
16700  }
16701  else // Last use time earlier than current time.
16702  {
16703  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16704  {
16705  localLastUseFrameIndex = localCurrFrameIndex;
16706  }
16707  }
16708  }
16709  }
16710  else
16711  {
16712 #if VMA_STATS_STRING_ENABLED
16713  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16714  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16715  for(;;)
16716  {
16717  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16718  if(localLastUseFrameIndex == localCurrFrameIndex)
16719  {
16720  break;
16721  }
16722  else // Last use time earlier than current time.
16723  {
16724  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16725  {
16726  localLastUseFrameIndex = localCurrFrameIndex;
16727  }
16728  }
16729  }
16730 #endif
16731 
16732  return true;
16733  }
16734 }
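
// Note on the loops above: GetAllocationInfo() and TouchAllocation() bump the
// allocation's last-use frame index with an atomic compare-exchange retry loop,
// so several threads touching the same allocation within one frame converge on
// the current frame index without taking a lock. A sketch of the pattern in
// isolation (illustrative only, not part of the library):
//
//     std::atomic<uint32_t> lastUse{5};
//     uint32_t expected = lastUse.load();
//     const uint32_t currFrame = 7;
//     while(expected != currFrame &&
//         !lastUse.compare_exchange_weak(expected, currFrame))
//     {
//         // On failure, expected was reloaded with the current value; retry.
//     }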
16735 
16736 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16737 {
16738  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16739 
16740  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16741 
16742  if(newCreateInfo.maxBlockCount == 0)
16743  {
16744  newCreateInfo.maxBlockCount = SIZE_MAX;
16745  }
16746  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16747  {
16748  return VK_ERROR_INITIALIZATION_FAILED;
16749  }
16750  // Memory type index out of range or forbidden.
16751  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16752  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16753  {
16754  return VK_ERROR_FEATURE_NOT_PRESENT;
16755  }
16756 
16757  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16758 
16759  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16760 
16761  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16762  if(res != VK_SUCCESS)
16763  {
16764  vma_delete(this, *pPool);
16765  *pPool = VMA_NULL;
16766  return res;
16767  }
16768 
16769  // Add to m_Pools.
16770  {
16771  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16772  (*pPool)->SetId(m_NextPoolId++);
16773  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16774  }
16775 
16776  return VK_SUCCESS;
16777 }
16778 
16779 void VmaAllocator_T::DestroyPool(VmaPool pool)
16780 {
16781  // Remove from m_Pools.
16782  {
16783  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16784  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16785  VMA_ASSERT(success && "Pool not found in Allocator.");
16786  }
16787 
16788  vma_delete(this, pool);
16789 }
16790 
16791 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16792 {
16793  pool->m_BlockVector.GetPoolStats(pPoolStats);
16794 }
16795 
16796 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16797 {
16798  m_CurrentFrameIndex.store(frameIndex);
16799 
16800 #if VMA_MEMORY_BUDGET
16801  if(m_UseExtMemoryBudget)
16802  {
16803  UpdateVulkanBudget();
16804  }
16805 #endif // #if VMA_MEMORY_BUDGET
16806 }
16807 
16808 void VmaAllocator_T::MakePoolAllocationsLost(
16809  VmaPool hPool,
16810  size_t* pLostAllocationCount)
16811 {
16812  hPool->m_BlockVector.MakePoolAllocationsLost(
16813  m_CurrentFrameIndex.load(),
16814  pLostAllocationCount);
16815 }
16816 
16817 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
16818 {
16819  return hPool->m_BlockVector.CheckCorruption();
16820 }
16821 
16822 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16823 {
16824  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16825 
16826  // Process default pools.
16827  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16828  {
16829  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16830  {
16831  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16832  VMA_ASSERT(pBlockVector);
16833  VkResult localRes = pBlockVector->CheckCorruption();
16834  switch(localRes)
16835  {
16836  case VK_ERROR_FEATURE_NOT_PRESENT:
16837  break;
16838  case VK_SUCCESS:
16839  finalRes = VK_SUCCESS;
16840  break;
16841  default:
16842  return localRes;
16843  }
16844  }
16845  }
16846 
16847  // Process custom pools.
16848  {
16849  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16850  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16851  {
16852  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16853  {
16854  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16855  switch(localRes)
16856  {
16857  case VK_ERROR_FEATURE_NOT_PRESENT:
16858  break;
16859  case VK_SUCCESS:
16860  finalRes = VK_SUCCESS;
16861  break;
16862  default:
16863  return localRes;
16864  }
16865  }
16866  }
16867  }
16868 
16869  return finalRes;
16870 }
16871 
16872 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
16873 {
16874  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
16875  (*pAllocation)->InitLost();
16876 }
16877 
16878 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
16879 {
16880  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
16881 
16882  // HeapSizeLimit is in effect for this heap.
16883  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16884  {
16885  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16886  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
16887  for(;;)
16888  {
16889  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
16890  if(blockBytesAfterAllocation > heapSize)
16891  {
16892  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16893  }
16894  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
16895  {
16896  break;
16897  }
16898  }
16899  }
16900  else
16901  {
16902  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
16903  }
16904 
16905  // VULKAN CALL vkAllocateMemory.
16906  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
16907 
16908  if(res == VK_SUCCESS)
16909  {
16910 #if VMA_MEMORY_BUDGET
16911  ++m_Budget.m_OperationsSinceBudgetFetch;
16912 #endif
16913 
16914  // Informative callback.
16915  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
16916  {
16917  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
16918  }
16919  }
16920  else
16921  {
16922  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
16923  }
16924 
16925  return res;
16926 }
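
// Note on the budget accounting above: when a heap size limit is active,
// m_BlockBytes[heapIndex] (an atomic) is reserved up front with a
// compare_exchange_strong loop, so the limit check and the byte-count update
// form one atomic step; on vkAllocateMemory failure the reservation is rolled
// back. Illustrative numbers: with heapSize = 256 MiB, blockBytes = 200 MiB,
// and a 64 MiB request, 200 + 64 > 256, so the call fails early with
// VK_ERROR_OUT_OF_DEVICE_MEMORY before reaching the driver.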
16927 
16928 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
16929 {
16930  // Informative callback.
16931  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
16932  {
16933  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
16934  }
16935 
16936  // VULKAN CALL vkFreeMemory.
16937  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
16938 
16939  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
16940 }
16941 
16942 VkResult VmaAllocator_T::BindVulkanBuffer(
16943  VkDeviceMemory memory,
16944  VkDeviceSize memoryOffset,
16945  VkBuffer buffer,
16946  const void* pNext)
16947 {
16948  if(pNext != VMA_NULL)
16949  {
16950 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16951  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16952  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
16953  {
16954  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
16955  bindBufferMemoryInfo.pNext = pNext;
16956  bindBufferMemoryInfo.buffer = buffer;
16957  bindBufferMemoryInfo.memory = memory;
16958  bindBufferMemoryInfo.memoryOffset = memoryOffset;
16959  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
16960  }
16961  else
16962 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16963  {
16964  return VK_ERROR_EXTENSION_NOT_PRESENT;
16965  }
16966  }
16967  else
16968  {
16969  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
16970  }
16971 }
16972 
16973 VkResult VmaAllocator_T::BindVulkanImage(
16974  VkDeviceMemory memory,
16975  VkDeviceSize memoryOffset,
16976  VkImage image,
16977  const void* pNext)
16978 {
16979  if(pNext != VMA_NULL)
16980  {
16981 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16982  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16983  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
16984  {
16985  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
16986  bindImageMemoryInfo.pNext = pNext;
16987  bindImageMemoryInfo.image = image;
16988  bindImageMemoryInfo.memory = memory;
16989  bindImageMemoryInfo.memoryOffset = memoryOffset;
16990  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
16991  }
16992  else
16993 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16994  {
16995  return VK_ERROR_EXTENSION_NOT_PRESENT;
16996  }
16997  }
16998  else
16999  {
17000  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17001  }
17002 }
17003 
17004 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17005 {
17006  if(hAllocation->CanBecomeLost())
17007  {
17008  return VK_ERROR_MEMORY_MAP_FAILED;
17009  }
17010 
17011  switch(hAllocation->GetType())
17012  {
17013  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17014  {
17015  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17016  char *pBytes = VMA_NULL;
17017  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17018  if(res == VK_SUCCESS)
17019  {
17020  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17021  hAllocation->BlockAllocMap();
17022  }
17023  return res;
17024  }
17025  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17026  return hAllocation->DedicatedAllocMap(this, ppData);
17027  default:
17028  VMA_ASSERT(0);
17029  return VK_ERROR_MEMORY_MAP_FAILED;
17030  }
17031 }
17032 
17033 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17034 {
17035  switch(hAllocation->GetType())
17036  {
17037  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17038  {
17039  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17040  hAllocation->BlockAllocUnmap();
17041  pBlock->Unmap(this, 1);
17042  }
17043  break;
17044  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17045  hAllocation->DedicatedAllocUnmap(this);
17046  break;
17047  default:
17048  VMA_ASSERT(0);
17049  }
17050 }
17051 
17052 VkResult VmaAllocator_T::BindBufferMemory(
17053  VmaAllocation hAllocation,
17054  VkDeviceSize allocationLocalOffset,
17055  VkBuffer hBuffer,
17056  const void* pNext)
17057 {
17058  VkResult res = VK_SUCCESS;
17059  switch(hAllocation->GetType())
17060  {
17061  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17062  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17063  break;
17064  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17065  {
17066  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17067  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17068  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17069  break;
17070  }
17071  default:
17072  VMA_ASSERT(0);
17073  }
17074  return res;
17075 }
17076 
17077 VkResult VmaAllocator_T::BindImageMemory(
17078  VmaAllocation hAllocation,
17079  VkDeviceSize allocationLocalOffset,
17080  VkImage hImage,
17081  const void* pNext)
17082 {
17083  VkResult res = VK_SUCCESS;
17084  switch(hAllocation->GetType())
17085  {
17086  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17087  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17088  break;
17089  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17090  {
17091  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17092  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17093  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17094  break;
17095  }
17096  default:
17097  VMA_ASSERT(0);
17098  }
17099  return res;
17100 }
17101 
17102 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17103  VmaAllocation hAllocation,
17104  VkDeviceSize offset, VkDeviceSize size,
17105  VMA_CACHE_OPERATION op)
17106 {
17107  VkResult res = VK_SUCCESS;
17108 
17109  VkMappedMemoryRange memRange = {};
17110  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17111  {
17112  switch(op)
17113  {
17114  case VMA_CACHE_FLUSH:
17115  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17116  break;
17117  case VMA_CACHE_INVALIDATE:
17118  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17119  break;
17120  default:
17121  VMA_ASSERT(0);
17122  }
17123  }
17124  // else: Just ignore this call.
17125  return res;
17126 }
17127 
17128 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17129  uint32_t allocationCount,
17130  const VmaAllocation* allocations,
17131  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17132  VMA_CACHE_OPERATION op)
17133 {
17134  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17135  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17136  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17137 
17138  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17139  {
17140  const VmaAllocation alloc = allocations[allocIndex];
17141  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17142  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17143  VkMappedMemoryRange newRange;
17144  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17145  {
17146  ranges.push_back(newRange);
17147  }
17148  }
17149 
17150  VkResult res = VK_SUCCESS;
17151  if(!ranges.empty())
17152  {
17153  switch(op)
17154  {
17155  case VMA_CACHE_FLUSH:
17156  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17157  break;
17158  case VMA_CACHE_INVALIDATE:
17159  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17160  break;
17161  default:
17162  VMA_ASSERT(0);
17163  }
17164  }
17165  // else: Just ignore this call.
17166  return res;
17167 }
17168 
17169 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17170 {
17171  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17172 
17173  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17174  {
17175  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17176  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
17177  VMA_ASSERT(pDedicatedAllocations);
17178  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
17179  VMA_ASSERT(success);
17180  }
17181 
17182  VkDeviceMemory hMemory = allocation->GetMemory();
17183 
17184  /*
17185  There is no need to call this, because the Vulkan spec allows skipping
17186  vkUnmapMemory before vkFreeMemory.
17187 
17188  if(allocation->GetMappedData() != VMA_NULL)
17189  {
17190  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17191  }
17192  */
17193 
17194  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17195 
17196  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17197 }
17198 
17199 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17200 {
17201  VkBufferCreateInfo dummyBufCreateInfo;
17202  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17203 
17204  uint32_t memoryTypeBits = 0;
17205 
17206  // Create buffer.
17207  VkBuffer buf = VK_NULL_HANDLE;
17208  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17209  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17210  if(res == VK_SUCCESS)
17211  {
17212  // Query for supported memory types.
17213  VkMemoryRequirements memReq;
17214  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17215  memoryTypeBits = memReq.memoryTypeBits;
17216 
17217  // Destroy buffer.
17218  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17219  }
17220 
17221  return memoryTypeBits;
17222 }
17223 
17224 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17225 {
17226  // Make sure memory information is already fetched.
17227  VMA_ASSERT(GetMemoryTypeCount() > 0);
17228 
17229  uint32_t memoryTypeBits = UINT32_MAX;
17230 
17231  if(!m_UseAmdDeviceCoherentMemory)
17232  {
17233  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17234  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17235  {
17236  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17237  {
17238  memoryTypeBits &= ~(1u << memTypeIndex);
17239  }
17240  }
17241  }
17242 
17243  return memoryTypeBits;
17244 }
17245 
17246 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17247  VmaAllocation allocation,
17248  VkDeviceSize offset, VkDeviceSize size,
17249  VkMappedMemoryRange& outRange) const
17250 {
17251  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17252  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17253  {
17254  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17255  const VkDeviceSize allocationSize = allocation->GetSize();
17256  VMA_ASSERT(offset <= allocationSize);
17257 
17258  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17259  outRange.pNext = VMA_NULL;
17260  outRange.memory = allocation->GetMemory();
17261 
17262  switch(allocation->GetType())
17263  {
17264  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17265  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17266  if(size == VK_WHOLE_SIZE)
17267  {
17268  outRange.size = allocationSize - outRange.offset;
17269  }
17270  else
17271  {
17272  VMA_ASSERT(offset + size <= allocationSize);
17273  outRange.size = VMA_MIN(
17274  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17275  allocationSize - outRange.offset);
17276  }
17277  break;
17278  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17279  {
17280  // 1. Still within this allocation.
17281  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17282  if(size == VK_WHOLE_SIZE)
17283  {
17284  size = allocationSize - offset;
17285  }
17286  else
17287  {
17288  VMA_ASSERT(offset + size <= allocationSize);
17289  }
17290  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17291 
17292  // 2. Adjust to whole block.
17293  const VkDeviceSize allocationOffset = allocation->GetOffset();
17294  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17295  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17296  outRange.offset += allocationOffset;
17297  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17298 
17299  break;
17300  }
17301  default:
17302  VMA_ASSERT(0);
17303  }
17304  return true;
17305  }
17306  return false;
17307 }
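
// Worked example of the alignment above, with illustrative values:
// nonCoherentAtomSize = 64, a block allocation at block offset 448 of size 100,
// and a requested range (offset = 10, size = 50):
//   outRange.offset = VmaAlignDown(10, 64)          -> 0
//   outRange.size   = VmaAlignUp(50 + (10 - 0), 64) -> 64
//   after adding the allocation's block offset:
//   outRange.offset = 448, outRange.size = VMA_MIN(64, blockSize - 448).
// 448 is a multiple of 64, so the asserted invariant on allocationOffset holds.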
17308 
17309 #if VMA_MEMORY_BUDGET
17310 
17311 void VmaAllocator_T::UpdateVulkanBudget()
17312 {
17313  VMA_ASSERT(m_UseExtMemoryBudget);
17314 
17315  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
17316 
17317  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
17318  VmaPnextChainPushFront(&memProps, &budgetProps);
17319 
17320  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
17321 
17322  {
17323  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
17324 
17325  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
17326  {
17327  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
17328  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
17329  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
17330 
17331  // Some buggy drivers report the budget incorrectly, e.g. 0 or much larger than the heap size.
17332  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
17333  {
17334  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17335  }
17336  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
17337  {
17338  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
17339  }
17340  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
17341  {
17342  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17343  }
17344  }
17345  m_Budget.m_OperationsSinceBudgetFetch = 0;
17346  }
17347 }
17348 
17349 #endif // #if VMA_MEMORY_BUDGET
17350 
17351 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
17352 {
17353  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
17354  !hAllocation->CanBecomeLost() &&
17355  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17356  {
17357  void* pData = VMA_NULL;
17358  VkResult res = Map(hAllocation, &pData);
17359  if(res == VK_SUCCESS)
17360  {
17361  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
17362  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
17363  Unmap(hAllocation);
17364  }
17365  else
17366  {
17367  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
17368  }
17369  }
17370 }
17371 
17372 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
17373 {
17374  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
17375  if(memoryTypeBits == UINT32_MAX)
17376  {
17377  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
17378  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
17379  }
17380  return memoryTypeBits;
17381 }
17382 
17383 #if VMA_STATS_STRING_ENABLED
17384 
17385 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
17386 {
17387  bool dedicatedAllocationsStarted = false;
17388  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17389  {
17390  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17391  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
17392  VMA_ASSERT(pDedicatedAllocVector);
17393  if(pDedicatedAllocVector->empty() == false)
17394  {
17395  if(dedicatedAllocationsStarted == false)
17396  {
17397  dedicatedAllocationsStarted = true;
17398  json.WriteString("DedicatedAllocations");
17399  json.BeginObject();
17400  }
17401 
17402  json.BeginString("Type ");
17403  json.ContinueString(memTypeIndex);
17404  json.EndString();
17405 
17406  json.BeginArray();
17407 
17408  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
17409  {
17410  json.BeginObject(true);
17411  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
17412  hAlloc->PrintParameters(json);
17413  json.EndObject();
17414  }
17415 
17416  json.EndArray();
17417  }
17418  }
17419  if(dedicatedAllocationsStarted)
17420  {
17421  json.EndObject();
17422  }
17423 
17424  {
17425  bool allocationsStarted = false;
17426  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17427  {
17428  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
17429  {
17430  if(allocationsStarted == false)
17431  {
17432  allocationsStarted = true;
17433  json.WriteString("DefaultPools");
17434  json.BeginObject();
17435  }
17436 
17437  json.BeginString("Type ");
17438  json.ContinueString(memTypeIndex);
17439  json.EndString();
17440 
17441  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
17442  }
17443  }
17444  if(allocationsStarted)
17445  {
17446  json.EndObject();
17447  }
17448  }
17449 
17450  // Custom pools
17451  {
17452  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17453  const size_t poolCount = m_Pools.size();
17454  if(poolCount > 0)
17455  {
17456  json.WriteString("Pools");
17457  json.BeginObject();
17458  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
17459  {
17460  json.BeginString();
17461  json.ContinueString(m_Pools[poolIndex]->GetId());
17462  json.EndString();
17463 
17464  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
17465  }
17466  json.EndObject();
17467  }
17468  }
17469 }
17470 
17471 #endif // #if VMA_STATS_STRING_ENABLED
17472 
17473 ////////////////////////////////////////////////////////////////////////////////
17474 // Public interface
17475 
17476 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17477  const VmaAllocatorCreateInfo* pCreateInfo,
17478  VmaAllocator* pAllocator)
17479 {
17480  VMA_ASSERT(pCreateInfo && pAllocator);
17481  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17482  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17483  VMA_DEBUG_LOG("vmaCreateAllocator");
17484  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17485  return (*pAllocator)->Init(pCreateInfo);
17486 }
17487 
17488 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17489  VmaAllocator allocator)
17490 {
17491  if(allocator != VK_NULL_HANDLE)
17492  {
17493  VMA_DEBUG_LOG("vmaDestroyAllocator");
17494  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17495  vma_delete(&allocationCallbacks, allocator);
17496  }
17497 }
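
/*
A minimal usage sketch for the pair above, assuming `instance`,
`physicalDevice`, and `device` are valid handles created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/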
17498 
17499 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17500 {
17501  VMA_ASSERT(allocator && pAllocatorInfo);
17502  pAllocatorInfo->instance = allocator->m_hInstance;
17503  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17504  pAllocatorInfo->device = allocator->m_hDevice;
17505 }
17506 
17507 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17508  VmaAllocator allocator,
17509  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17510 {
17511  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17512  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17513 }
17514 
17515 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17516  VmaAllocator allocator,
17517  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17518 {
17519  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17520  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17521 }
17522 
17523 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17524  VmaAllocator allocator,
17525  uint32_t memoryTypeIndex,
17526  VkMemoryPropertyFlags* pFlags)
17527 {
17528  VMA_ASSERT(allocator && pFlags);
17529  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17530  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17531 }
17532 
17533 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17534  VmaAllocator allocator,
17535  uint32_t frameIndex)
17536 {
17537  VMA_ASSERT(allocator);
17538  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17539 
17540  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17541 
17542  allocator->SetCurrentFrameIndex(frameIndex);
17543 }
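
/*
A sketch of the per-frame pattern this function supports, assuming `allocator`
is valid and the application keeps its own frame counter:

    uint32_t frameIndex = 1; // must stay != VMA_FRAME_INDEX_LOST (UINT32_MAX)
    // once per rendered frame:
    vmaSetCurrentFrameIndex(allocator, frameIndex++);
*/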
17544 
17545 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17546  VmaAllocator allocator,
17547  VmaStats* pStats)
17548 {
17549  VMA_ASSERT(allocator && pStats);
17550  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17551  allocator->CalculateStats(pStats);
17552 }
17553 
17554 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17555  VmaAllocator allocator,
17556  VmaBudget* pBudget)
17557 {
17558  VMA_ASSERT(allocator && pBudget);
17559  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17560  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17561 }
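
/*
A sketch of polling the budget, assuming `allocator` is valid; the array must
have one element per memory heap (`heapCount` here stands for the count
obtained from vmaGetMemoryProperties):

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);
    for(uint32_t h = 0; h < heapCount; ++h)
    {
        // Compare budgets[h].usage against budgets[h].budget to see how close
        // heap h is to its limit.
    }
*/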
17562 
17563 #if VMA_STATS_STRING_ENABLED
17564 
17565 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17566  VmaAllocator allocator,
17567  char** ppStatsString,
17568  VkBool32 detailedMap)
17569 {
17570  VMA_ASSERT(allocator && ppStatsString);
17571  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17572 
17573  VmaStringBuilder sb(allocator);
17574  {
17575  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17576  json.BeginObject();
17577 
17578  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17579  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17580 
17581  VmaStats stats;
17582  allocator->CalculateStats(&stats);
17583 
17584  json.WriteString("Total");
17585  VmaPrintStatInfo(json, stats.total);
17586 
17587  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17588  {
17589  json.BeginString("Heap ");
17590  json.ContinueString(heapIndex);
17591  json.EndString();
17592  json.BeginObject();
17593 
17594  json.WriteString("Size");
17595  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17596 
17597  json.WriteString("Flags");
17598  json.BeginArray(true);
17599  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17600  {
17601  json.WriteString("DEVICE_LOCAL");
17602  }
17603  json.EndArray();
17604 
17605  json.WriteString("Budget");
17606  json.BeginObject();
17607  {
17608  json.WriteString("BlockBytes");
17609  json.WriteNumber(budget[heapIndex].blockBytes);
17610  json.WriteString("AllocationBytes");
17611  json.WriteNumber(budget[heapIndex].allocationBytes);
17612  json.WriteString("Usage");
17613  json.WriteNumber(budget[heapIndex].usage);
17614  json.WriteString("Budget");
17615  json.WriteNumber(budget[heapIndex].budget);
17616  }
17617  json.EndObject();
17618 
17619  if(stats.memoryHeap[heapIndex].blockCount > 0)
17620  {
17621  json.WriteString("Stats");
17622  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17623  }
17624 
17625  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17626  {
17627  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17628  {
17629  json.BeginString("Type ");
17630  json.ContinueString(typeIndex);
17631  json.EndString();
17632 
17633  json.BeginObject();
17634 
17635  json.WriteString("Flags");
17636  json.BeginArray(true);
17637  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17638  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17639  {
17640  json.WriteString("DEVICE_LOCAL");
17641  }
17642  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17643  {
17644  json.WriteString("HOST_VISIBLE");
17645  }
17646  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17647  {
17648  json.WriteString("HOST_COHERENT");
17649  }
17650  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17651  {
17652  json.WriteString("HOST_CACHED");
17653  }
17654  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17655  {
17656  json.WriteString("LAZILY_ALLOCATED");
17657  }
17658  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17659  {
17660  json.WriteString("PROTECTED");
17661  }
17662  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17663  {
17664  json.WriteString("DEVICE_COHERENT");
17665  }
17666  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17667  {
17668  json.WriteString("DEVICE_UNCACHED");
17669  }
17670  json.EndArray();
17671 
17672  if(stats.memoryType[typeIndex].blockCount > 0)
17673  {
17674  json.WriteString("Stats");
17675  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17676  }
17677 
17678  json.EndObject();
17679  }
17680  }
17681 
17682  json.EndObject();
17683  }
17684  if(detailedMap == VK_TRUE)
17685  {
17686  allocator->PrintDetailedMap(json);
17687  }
17688 
17689  json.EndObject();
17690  }
17691 
17692  const size_t len = sb.GetLength();
17693  char* const pChars = vma_new_array(allocator, char, len + 1);
17694  if(len > 0)
17695  {
17696  memcpy(pChars, sb.GetData(), len);
17697  }
17698  pChars[len] = '\0';
17699  *ppStatsString = pChars;
17700 }
17701 
17702 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17703  VmaAllocator allocator,
17704  char* pStatsString)
17705 {
17706  if(pStatsString != VMA_NULL)
17707  {
17708  VMA_ASSERT(allocator);
17709  size_t len = strlen(pStatsString);
17710  vma_delete_array(allocator, pStatsString, len + 1);
17711  }
17712 }
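
/*
The two functions above are meant to be used as a pair; a sketch, assuming
`allocator` is valid:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailedMap = VK_TRUE
    // ... write the JSON in statsString to a log or file ...
    vmaFreeStatsString(allocator, statsString);
*/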
17713 
17714 #endif // #if VMA_STATS_STRING_ENABLED
17715 
17716 /*
17717 This function is not protected by any mutex because it just reads immutable data.
17718 */
17719 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17720  VmaAllocator allocator,
17721  uint32_t memoryTypeBits,
17722  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17723  uint32_t* pMemoryTypeIndex)
17724 {
17725  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17726  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17727  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17728 
17729  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17730 
17731  if(pAllocationCreateInfo->memoryTypeBits != 0)
17732  {
17733  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17734  }
17735 
17736  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17737  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17738  uint32_t notPreferredFlags = 0;
17739 
17740  // Convert usage to requiredFlags and preferredFlags.
17741  switch(pAllocationCreateInfo->usage)
17742  {
17743  case VMA_MEMORY_USAGE_UNKNOWN:
17744  break;
17745  case VMA_MEMORY_USAGE_GPU_ONLY:
17746  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17747  {
17748  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17749  }
17750  break;
17751  case VMA_MEMORY_USAGE_CPU_ONLY:
17752  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17753  break;
17754  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17755  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17756  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17757  {
17758  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17759  }
17760  break;
17761  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17762  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17763  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17764  break;
17765  case VMA_MEMORY_USAGE_CPU_COPY:
17766  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17767  break;
17768  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17769  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17770  break;
17771  default:
17772  VMA_ASSERT(0);
17773  break;
17774  }
17775 
17776  // Avoid DEVICE_COHERENT unless explicitly requested.
17777  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17778  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17779  {
17780  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17781  }
17782 
17783  *pMemoryTypeIndex = UINT32_MAX;
17784  uint32_t minCost = UINT32_MAX;
17785  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17786  memTypeIndex < allocator->GetMemoryTypeCount();
17787  ++memTypeIndex, memTypeBit <<= 1)
17788  {
17789  // This memory type is acceptable according to the memoryTypeBits bitmask.
17790  if((memTypeBit & memoryTypeBits) != 0)
17791  {
17792  const VkMemoryPropertyFlags currFlags =
17793  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17794  // This memory type contains requiredFlags.
17795  if((requiredFlags & ~currFlags) == 0)
17796  {
17797  // Cost = preferredFlags bits missing from this type + notPreferredFlags bits present in it.
17798  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17799  VmaCountBitsSet(currFlags & notPreferredFlags);
17800  // Remember memory type with lowest cost.
17801  if(currCost < minCost)
17802  {
17803  *pMemoryTypeIndex = memTypeIndex;
17804  if(currCost == 0)
17805  {
17806  return VK_SUCCESS;
17807  }
17808  minCost = currCost;
17809  }
17810  }
17811  }
17812  }
17813  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17814 }
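
/*
Illustration of the cost metric above: a candidate type is rejected unless it
has every bit of requiredFlags; among the rest, the cost is the number of
preferredFlags bits it lacks plus the number of notPreferredFlags bits it has,
and a cost of 0 returns immediately. A typical call, assuming `allocator` is
valid:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &createInfo, &memTypeIndex);
*/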
17815 
17816 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17817  VmaAllocator allocator,
17818  const VkBufferCreateInfo* pBufferCreateInfo,
17819  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17820  uint32_t* pMemoryTypeIndex)
17821 {
17822  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17823  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17824  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17825  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17826 
17827  const VkDevice hDev = allocator->m_hDevice;
17828  VkBuffer hBuffer = VK_NULL_HANDLE;
17829  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17830  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17831  if(res == VK_SUCCESS)
17832  {
17833  VkMemoryRequirements memReq = {};
17834  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17835  hDev, hBuffer, &memReq);
17836 
17837  res = vmaFindMemoryTypeIndex(
17838  allocator,
17839  memReq.memoryTypeBits,
17840  pAllocationCreateInfo,
17841  pMemoryTypeIndex);
17842 
17843  allocator->GetVulkanFunctions().vkDestroyBuffer(
17844  hDev, hBuffer, allocator->GetAllocationCallbacks());
17845  }
17846  return res;
17847 }
17848 
17849 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17850  VmaAllocator allocator,
17851  const VkImageCreateInfo* pImageCreateInfo,
17852  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17853  uint32_t* pMemoryTypeIndex)
17854 {
17855  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17856  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17857  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17858  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17859 
17860  const VkDevice hDev = allocator->m_hDevice;
17861  VkImage hImage = VK_NULL_HANDLE;
17862  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17863  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17864  if(res == VK_SUCCESS)
17865  {
17866  VkMemoryRequirements memReq = {};
17867  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17868  hDev, hImage, &memReq);
17869 
17870  res = vmaFindMemoryTypeIndex(
17871  allocator,
17872  memReq.memoryTypeBits,
17873  pAllocationCreateInfo,
17874  pMemoryTypeIndex);
17875 
17876  allocator->GetVulkanFunctions().vkDestroyImage(
17877  hDev, hImage, allocator->GetAllocationCallbacks());
17878  }
17879  return res;
17880 }
17881 
17882 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
17883  VmaAllocator allocator,
17884  const VmaPoolCreateInfo* pCreateInfo,
17885  VmaPool* pPool)
17886 {
17887  VMA_ASSERT(allocator && pCreateInfo && pPool);
17888 
17889  VMA_DEBUG_LOG("vmaCreatePool");
17890 
17891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17892 
17893  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17894 
17895 #if VMA_RECORDING_ENABLED
17896  if(allocator->GetRecorder() != VMA_NULL)
17897  {
17898  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17899  }
17900 #endif
17901 
17902  return res;
17903 }
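
/*
A sketch of creating and destroying a custom pool, assuming `allocator` is
valid and `memTypeIndex` was found earlier (e.g. with
vmaFindMemoryTypeIndexForBufferInfo):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block
    poolCreateInfo.maxBlockCount = 2; // 0 would mean "no limit" (SIZE_MAX)

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/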
17904 
17905 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17906  VmaAllocator allocator,
17907  VmaPool pool)
17908 {
17909  VMA_ASSERT(allocator);
17910 
17911  if(pool == VK_NULL_HANDLE)
17912  {
17913  return;
17914  }
17915 
17916  VMA_DEBUG_LOG("vmaDestroyPool");
17917 
17918  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17919 
17920 #if VMA_RECORDING_ENABLED
17921  if(allocator->GetRecorder() != VMA_NULL)
17922  {
17923  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
17924  }
17925 #endif
17926 
17927  allocator->DestroyPool(pool);
17928 }
17929 
17930 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
17931  VmaAllocator allocator,
17932  VmaPool pool,
17933  VmaPoolStats* pPoolStats)
17934 {
17935  VMA_ASSERT(allocator && pool && pPoolStats);
17936 
17937  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17938 
17939  allocator->GetPoolStats(pool, pPoolStats);
17940 }
17941 
17942 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
17943  VmaAllocator allocator,
17944  VmaPool pool,
17945  size_t* pLostAllocationCount)
17946 {
17947  VMA_ASSERT(allocator && pool);
17948 
17949  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17950 
17951 #if VMA_RECORDING_ENABLED
17952  if(allocator->GetRecorder() != VMA_NULL)
17953  {
17954  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
17955  }
17956 #endif
17957 
17958  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
17959 }
17960 
17961 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
17962 {
17963  VMA_ASSERT(allocator && pool);
17964 
17965  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17966 
17967  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
17968 
17969  return allocator->CheckPoolCorruption(pool);
17970 }
17971 
17972 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
17973  VmaAllocator allocator,
17974  VmaPool pool,
17975  const char** ppName)
17976 {
17977  VMA_ASSERT(allocator && pool && ppName);
17978 
17979  VMA_DEBUG_LOG("vmaGetPoolName");
17980 
17981  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17982 
17983  *ppName = pool->GetName();
17984 }
17985 
17986 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
17987  VmaAllocator allocator,
17988  VmaPool pool,
17989  const char* pName)
17990 {
17991  VMA_ASSERT(allocator && pool);
17992 
17993  VMA_DEBUG_LOG("vmaSetPoolName");
17994 
17995  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17996 
17997  pool->SetName(pName);
17998 
17999 #if VMA_RECORDING_ENABLED
18000  if(allocator->GetRecorder() != VMA_NULL)
18001  {
18002  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18003  }
18004 #endif
18005 }
18006 
18007 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18008  VmaAllocator allocator,
18009  const VkMemoryRequirements* pVkMemoryRequirements,
18010  const VmaAllocationCreateInfo* pCreateInfo,
18011  VmaAllocation* pAllocation,
18012  VmaAllocationInfo* pAllocationInfo)
18013 {
18014  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18015 
18016  VMA_DEBUG_LOG("vmaAllocateMemory");
18017 
18018  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18019 
18020  VkResult result = allocator->AllocateMemory(
18021  *pVkMemoryRequirements,
18022  false, // requiresDedicatedAllocation
18023  false, // prefersDedicatedAllocation
18024  VK_NULL_HANDLE, // dedicatedBuffer
18025  UINT32_MAX, // dedicatedBufferUsage
18026  VK_NULL_HANDLE, // dedicatedImage
18027  *pCreateInfo,
18028  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18029  1, // allocationCount
18030  pAllocation);
18031 
18032 #if VMA_RECORDING_ENABLED
18033  if(allocator->GetRecorder() != VMA_NULL)
18034  {
18035  allocator->GetRecorder()->RecordAllocateMemory(
18036  allocator->GetCurrentFrameIndex(),
18037  *pVkMemoryRequirements,
18038  *pCreateInfo,
18039  *pAllocation);
18040  }
18041 #endif
18042 
18043  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18044  {
18045  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18046  }
18047 
18048  return result;
18049 }
18050 
18051 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18052  VmaAllocator allocator,
18053  const VkMemoryRequirements* pVkMemoryRequirements,
18054  const VmaAllocationCreateInfo* pCreateInfo,
18055  size_t allocationCount,
18056  VmaAllocation* pAllocations,
18057  VmaAllocationInfo* pAllocationInfo)
18058 {
18059  if(allocationCount == 0)
18060  {
18061  return VK_SUCCESS;
18062  }
18063 
18064  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18065 
18066  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18067 
18068  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18069 
18070  VkResult result = allocator->AllocateMemory(
18071  *pVkMemoryRequirements,
18072  false, // requiresDedicatedAllocation
18073  false, // prefersDedicatedAllocation
18074  VK_NULL_HANDLE, // dedicatedBuffer
18075  UINT32_MAX, // dedicatedBufferUsage
18076  VK_NULL_HANDLE, // dedicatedImage
18077  *pCreateInfo,
18078  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18079  allocationCount,
18080  pAllocations);
18081 
18082 #if VMA_RECORDING_ENABLED
18083  if(allocator->GetRecorder() != VMA_NULL)
18084  {
18085  allocator->GetRecorder()->RecordAllocateMemoryPages(
18086  allocator->GetCurrentFrameIndex(),
18087  *pVkMemoryRequirements,
18088  *pCreateInfo,
18089  (uint64_t)allocationCount,
18090  pAllocations);
18091  }
18092 #endif
18093 
18094  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18095  {
18096  for(size_t i = 0; i < allocationCount; ++i)
18097  {
18098  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18099  }
18100  }
18101 
18102  return result;
18103 }
18104 
18105 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18106  VmaAllocator allocator,
18107  VkBuffer buffer,
18108  const VmaAllocationCreateInfo* pCreateInfo,
18109  VmaAllocation* pAllocation,
18110  VmaAllocationInfo* pAllocationInfo)
18111 {
18112  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18113 
18114  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18115 
18116  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18117 
18118  VkMemoryRequirements vkMemReq = {};
18119  bool requiresDedicatedAllocation = false;
18120  bool prefersDedicatedAllocation = false;
18121  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18122  requiresDedicatedAllocation,
18123  prefersDedicatedAllocation);
18124 
18125  VkResult result = allocator->AllocateMemory(
18126  vkMemReq,
18127  requiresDedicatedAllocation,
18128  prefersDedicatedAllocation,
18129  buffer, // dedicatedBuffer
18130  UINT32_MAX, // dedicatedBufferUsage
18131  VK_NULL_HANDLE, // dedicatedImage
18132  *pCreateInfo,
18133  VMA_SUBALLOCATION_TYPE_BUFFER,
18134  1, // allocationCount
18135  pAllocation);
18136 
18137 #if VMA_RECORDING_ENABLED
18138  if(allocator->GetRecorder() != VMA_NULL)
18139  {
18140  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18141  allocator->GetCurrentFrameIndex(),
18142  vkMemReq,
18143  requiresDedicatedAllocation,
18144  prefersDedicatedAllocation,
18145  *pCreateInfo,
18146  *pAllocation);
18147  }
18148 #endif
18149 
18150  if(pAllocationInfo && result == VK_SUCCESS)
18151  {
18152  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18153  }
18154 
18155  return result;
18156 }
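
/*
A sketch of the usual pattern around this function, assuming `allocator` and
`device` are valid (the higher-level vmaCreateBuffer wraps these steps in one
call):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    vkCreateBuffer(device, &bufCreateInfo, VMA_NULL, &buf);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaAllocateMemoryForBuffer(
        allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, alloc, buf);
    }
*/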
18157 
18158 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18159  VmaAllocator allocator,
18160  VkImage image,
18161  const VmaAllocationCreateInfo* pCreateInfo,
18162  VmaAllocation* pAllocation,
18163  VmaAllocationInfo* pAllocationInfo)
18164 {
18165  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18166 
18167  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18168 
18169  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18170 
18171  VkMemoryRequirements vkMemReq = {};
18172  bool requiresDedicatedAllocation = false;
18173  bool prefersDedicatedAllocation = false;
18174  allocator->GetImageMemoryRequirements(image, vkMemReq,
18175  requiresDedicatedAllocation, prefersDedicatedAllocation);
18176 
18177  VkResult result = allocator->AllocateMemory(
18178  vkMemReq,
18179  requiresDedicatedAllocation,
18180  prefersDedicatedAllocation,
18181  VK_NULL_HANDLE, // dedicatedBuffer
18182  UINT32_MAX, // dedicatedBufferUsage
18183  image, // dedicatedImage
18184  *pCreateInfo,
18185  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18186  1, // allocationCount
18187  pAllocation);
18188 
18189 #if VMA_RECORDING_ENABLED
18190  if(allocator->GetRecorder() != VMA_NULL)
18191  {
18192  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18193  allocator->GetCurrentFrameIndex(),
18194  vkMemReq,
18195  requiresDedicatedAllocation,
18196  prefersDedicatedAllocation,
18197  *pCreateInfo,
18198  *pAllocation);
18199  }
18200 #endif
18201 
18202  if(pAllocationInfo && result == VK_SUCCESS)
18203  {
18204  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18205  }
18206 
18207  return result;
18208 }
18209 
18210 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18211  VmaAllocator allocator,
18212  VmaAllocation allocation)
18213 {
18214  VMA_ASSERT(allocator);
18215 
18216  if(allocation == VK_NULL_HANDLE)
18217  {
18218  return;
18219  }
18220 
18221  VMA_DEBUG_LOG("vmaFreeMemory");
18222 
18223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18224 
18225 #if VMA_RECORDING_ENABLED
18226  if(allocator->GetRecorder() != VMA_NULL)
18227  {
18228  allocator->GetRecorder()->RecordFreeMemory(
18229  allocator->GetCurrentFrameIndex(),
18230  allocation);
18231  }
18232 #endif
18233 
18234  allocator->FreeMemory(
18235  1, // allocationCount
18236  &allocation);
18237 }
18238 
18239 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18240  VmaAllocator allocator,
18241  size_t allocationCount,
18242  const VmaAllocation* pAllocations)
18243 {
18244  if(allocationCount == 0)
18245  {
18246  return;
18247  }
18248 
18249  VMA_ASSERT(allocator);
18250 
18251  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18252 
18253  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18254 
18255 #if VMA_RECORDING_ENABLED
18256  if(allocator->GetRecorder() != VMA_NULL)
18257  {
18258  allocator->GetRecorder()->RecordFreeMemoryPages(
18259  allocator->GetCurrentFrameIndex(),
18260  (uint64_t)allocationCount,
18261  pAllocations);
18262  }
18263 #endif
18264 
18265  allocator->FreeMemory(allocationCount, pAllocations);
18266 }
18267 
18268 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
18269  VmaAllocator allocator,
18270  VmaAllocation allocation,
18271  VkDeviceSize newSize)
18272 {
18273  VMA_ASSERT(allocator && allocation);
18274 
18275  VMA_DEBUG_LOG("vmaResizeAllocation");
18276 
18277  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18278 
18279  return allocator->ResizeAllocation(allocation, newSize);
18280 }
18281 
18282 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18283  VmaAllocator allocator,
18284  VmaAllocation allocation,
18285  VmaAllocationInfo* pAllocationInfo)
18286 {
18287  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18288 
18289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18290 
18291 #if VMA_RECORDING_ENABLED
18292  if(allocator->GetRecorder() != VMA_NULL)
18293  {
18294  allocator->GetRecorder()->RecordGetAllocationInfo(
18295  allocator->GetCurrentFrameIndex(),
18296  allocation);
18297  }
18298 #endif
18299 
18300  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18301 }
18302 
18303 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18304  VmaAllocator allocator,
18305  VmaAllocation allocation)
18306 {
18307  VMA_ASSERT(allocator && allocation);
18308 
18309  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18310 
18311 #if VMA_RECORDING_ENABLED
18312  if(allocator->GetRecorder() != VMA_NULL)
18313  {
18314  allocator->GetRecorder()->RecordTouchAllocation(
18315  allocator->GetCurrentFrameIndex(),
18316  allocation);
18317  }
18318 #endif
18319 
18320  return allocator->TouchAllocation(allocation);
18321 }
18322 
18323 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18324  VmaAllocator allocator,
18325  VmaAllocation allocation,
18326  void* pUserData)
18327 {
18328  VMA_ASSERT(allocator && allocation);
18329 
18330  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18331 
18332  allocation->SetUserData(allocator, pUserData);
18333 
18334 #if VMA_RECORDING_ENABLED
18335  if(allocator->GetRecorder() != VMA_NULL)
18336  {
18337  allocator->GetRecorder()->RecordSetAllocationUserData(
18338  allocator->GetCurrentFrameIndex(),
18339  allocation,
18340  pUserData);
18341  }
18342 #endif
18343 }
18344 
18345 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
18346  VmaAllocator allocator,
18347  VmaAllocation* pAllocation)
18348 {
18349  VMA_ASSERT(allocator && pAllocation);
18350 
18351  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
18352 
18353  allocator->CreateLostAllocation(pAllocation);
18354 
18355 #if VMA_RECORDING_ENABLED
18356  if(allocator->GetRecorder() != VMA_NULL)
18357  {
18358  allocator->GetRecorder()->RecordCreateLostAllocation(
18359  allocator->GetCurrentFrameIndex(),
18360  *pAllocation);
18361  }
18362 #endif
18363 }
18364 
18365 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
18366  VmaAllocator allocator,
18367  VmaAllocation allocation,
18368  void** ppData)
18369 {
18370  VMA_ASSERT(allocator && allocation && ppData);
18371 
18372  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18373 
18374  VkResult res = allocator->Map(allocation, ppData);
18375 
18376 #if VMA_RECORDING_ENABLED
18377  if(allocator->GetRecorder() != VMA_NULL)
18378  {
18379  allocator->GetRecorder()->RecordMapMemory(
18380  allocator->GetCurrentFrameIndex(),
18381  allocation);
18382  }
18383 #endif
18384 
18385  return res;
18386 }
18387 
18388 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
18389  VmaAllocator allocator,
18390  VmaAllocation allocation)
18391 {
18392  VMA_ASSERT(allocator && allocation);
18393 
18394  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18395 
18396 #if VMA_RECORDING_ENABLED
18397  if(allocator->GetRecorder() != VMA_NULL)
18398  {
18399  allocator->GetRecorder()->RecordUnmapMemory(
18400  allocator->GetCurrentFrameIndex(),
18401  allocation);
18402  }
18403 #endif
18404 
18405  allocator->Unmap(allocation);
18406 }
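
/*
Mapping is reference-counted per allocation; a sketch of filling a
host-visible allocation, assuming `allocator` and `alloc` are valid and
`srcData`/`srcSize` are hypothetical application data:

    void* mapped = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &mapped);
    if(res == VK_SUCCESS)
    {
        memcpy(mapped, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
        // For non-HOST_COHERENT memory, follow with vmaFlushAllocation(),
        // defined below.
    }
*/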
18407 
18408 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18409 {
18410  VMA_ASSERT(allocator && allocation);
18411 
18412  VMA_DEBUG_LOG("vmaFlushAllocation");
18413 
18414  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18415 
18416  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
18417 
18418 #if VMA_RECORDING_ENABLED
18419  if(allocator->GetRecorder() != VMA_NULL)
18420  {
18421  allocator->GetRecorder()->RecordFlushAllocation(
18422  allocator->GetCurrentFrameIndex(),
18423  allocation, offset, size);
18424  }
18425 #endif
18426 
18427  return res;
18428 }
18429 
18430 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18431 {
18432  VMA_ASSERT(allocator && allocation);
18433 
18434  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18435 
18436  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18437 
18438  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
18439 
18440 #if VMA_RECORDING_ENABLED
18441  if(allocator->GetRecorder() != VMA_NULL)
18442  {
18443  allocator->GetRecorder()->RecordInvalidateAllocation(
18444  allocator->GetCurrentFrameIndex(),
18445  allocation, offset, size);
18446  }
18447 #endif
18448 
18449  return res;
18450 }
18451 
18452 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
18453  VmaAllocator allocator,
18454  uint32_t allocationCount,
18455  const VmaAllocation* allocations,
18456  const VkDeviceSize* offsets,
18457  const VkDeviceSize* sizes)
18458 {
18459  VMA_ASSERT(allocator);
18460 
18461  if(allocationCount == 0)
18462  {
18463  return VK_SUCCESS;
18464  }
18465 
18466  VMA_ASSERT(allocations);
18467 
18468  VMA_DEBUG_LOG("vmaFlushAllocations");
18469 
18470  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18471 
18472  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
18473 
18474 #if VMA_RECORDING_ENABLED
18475  if(allocator->GetRecorder() != VMA_NULL)
18476  {
18477  //TODO
18478  }
18479 #endif
18480 
18481  return res;
18482 }
18483 
18484 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
18485  VmaAllocator allocator,
18486  uint32_t allocationCount,
18487  const VmaAllocation* allocations,
18488  const VkDeviceSize* offsets,
18489  const VkDeviceSize* sizes)
18490 {
18491  VMA_ASSERT(allocator);
18492 
18493  if(allocationCount == 0)
18494  {
18495  return VK_SUCCESS;
18496  }
18497 
18498  VMA_ASSERT(allocations);
18499 
18500  VMA_DEBUG_LOG("vmaInvalidateAllocations");
18501 
18502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18503 
18504  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
18505 
18506 #if VMA_RECORDING_ENABLED
18507  if(allocator->GetRecorder() != VMA_NULL)
18508  {
18509  //TODO
18510  }
18511 #endif
18512 
18513  return res;
18514 }
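// Usage sketch: flushing several allocations in one call. Passing null for
// the offsets/sizes arrays is interpreted as offset 0 and VK_WHOLE_SIZE for
// every element. `allocA` and `allocB` are hypothetical allocations.
//
//     VmaAllocation allocs[2] = { allocA, allocB };
//     VkResult res = vmaFlushAllocations(allocator, 2, allocs, VMA_NULL, VMA_NULL);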
18515 
18516 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18517 {
18518  VMA_ASSERT(allocator);
18519 
18520  VMA_DEBUG_LOG("vmaCheckCorruption");
18521 
18522  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18523 
18524  return allocator->CheckCorruption(memoryTypeBits);
18525 }
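// Usage sketch: checking every memory type for corrupted allocation margins.
// This is only effective when the implementation was compiled with
// VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled; otherwise the
// call reports that the feature is not present.
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
//     // VK_SUCCESS means margins were checked and no corruption was found.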
18526 
18527 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18528  VmaAllocator allocator,
18529  const VmaAllocation* pAllocations,
18530  size_t allocationCount,
18531  VkBool32* pAllocationsChanged,
18532  const VmaDefragmentationInfo *pDefragmentationInfo,
18533  VmaDefragmentationStats* pDefragmentationStats)
18534 {
18535  // Deprecated interface, reimplemented in terms of the new vmaDefragmentationBegin()/vmaDefragmentationEnd() API.
18536 
18537  VmaDefragmentationInfo2 info2 = {};
18538  info2.allocationCount = (uint32_t)allocationCount;
18539  info2.pAllocations = pAllocations;
18540  info2.pAllocationsChanged = pAllocationsChanged;
18541  if(pDefragmentationInfo != VMA_NULL)
18542  {
18543  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18544  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18545  }
18546  else
18547  {
18548  info2.maxCpuAllocationsToMove = UINT32_MAX;
18549  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18550  }
18551  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18552 
18553  VmaDefragmentationContext ctx;
18554  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18555  if(res == VK_NOT_READY)
18556  {
18557  res = vmaDefragmentationEnd(allocator, ctx);
18558  }
18559  return res;
18560 }
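// Usage sketch: the equivalent of the deprecated vmaDefragment() expressed
// directly with the newer API, mirroring the wrapper above. `allocations`
// and `allocCount` are hypothetical names; CPU-side moves only.
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocations;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     VmaDefragmentationStats stats = {};
//     VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
//     if(res == VK_NOT_READY)
//         res = vmaDefragmentationEnd(allocator, defragCtx);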
18561 
18562 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18563  VmaAllocator allocator,
18564  const VmaDefragmentationInfo2* pInfo,
18565  VmaDefragmentationStats* pStats,
18566  VmaDefragmentationContext *pContext)
18567 {
18568  VMA_ASSERT(allocator && pInfo && pContext);
18569 
18570  // Degenerate case: Nothing to defragment.
18571  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18572  {
18573  return VK_SUCCESS;
18574  }
18575 
18576  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18577  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18578  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18579  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18580 
18581  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18582 
18583  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18584 
18585  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18586 
18587 #if VMA_RECORDING_ENABLED
18588  if(allocator->GetRecorder() != VMA_NULL)
18589  {
18590  allocator->GetRecorder()->RecordDefragmentationBegin(
18591  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18592  }
18593 #endif
18594 
18595  return res;
18596 }
18597 
18598 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18599  VmaAllocator allocator,
18600  VmaDefragmentationContext context)
18601 {
18602  VMA_ASSERT(allocator);
18603 
18604  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18605 
18606  if(context != VK_NULL_HANDLE)
18607  {
18608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18609 
18610 #if VMA_RECORDING_ENABLED
18611  if(allocator->GetRecorder() != VMA_NULL)
18612  {
18613  allocator->GetRecorder()->RecordDefragmentationEnd(
18614  allocator->GetCurrentFrameIndex(), context);
18615  }
18616 #endif
18617 
18618  return allocator->DefragmentationEnd(context);
18619  }
18620  else
18621  {
18622  return VK_SUCCESS;
18623  }
18624 }
18625 
18626 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18627  VmaAllocator allocator,
18628  VmaDefragmentationContext context,
18629  VmaDefragmentationPassInfo* pInfo
18630  )
18631 {
18632  VMA_ASSERT(allocator);
18633  VMA_ASSERT(pInfo);
18634  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
18635 
18636  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18637 
18638  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18639 
18640  if(context == VK_NULL_HANDLE)
18641  {
18642  pInfo->moveCount = 0;
18643  return VK_SUCCESS;
18644  }
18645 
18646  return allocator->DefragmentationPassBegin(pInfo, context);
18647 }
18648 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18649  VmaAllocator allocator,
18650  VmaDefragmentationContext context)
18651 {
18652  VMA_ASSERT(allocator);
18653 
18654  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18655  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18656 
18657  if(context == VK_NULL_HANDLE)
18658  return VK_SUCCESS;
18659 
18660  return allocator->DefragmentationPassEnd(context);
18661 }
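// Usage sketch (a hedged outline, not a verbatim recipe): driving incremental
// defragmentation after vmaDefragmentationBegin() was called with
// VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set in VmaDefragmentationInfo2::flags.
// Performing the copies for each reported move is application-specific and
// elided here; `defragCtx` is the context returned by the begin call.
//
//     VkResult res = VK_NOT_READY;
//     while(res == VK_NOT_READY)
//     {
//         VmaDefragmentationPassInfo pass = {};
//         vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
//         // ...perform the pass.moveCount moves described in pass.pMoves...
//         res = vmaEndDefragmentationPass(allocator, defragCtx);
//     }
//     vmaDefragmentationEnd(allocator, defragCtx);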
18662 
18663 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18664  VmaAllocator allocator,
18665  VmaAllocation allocation,
18666  VkBuffer buffer)
18667 {
18668  VMA_ASSERT(allocator && allocation && buffer);
18669 
18670  VMA_DEBUG_LOG("vmaBindBufferMemory");
18671 
18672  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18673 
18674  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18675 }
18676 
18677 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18678  VmaAllocator allocator,
18679  VmaAllocation allocation,
18680  VkDeviceSize allocationLocalOffset,
18681  VkBuffer buffer,
18682  const void* pNext)
18683 {
18684  VMA_ASSERT(allocator && allocation && buffer);
18685 
18686  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18687 
18688  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18689 
18690  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18691 }
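// Usage sketch: allocating memory for a buffer created directly through
// Vulkan and then binding it, which is roughly what vmaCreateBuffer() does
// in one call. `device` and `bufCreateInfo` are hypothetical names.
//
//     VkBuffer buf = VK_NULL_HANDLE;
//     vkCreateBuffer(device, &bufCreateInfo, VMA_NULL, &buf);
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VmaAllocation alloc = VK_NULL_HANDLE;
//     vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
//     vmaBindBufferMemory(allocator, alloc, buf);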
18692 
18693 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18694  VmaAllocator allocator,
18695  VmaAllocation allocation,
18696  VkImage image)
18697 {
18698  VMA_ASSERT(allocator && allocation && image);
18699 
18700  VMA_DEBUG_LOG("vmaBindImageMemory");
18701 
18702  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18703 
18704  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18705 }
18706 
18707 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18708  VmaAllocator allocator,
18709  VmaAllocation allocation,
18710  VkDeviceSize allocationLocalOffset,
18711  VkImage image,
18712  const void* pNext)
18713 {
18714  VMA_ASSERT(allocator && allocation && image);
18715 
18716  VMA_DEBUG_LOG("vmaBindImageMemory2");
18717 
18718  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18719 
18720  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18721 }
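// The image case is analogous (sketch; `imgCreateInfo` is hypothetical):
//
//     VkImage img = VK_NULL_HANDLE;
//     vkCreateImage(device, &imgCreateInfo, VMA_NULL, &img);
//     VmaAllocation alloc = VK_NULL_HANDLE;
//     vmaAllocateMemoryForImage(allocator, img, &allocCreateInfo, &alloc, VMA_NULL);
//     vmaBindImageMemory(allocator, alloc, img);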
18722 
18723 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18724  VmaAllocator allocator,
18725  const VkBufferCreateInfo* pBufferCreateInfo,
18726  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18727  VkBuffer* pBuffer,
18728  VmaAllocation* pAllocation,
18729  VmaAllocationInfo* pAllocationInfo)
18730 {
18731  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18732 
18733  if(pBufferCreateInfo->size == 0)
18734  {
18735  return VK_ERROR_VALIDATION_FAILED_EXT;
18736  }
18737  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18738  !allocator->m_UseKhrBufferDeviceAddress)
18739  {
18740  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18741  return VK_ERROR_VALIDATION_FAILED_EXT;
18742  }
18743 
18744  VMA_DEBUG_LOG("vmaCreateBuffer");
18745 
18746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18747 
18748  *pBuffer = VK_NULL_HANDLE;
18749  *pAllocation = VK_NULL_HANDLE;
18750 
18751  // 1. Create VkBuffer.
18752  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18753  allocator->m_hDevice,
18754  pBufferCreateInfo,
18755  allocator->GetAllocationCallbacks(),
18756  pBuffer);
18757  if(res >= 0)
18758  {
18759  // 2. vkGetBufferMemoryRequirements.
18760  VkMemoryRequirements vkMemReq = {};
18761  bool requiresDedicatedAllocation = false;
18762  bool prefersDedicatedAllocation = false;
18763  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18764  requiresDedicatedAllocation, prefersDedicatedAllocation);
18765 
18766  // 3. Allocate memory using allocator.
18767  res = allocator->AllocateMemory(
18768  vkMemReq,
18769  requiresDedicatedAllocation,
18770  prefersDedicatedAllocation,
18771  *pBuffer, // dedicatedBuffer
18772  pBufferCreateInfo->usage, // dedicatedBufferUsage
18773  VK_NULL_HANDLE, // dedicatedImage
18774  *pAllocationCreateInfo,
18775  VMA_SUBALLOCATION_TYPE_BUFFER,
18776  1, // allocationCount
18777  pAllocation);
18778 
18779 #if VMA_RECORDING_ENABLED
18780  if(allocator->GetRecorder() != VMA_NULL)
18781  {
18782  allocator->GetRecorder()->RecordCreateBuffer(
18783  allocator->GetCurrentFrameIndex(),
18784  *pBufferCreateInfo,
18785  *pAllocationCreateInfo,
18786  *pAllocation);
18787  }
18788 #endif
18789 
18790  if(res >= 0)
18791  {
18792  // 4. Bind buffer with memory.
18793  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18794  {
18795  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18796  }
18797  if(res >= 0)
18798  {
18799  // All steps succeeded.
18800  #if VMA_STATS_STRING_ENABLED
18801  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
18802  #endif
18803  if(pAllocationInfo != VMA_NULL)
18804  {
18805  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18806  }
18807 
18808  return VK_SUCCESS;
18809  }
18810  allocator->FreeMemory(
18811  1, // allocationCount
18812  pAllocation);
18813  *pAllocation = VK_NULL_HANDLE;
18814  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18815  *pBuffer = VK_NULL_HANDLE;
18816  return res;
18817  }
18818  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18819  *pBuffer = VK_NULL_HANDLE;
18820  return res;
18821  }
18822  return res;
18823 }
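// Usage sketch: the canonical one-call path this function implements, which
// creates the buffer, allocates memory for it, and binds them together. The
// sizes and usage flags below are illustrative.
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buf;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//         &buf, &alloc, VMA_NULL);
//     // ...use the buffer...
//     vmaDestroyBuffer(allocator, buf, alloc); // counterpart; safe with null handles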
18824 
18825 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18826  VmaAllocator allocator,
18827  VkBuffer buffer,
18828  VmaAllocation allocation)
18829 {
18830  VMA_ASSERT(allocator);
18831 
18832  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18833  {
18834  return;
18835  }
18836 
18837  VMA_DEBUG_LOG("vmaDestroyBuffer");
18838 
18839  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18840 
18841 #if VMA_RECORDING_ENABLED
18842  if(allocator->GetRecorder() != VMA_NULL)
18843  {
18844  allocator->GetRecorder()->RecordDestroyBuffer(
18845  allocator->GetCurrentFrameIndex(),
18846  allocation);
18847  }
18848 #endif
18849 
18850  if(buffer != VK_NULL_HANDLE)
18851  {
18852  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18853  }
18854 
18855  if(allocation != VK_NULL_HANDLE)
18856  {
18857  allocator->FreeMemory(
18858  1, // allocationCount
18859  &allocation);
18860  }
18861 }
18862 
18863 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
18864  VmaAllocator allocator,
18865  const VkImageCreateInfo* pImageCreateInfo,
18866  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18867  VkImage* pImage,
18868  VmaAllocation* pAllocation,
18869  VmaAllocationInfo* pAllocationInfo)
18870 {
18871  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
18872 
18873  if(pImageCreateInfo->extent.width == 0 ||
18874  pImageCreateInfo->extent.height == 0 ||
18875  pImageCreateInfo->extent.depth == 0 ||
18876  pImageCreateInfo->mipLevels == 0 ||
18877  pImageCreateInfo->arrayLayers == 0)
18878  {
18879  return VK_ERROR_VALIDATION_FAILED_EXT;
18880  }
18881 
18882  VMA_DEBUG_LOG("vmaCreateImage");
18883 
18884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18885 
18886  *pImage = VK_NULL_HANDLE;
18887  *pAllocation = VK_NULL_HANDLE;
18888 
18889  // 1. Create VkImage.
18890  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
18891  allocator->m_hDevice,
18892  pImageCreateInfo,
18893  allocator->GetAllocationCallbacks(),
18894  pImage);
18895  if(res >= 0)
18896  {
18897  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
18898  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
18899  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
18900 
18901  // 2. Allocate memory using allocator.
18902  VkMemoryRequirements vkMemReq = {};
18903  bool requiresDedicatedAllocation = false;
18904  bool prefersDedicatedAllocation = false;
18905  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
18906  requiresDedicatedAllocation, prefersDedicatedAllocation);
18907 
18908  res = allocator->AllocateMemory(
18909  vkMemReq,
18910  requiresDedicatedAllocation,
18911  prefersDedicatedAllocation,
18912  VK_NULL_HANDLE, // dedicatedBuffer
18913  UINT32_MAX, // dedicatedBufferUsage
18914  *pImage, // dedicatedImage
18915  *pAllocationCreateInfo,
18916  suballocType,
18917  1, // allocationCount
18918  pAllocation);
18919 
18920 #if VMA_RECORDING_ENABLED
18921  if(allocator->GetRecorder() != VMA_NULL)
18922  {
18923  allocator->GetRecorder()->RecordCreateImage(
18924  allocator->GetCurrentFrameIndex(),
18925  *pImageCreateInfo,
18926  *pAllocationCreateInfo,
18927  *pAllocation);
18928  }
18929 #endif
18930 
18931  if(res >= 0)
18932  {
18933  // 3. Bind image with memory.
18934  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18935  {
18936  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
18937  }
18938  if(res >= 0)
18939  {
18940  // All steps succeeded.
18941  #if VMA_STATS_STRING_ENABLED
18942  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
18943  #endif
18944  if(pAllocationInfo != VMA_NULL)
18945  {
18946  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18947  }
18948 
18949  return VK_SUCCESS;
18950  }
18951  allocator->FreeMemory(
18952  1, // allocationCount
18953  pAllocation);
18954  *pAllocation = VK_NULL_HANDLE;
18955  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18956  *pImage = VK_NULL_HANDLE;
18957  return res;
18958  }
18959  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18960  *pImage = VK_NULL_HANDLE;
18961  return res;
18962  }
18963  return res;
18964 }
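// Usage sketch: creating a sampled 2D image together with its allocation.
// All values below are illustrative.
//
//     VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//     imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//     imgCreateInfo.extent = { 1024, 1024, 1 };
//     imgCreateInfo.mipLevels = 1;
//     imgCreateInfo.arrayLayers = 1;
//     imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//     imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//     imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//     imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkImage img;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
//         &img, &alloc, VMA_NULL);
//     // ...use the image...
//     vmaDestroyImage(allocator, img, alloc);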
18965 
18966 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
18967  VmaAllocator allocator,
18968  VkImage image,
18969  VmaAllocation allocation)
18970 {
18971  VMA_ASSERT(allocator);
18972 
18973  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18974  {
18975  return;
18976  }
18977 
18978  VMA_DEBUG_LOG("vmaDestroyImage");
18979 
18980  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18981 
18982 #if VMA_RECORDING_ENABLED
18983  if(allocator->GetRecorder() != VMA_NULL)
18984  {
18985  allocator->GetRecorder()->RecordDestroyImage(
18986  allocator->GetCurrentFrameIndex(),
18987  allocation);
18988  }
18989 #endif
18990 
18991  if(image != VK_NULL_HANDLE)
18992  {
18993  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
18994  }
18995  if(allocation != VK_NULL_HANDLE)
18996  {
18997  allocator->FreeMemory(
18998  1, // allocationCount
18999  &allocation);
19000  }
19001 }
19002 
19003 #endif // #ifdef VMA_IMPLEMENTATION