Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
1893 #if VMA_RECORDING_ENABLED
1894  #include <chrono>
1895  #if defined(_WIN32)
1896  #include <windows.h>
1897  #else
1898  #include <sstream>
1899  #include <thread>
1900  #endif
1901 #endif
1902 
1903 #ifdef __cplusplus
1904 extern "C" {
1905 #endif
1906 
1907 /*
1908 Define this macro to 0/1 to disable/enable support for recording functionality,
1909 available through VmaAllocatorCreateInfo::pRecordSettings.
1910 */
1911 #ifndef VMA_RECORDING_ENABLED
1912  #define VMA_RECORDING_ENABLED 0
1913 #endif
1914 
1915 #ifndef NOMINMAX
1916  #define NOMINMAX // For windows.h
1917 #endif
1918 
1919 #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
1920  extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
1921  extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
1922  extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1923  extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1924  extern PFN_vkAllocateMemory vkAllocateMemory;
1925  extern PFN_vkFreeMemory vkFreeMemory;
1926  extern PFN_vkMapMemory vkMapMemory;
1927  extern PFN_vkUnmapMemory vkUnmapMemory;
1928  extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1929  extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1930  extern PFN_vkBindBufferMemory vkBindBufferMemory;
1931  extern PFN_vkBindImageMemory vkBindImageMemory;
1932  extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1933  extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1934  extern PFN_vkCreateBuffer vkCreateBuffer;
1935  extern PFN_vkDestroyBuffer vkDestroyBuffer;
1936  extern PFN_vkCreateImage vkCreateImage;
1937  extern PFN_vkDestroyImage vkDestroyImage;
1938  extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1939  #if VMA_VULKAN_VERSION >= 1001000
1940  extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
1941  extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
1942  extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
1943  extern PFN_vkBindImageMemory2 vkBindImageMemory2;
1944  extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
1945  #endif // #if VMA_VULKAN_VERSION >= 1001000
1946 #endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
1947 
1948 #ifndef VULKAN_H_
1949  #include <vulkan/vulkan.h>
1950 #endif
1951 
1952 // Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
1953 // where AAA = major, BBB = minor, CCC = patch.
1954 // If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1955 #if !defined(VMA_VULKAN_VERSION)
1956  #if defined(VK_VERSION_1_2)
1957  #define VMA_VULKAN_VERSION 1002000
1958  #elif defined(VK_VERSION_1_1)
1959  #define VMA_VULKAN_VERSION 1001000
1960  #else
1961  #define VMA_VULKAN_VERSION 1000000
1962  #endif
1963 #endif
1964 
1965 #if !defined(VMA_DEDICATED_ALLOCATION)
1966  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1967  #define VMA_DEDICATED_ALLOCATION 1
1968  #else
1969  #define VMA_DEDICATED_ALLOCATION 0
1970  #endif
1971 #endif
1972 
1973 #if !defined(VMA_BIND_MEMORY2)
1974  #if VK_KHR_bind_memory2
1975  #define VMA_BIND_MEMORY2 1
1976  #else
1977  #define VMA_BIND_MEMORY2 0
1978  #endif
1979 #endif
1980 
1981 #if !defined(VMA_MEMORY_BUDGET)
1982  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1983  #define VMA_MEMORY_BUDGET 1
1984  #else
1985  #define VMA_MEMORY_BUDGET 0
1986  #endif
1987 #endif
1988 
1989 // Defined to 1 when the VK_KHR_buffer_device_address device extension or the equivalent core Vulkan 1.2 feature is defined in the included Vulkan headers.
1990 #if !defined(VMA_BUFFER_DEVICE_ADDRESS)
1991  #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
1992  #define VMA_BUFFER_DEVICE_ADDRESS 1
1993  #else
1994  #define VMA_BUFFER_DEVICE_ADDRESS 0
1995  #endif
1996 #endif
1997 
1998 // Define these macros to decorate all public functions with additional code,
1999 // inserted before and after the return type, respectively. This may be useful for
2000 // exporting the functions when compiling VMA as a separate library. Example:
2001 // #define VMA_CALL_PRE __declspec(dllexport)
2002 // #define VMA_CALL_POST __cdecl
2003 #ifndef VMA_CALL_PRE
2004  #define VMA_CALL_PRE
2005 #endif
2006 #ifndef VMA_CALL_POST
2007  #define VMA_CALL_POST
2008 #endif
2009 
2010 // Define this macro to decorate pointers with an attribute specifying the
2011 // length of the array they point to if they are not null.
2012 //
2013 // The length may be one of
2014 // - The name of another parameter in the argument list where the pointer is declared
2015 // - The name of another member in the struct where the pointer is declared
2016 // - The name of a member of a struct type, meaning the value of that member in
2017 // the context of the call. For example
2018 // VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
2019 // this means the number of memory heaps available in the device associated
2020 // with the VmaAllocator in question.
2021 #ifndef VMA_LEN_IF_NOT_NULL
2022  #define VMA_LEN_IF_NOT_NULL(len)
2023 #endif
2024 
2025 // The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
2026 // see: https://clang.llvm.org/docs/AttributeReference.html#nullable
2027 #ifndef VMA_NULLABLE
2028  #ifdef __clang__
2029  #define VMA_NULLABLE _Nullable
2030  #else
2031  #define VMA_NULLABLE
2032  #endif
2033 #endif
2034 
2035 // The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
2036 // see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
2037 #ifndef VMA_NOT_NULL
2038  #ifdef __clang__
2039  #define VMA_NOT_NULL _Nonnull
2040  #else
2041  #define VMA_NOT_NULL
2042  #endif
2043 #endif
2044 
2045 // If non-dispatchable handles are represented as pointers then we can give
2046 // them nullability annotations.
2047 #ifndef VMA_NOT_NULL_NON_DISPATCHABLE
2048  #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
2049  #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
2050  #else
2051  #define VMA_NOT_NULL_NON_DISPATCHABLE
2052  #endif
2053 #endif
2054 
2055 #ifndef VMA_NULLABLE_NON_DISPATCHABLE
2056  #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
2057  #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
2058  #else
2059  #define VMA_NULLABLE_NON_DISPATCHABLE
2060  #endif
2061 #endif
2062 
2072 VK_DEFINE_HANDLE(VmaAllocator)
2073 
2074 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
2076  VmaAllocator VMA_NOT_NULL allocator,
2077  uint32_t memoryType,
2078  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
2079  VkDeviceSize size,
2080  void* VMA_NULLABLE pUserData);
2082 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
2083  VmaAllocator VMA_NOT_NULL allocator,
2084  uint32_t memoryType,
2085  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
2086  VkDeviceSize size,
2087  void* VMA_NULLABLE pUserData);
2088 
2102  void* VMA_NULLABLE pUserData;
2104 
2200 
2203 typedef VkFlags VmaAllocatorCreateFlags;
2204 
2209 typedef struct VmaVulkanFunctions {
2210  PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
2211  PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
2212  PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
2213  PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
2214  PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
2215  PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
2216  PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
2217  PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
2218  PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
2219  PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
2220  PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
2221  PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
2222  PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
2223  PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
2224  PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
2225  PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
2226  PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
2227 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
2228  PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
2229  PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
2230 #endif
2231 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
2232  PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
2233  PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
2234 #endif
2235 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
2236  PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
2237 #endif
2239 
2241 typedef enum VmaRecordFlagBits {
2248 
2249  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2251 typedef VkFlags VmaRecordFlags;
2252 
2254 typedef struct VmaRecordSettings
2255 {
2265  const char* VMA_NOT_NULL pFilePath;
2267 
2270 {
2274 
2275  VkPhysicalDevice VMA_NOT_NULL physicalDevice;
2277 
2278  VkDevice VMA_NOT_NULL device;
2280 
2283 
2284  const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
2286 
2326  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
2327 
2339  const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
2344  VkInstance VMA_NOT_NULL instance;
2355 
2357 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2358  const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
2359  VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);
2360 
2362 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2363  VmaAllocator VMA_NULLABLE allocator);
2364 
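
// Illustrative sketch (an editorial example, not part of the original file):
// a minimal create/destroy cycle for the allocator, assuming instance,
// physicalDevice and device were created by the application beforehand.
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//     allocatorInfo.instance = instance;
//
//     VmaAllocator allocator;
//     VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//     // ... use the allocator ...
//     vmaDestroyAllocator(allocator);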
2367 typedef struct VmaAllocatorInfo
2368 {
2373  VkInstance VMA_NOT_NULL instance;
2378  VkPhysicalDevice VMA_NOT_NULL physicalDevice;
2383  VkDevice VMA_NOT_NULL device;
2385 
2391 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
2392 
2397 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2398  VmaAllocator VMA_NOT_NULL allocator,
2399  const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);
2400 
2405 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2406  VmaAllocator VMA_NOT_NULL allocator,
2407  const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
2408 
2415 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2416  VmaAllocator VMA_NOT_NULL allocator,
2417  uint32_t memoryTypeIndex,
2418  VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
2419 
2428 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2429  VmaAllocator VMA_NOT_NULL allocator,
2430  uint32_t frameIndex);
2431 
2434 typedef struct VmaStatInfo
2435 {
2437  uint32_t blockCount;
2443  VkDeviceSize usedBytes;
2445  VkDeviceSize unusedBytes;
2446  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
2447  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
2449 
2451 typedef struct VmaStats
2452 {
2453  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2454  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2457 
2467 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2468  VmaAllocator VMA_NOT_NULL allocator,
2469  VmaStats* VMA_NOT_NULL pStats);
2470 
2473 typedef struct VmaBudget
2474 {
2477  VkDeviceSize blockBytes;
2478 
2488  VkDeviceSize allocationBytes;
2489 
2498  VkDeviceSize usage;
2499 
2509  VkDeviceSize budget;
2511 
2522 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2523  VmaAllocator VMA_NOT_NULL allocator,
2524  VmaBudget* VMA_NOT_NULL pBudget);
2525 
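
// Illustrative sketch: querying the current budget. pBudget must point to an
// array with one element per memory heap; VK_MAX_MEMORY_HEAPS is always large enough.
//
//     VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
//     vmaGetBudget(allocator, budgets);
//     // Compare budgets[heapIndex].usage against budgets[heapIndex].budget
//     // before making large new allocations from that heap.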
2526 #ifndef VMA_STATS_STRING_ENABLED
2527 #define VMA_STATS_STRING_ENABLED 1
2528 #endif
2529 
2530 #if VMA_STATS_STRING_ENABLED
2531 
2533 
2535 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2536  VmaAllocator VMA_NOT_NULL allocator,
2537  char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
2538  VkBool32 detailedMap);
2539 
2540 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2541  VmaAllocator VMA_NOT_NULL allocator,
2542  char* VMA_NULLABLE pStatsString);
2543 
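
// Illustrative usage sketch: dumping the allocator state as a JSON string for
// debugging, then releasing it with the matching free function.
//
//     char* statsString = nullptr;
//     vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
//     // ... write statsString to a log or file ...
//     vmaFreeStatsString(allocator, statsString);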
2544 #endif // #if VMA_STATS_STRING_ENABLED
2545 
2554 VK_DEFINE_HANDLE(VmaPool)
2555 
2556 typedef enum VmaMemoryUsage
2557 {
2619 
2620  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
2622 
2632 
2697 
2713 
2723 
2730 
2734 
2736 {
2749  VkMemoryPropertyFlags requiredFlags;
2754  VkMemoryPropertyFlags preferredFlags;
2762  uint32_t memoryTypeBits;
2768  VmaPool VMA_NULLABLE pool;
2775  void* VMA_NULLABLE pUserData;
2777 
2794 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2795  VmaAllocator VMA_NOT_NULL allocator,
2796  uint32_t memoryTypeBits,
2797  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2798  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2799 
2812 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2813  VmaAllocator VMA_NOT_NULL allocator,
2814  const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2815  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2816  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2817 
2830 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2831  VmaAllocator VMA_NOT_NULL allocator,
2832  const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2833  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2834  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2835 
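
// Illustrative sketch: choosing a memory type for staging data. Passing
// UINT32_MAX as memoryTypeBits means "consider all memory types". The `usage`
// member and VMA_MEMORY_USAGE_CPU_ONLY belong to parts of this header elided
// from the listing above.
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//     uint32_t memTypeIndex = 0;
//     VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);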
2856 
2873 
2884 
2890 
2893 typedef VkFlags VmaPoolCreateFlags;
2894 
2897 typedef struct VmaPoolCreateInfo {
2912  VkDeviceSize blockSize;
2941 
2944 typedef struct VmaPoolStats {
2947  VkDeviceSize size;
2950  VkDeviceSize unusedSize;
2963  VkDeviceSize unusedRangeSizeMax;
2966  size_t blockCount;
2968 
2975 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2976  VmaAllocator VMA_NOT_NULL allocator,
2977  const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
2978  VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);
2979 
2982 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2983  VmaAllocator VMA_NOT_NULL allocator,
2984  VmaPool VMA_NULLABLE pool);
2985 
2992 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2993  VmaAllocator VMA_NOT_NULL allocator,
2994  VmaPool VMA_NOT_NULL pool,
2995  VmaPoolStats* VMA_NOT_NULL pPoolStats);
2996 
3003 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
3004  VmaAllocator VMA_NOT_NULL allocator,
3005  VmaPool VMA_NOT_NULL pool,
3006  size_t* VMA_NULLABLE pLostAllocationCount);
3007 
3022 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);
3023 
3030 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
3031  VmaAllocator VMA_NOT_NULL allocator,
3032  VmaPool VMA_NOT_NULL pool,
3033  const char* VMA_NULLABLE * VMA_NOT_NULL ppName);
3034 
3040 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
3041  VmaAllocator VMA_NOT_NULL allocator,
3042  VmaPool VMA_NOT_NULL pool,
3043  const char* VMA_NULLABLE pName);
3044 
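
// Illustrative sketch: creating a custom pool for a chosen memory type and
// naming it for debugging. memoryTypeIndex is a VmaPoolCreateInfo member
// elided from the listing above; leaving blockSize as 0 would use the default.
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. found via vmaFindMemoryTypeIndex
//     poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB blocks
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//     vmaSetPoolName(allocator, pool, "Texture pool");
//     // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//     vmaDestroyPool(allocator, pool);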
3069 VK_DEFINE_HANDLE(VmaAllocation)
3070 
3071 
3073 typedef struct VmaAllocationInfo {
3078  uint32_t memoryType;
3087  VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
3092  VkDeviceSize offset;
3103  VkDeviceSize size;
3112  void* VMA_NULLABLE pMappedData;
3117  void* VMA_NULLABLE pUserData;
3119 
3130 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
3131  VmaAllocator VMA_NOT_NULL allocator,
3132  const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
3133  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3134  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3135  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3136 
3156 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
3157  VmaAllocator VMA_NOT_NULL allocator,
3158  const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
3159  const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
3160  size_t allocationCount,
3161  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
3162  VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
3163 
3170 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
3171  VmaAllocator VMA_NOT_NULL allocator,
3172  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
3173  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3174  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3175  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3176 
3178 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
3179  VmaAllocator VMA_NOT_NULL allocator,
3180  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
3181  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3182  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3183  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3184 
3189 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
3190  VmaAllocator VMA_NOT_NULL allocator,
3191  const VmaAllocation VMA_NULLABLE allocation);
3192 
3203 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
3204  VmaAllocator VMA_NOT_NULL allocator,
3205  size_t allocationCount,
3206  const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
3207 
3215 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
3216  VmaAllocator VMA_NOT_NULL allocator,
3217  VmaAllocation VMA_NOT_NULL allocation,
3218  VkDeviceSize newSize);
3219 
3236 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
3237  VmaAllocator VMA_NOT_NULL allocator,
3238  VmaAllocation VMA_NOT_NULL allocation,
3239  VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
3240 
3255 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
3256  VmaAllocator VMA_NOT_NULL allocator,
3257  VmaAllocation VMA_NOT_NULL allocation);
3258 
3272 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
3273  VmaAllocator VMA_NOT_NULL allocator,
3274  VmaAllocation VMA_NOT_NULL allocation,
3275  void* VMA_NULLABLE pUserData);
3276 
3287 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
3288  VmaAllocator VMA_NOT_NULL allocator,
3289  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);
3290 
3329 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3330  VmaAllocator VMA_NOT_NULL allocator,
3331  VmaAllocation VMA_NOT_NULL allocation,
3332  void* VMA_NULLABLE * VMA_NOT_NULL ppData);
3333 
3342 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3343  VmaAllocator VMA_NOT_NULL allocator,
3344  VmaAllocation VMA_NOT_NULL allocation);
3345 
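
// Illustrative sketch: uploading data through a temporary mapping. Mapping is
// reference-counted per allocation, so map/unmap calls must be balanced.
//
//     void* mappedData = nullptr;
//     if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
//     {
//         memcpy(mappedData, srcData, srcDataSize); // srcData/srcDataSize: application-side
//         vmaUnmapMemory(allocator, allocation);
//     }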
3367 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
3368  VmaAllocator VMA_NOT_NULL allocator,
3369  VmaAllocation VMA_NOT_NULL allocation,
3370  VkDeviceSize offset,
3371  VkDeviceSize size);
3372 
3394 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
3395  VmaAllocator VMA_NOT_NULL allocator,
3396  VmaAllocation VMA_NOT_NULL allocation,
3397  VkDeviceSize offset,
3398  VkDeviceSize size);
3399 
3414 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
3415  VmaAllocator VMA_NOT_NULL allocator,
3416  uint32_t allocationCount,
3417  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
3418  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
3419  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
3420 
3435 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
3436  VmaAllocator VMA_NOT_NULL allocator,
3437  uint32_t allocationCount,
3438  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
3439  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
3440  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
3441 
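
// Illustrative sketch: for HOST_VISIBLE but non-HOST_COHERENT memory, flush
// after host writes and invalidate before host reads. VK_WHOLE_SIZE means
// "up to the end of the allocation".
//
//     // ... write through the mapped pointer ...
//     vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
//     // ... later, before reading results written by the GPU ...
//     vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);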
3458 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);
3459 
3466 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3467 
3468 typedef enum VmaDefragmentationFlagBits {
3473 typedef VkFlags VmaDefragmentationFlags;
3474 
3479 typedef struct VmaDefragmentationInfo2 {
3494  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
3500  VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
3503  uint32_t poolCount;
3519  const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
3524  VkDeviceSize maxCpuBytesToMove;
3534  VkDeviceSize maxGpuBytesToMove;
3548  VkCommandBuffer VMA_NULLABLE commandBuffer;
3550 
3553  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
3554  VkDeviceSize offset;
3556 
3562  uint32_t moveCount;
3563  VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
3565 
3570 typedef struct VmaDefragmentationInfo {
3575  VkDeviceSize maxBytesToMove;
3582 
3584 typedef struct VmaDefragmentationStats {
3586  VkDeviceSize bytesMoved;
3588  VkDeviceSize bytesFreed;
3594 
3624 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3625  VmaAllocator VMA_NOT_NULL allocator,
3626  const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
3627  VmaDefragmentationStats* VMA_NULLABLE pStats,
3628  VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);
3629 
3635 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3636  VmaAllocator VMA_NOT_NULL allocator,
3637  VmaDefragmentationContext VMA_NULLABLE context);
3638 
3639 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
3640  VmaAllocator VMA_NOT_NULL allocator,
3641  VmaDefragmentationContext VMA_NULLABLE context,
3642  VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
3643 );
3644 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
3645  VmaAllocator VMA_NOT_NULL allocator,
3646  VmaDefragmentationContext VMA_NULLABLE context
3647 );
3648 
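
// Illustrative sketch of the CPU-side defragmentation flow, assuming the
// application gathered movable allocations into an array. allocationCount and
// maxCpuAllocationsToMove are VmaDefragmentationInfo2 members elided from the
// listing above.
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = (uint32_t)allocationCount;
//     defragInfo.pAllocations = allocations;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//     VmaDefragmentationContext defragCtx = nullptr;
//     vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx);
//     // Afterwards, recreate buffers/images bound to allocations that moved.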
3689 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3690  VmaAllocator VMA_NOT_NULL allocator,
3691  const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
3692  size_t allocationCount,
3693  VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
3694  const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
3695  VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);
3696 
3709 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3710  VmaAllocator VMA_NOT_NULL allocator,
3711  VmaAllocation VMA_NOT_NULL allocation,
3712  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
3713 
3724 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3725  VmaAllocator VMA_NOT_NULL allocator,
3726  VmaAllocation VMA_NOT_NULL allocation,
3727  VkDeviceSize allocationLocalOffset,
3728  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
3729  const void* VMA_NULLABLE pNext);
3730 
3743 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3744  VmaAllocator VMA_NOT_NULL allocator,
3745  VmaAllocation VMA_NOT_NULL allocation,
3746  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
3747 
3758 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3759  VmaAllocator VMA_NOT_NULL allocator,
3760  VmaAllocation VMA_NOT_NULL allocation,
3761  VkDeviceSize allocationLocalOffset,
3762  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
3763  const void* VMA_NULLABLE pNext);
3764 
3791 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3792  VmaAllocator VMA_NOT_NULL allocator,
3793  const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
3794  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
3795  VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
3796  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3797  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3798 
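
// Illustrative sketch: creating a device-local vertex buffer together with its
// memory in one call, then destroying both. The `usage` member and
// VMA_MEMORY_USAGE_GPU_ONLY are elided from the listing above.
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buffer;
//     VmaAllocation allocation;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//         &buffer, &allocation, nullptr);
//     // ...
//     vmaDestroyBuffer(allocator, buffer, allocation);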
3810 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3811  VmaAllocator VMA_NOT_NULL allocator,
3812  VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
3813  VmaAllocation VMA_NULLABLE allocation);
3814 
3816 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3817  VmaAllocator VMA_NOT_NULL allocator,
3818  const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
3819  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
3820  VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
3821  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3822  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3823 
3835 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3836  VmaAllocator VMA_NOT_NULL allocator,
3837  VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
3838  VmaAllocation VMA_NULLABLE allocation);
3839 
3840 #ifdef __cplusplus
3841 }
3842 #endif
3843 
3844 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3845 
3846 // For Visual Studio IntelliSense.
3847 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3848 #define VMA_IMPLEMENTATION
3849 #endif
3850 
3851 #ifdef VMA_IMPLEMENTATION
3852 #undef VMA_IMPLEMENTATION
3853 
3854 #include <cstdint>
3855 #include <cstdlib>
3856 #include <cstring>
3857 #include <utility>
3858 
3859 /*******************************************************************************
3860 CONFIGURATION SECTION
3861 
3862 Define some of these macros before each #include of this header or change them
3863 here if you need behavior other than the default, depending on your environment.
3864 */
3865 
3866 /*
3867 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3868 internally, like:
3869 
3870  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3871 */
3872 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3873  #define VMA_STATIC_VULKAN_FUNCTIONS 1
3874 #endif
3875 
3876 /*
3877 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3878 internally, like:
3879 
3880  vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
3881 */
3882 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
3883  #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
3884 #endif
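
/*
Illustrative sketch: if both VMA_STATIC_VULKAN_FUNCTIONS and
VMA_DYNAMIC_VULKAN_FUNCTIONS are set to 0 (e.g. with VK_NO_PROTOTYPES and a
custom loader), the application can fill VmaVulkanFunctions itself and pass it
via VmaAllocatorCreateInfo::pVulkanFunctions (a member elided from the listing
above). `myLoadedVkAllocateMemory` is a hypothetical pointer obtained from the
application's loader:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
    // ... fill the remaining members the same way ...
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/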
3885 
3886 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3887 //#define VMA_USE_STL_CONTAINERS 1
3888 
3889 /* Set this macro to 1 to make the library include and use STL containers:
3890 std::pair, std::vector, std::list, std::unordered_map.
3891 
3892 Set it to 0 or leave it undefined to make the library use its own implementation of
3893 the containers.
3894 */
3895 #if VMA_USE_STL_CONTAINERS
3896  #define VMA_USE_STL_VECTOR 1
3897  #define VMA_USE_STL_UNORDERED_MAP 1
3898  #define VMA_USE_STL_LIST 1
3899 #endif
3900 
3901 #ifndef VMA_USE_STL_SHARED_MUTEX
3902  // Compiler conforms to C++17.
3903  #if __cplusplus >= 201703L
3904  #define VMA_USE_STL_SHARED_MUTEX 1
3905  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3906  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3907  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3908  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3909  #define VMA_USE_STL_SHARED_MUTEX 1
3910  #else
3911  #define VMA_USE_STL_SHARED_MUTEX 0
3912  #endif
3913 #endif
3914 
3915 /*
3916 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3917 The library has its own container implementation.
3918 */
3919 #if VMA_USE_STL_VECTOR
3920  #include <vector>
3921 #endif
3922 
3923 #if VMA_USE_STL_UNORDERED_MAP
3924  #include <unordered_map>
3925 #endif
3926 
3927 #if VMA_USE_STL_LIST
3928  #include <list>
3929 #endif
3930 
3931 /*
3932 The following headers are used only in this CONFIGURATION section, so feel free
3933 to remove them if not needed.
3934 */
3935 #include <cassert> // for assert
3936 #include <algorithm> // for min, max
3937 #include <mutex>
3938 
3939 #ifndef VMA_NULL
3940  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3941  #define VMA_NULL nullptr
3942 #endif
3943 
3944 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3945 #include <cstdlib>
3946 void *vma_aligned_alloc(size_t alignment, size_t size)
3947 {
3948  // alignment must be >= sizeof(void*)
3949  if(alignment < sizeof(void*))
3950  {
3951  alignment = sizeof(void*);
3952  }
3953 
3954  return memalign(alignment, size);
3955 }
3956 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3957 #include <cstdlib>
3958 
3959 #if defined(__APPLE__)
3960 #include <AvailabilityMacros.h>
3961 #endif
3962 
3963 void *vma_aligned_alloc(size_t alignment, size_t size)
3964 {
3965 #if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
3966 #if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
3967  // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
3968  // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
3969  // MAC_OS_X_VERSION_10_16), even though the function is marked
3970  // available for 10.15. That's why the preprocessor checks for 10.16 but
3971  // the __builtin_available checks for 10.15.
3972  // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
3973  if (__builtin_available(macOS 10.15, iOS 13, *))
3974  return aligned_alloc(alignment, size);
3975 #endif
3976 #endif
3977  // alignment must be >= sizeof(void*)
3978  if(alignment < sizeof(void*))
3979  {
3980  alignment = sizeof(void*);
3981  }
3982 
3983  void *pointer;
3984  if(posix_memalign(&pointer, alignment, size) == 0)
3985  return pointer;
3986  return VMA_NULL;
3987 }
3988 #elif defined(_WIN32)
3989 void *vma_aligned_alloc(size_t alignment, size_t size)
3990 {
3991  return _aligned_malloc(size, alignment);
3992 }
3993 #else
3994 void *vma_aligned_alloc(size_t alignment, size_t size)
3995 {
3996  return aligned_alloc(alignment, size);
3997 }
3998 #endif
3999 
4000 // If your compiler is not compatible with C++11 and the definition of the
4001 // aligned_alloc() function is missing, uncommenting the following line may help:
4002 
4003 //#include <malloc.h>
4004 
4005 // Normal assert to check for programmer's errors, especially in Debug configuration.
4006 #ifndef VMA_ASSERT
4007  #ifdef NDEBUG
4008  #define VMA_ASSERT(expr)
4009  #else
4010  #define VMA_ASSERT(expr) assert(expr)
4011  #endif
4012 #endif
4013 
4014 // Assert that will be called very often, like inside data structures e.g. operator[].
4015 // Making it non-empty can make program slow.
4016 #ifndef VMA_HEAVY_ASSERT
4017  #ifdef NDEBUG
4018  #define VMA_HEAVY_ASSERT(expr)
4019  #else
4020  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
4021  #endif
4022 #endif
4023 
4024 #ifndef VMA_ALIGN_OF
4025  #define VMA_ALIGN_OF(type) (__alignof(type))
4026 #endif
4027 
4028 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
4029  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
4030 #endif
4031 
4032 #ifndef VMA_SYSTEM_FREE
4033  #if defined(_WIN32)
4034  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
4035  #else
4036  #define VMA_SYSTEM_FREE(ptr) free(ptr)
4037  #endif
4038 #endif
4039 
4040 #ifndef VMA_MIN
4041  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
4042 #endif
4043 
4044 #ifndef VMA_MAX
4045  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
4046 #endif
4047 
4048 #ifndef VMA_SWAP
4049  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
4050 #endif
4051 
4052 #ifndef VMA_SORT
4053  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
4054 #endif
4055 
4056 #ifndef VMA_DEBUG_LOG
4057  #define VMA_DEBUG_LOG(format, ...)
4058  /*
4059  #define VMA_DEBUG_LOG(format, ...) do { \
4060  printf(format, __VA_ARGS__); \
4061  printf("\n"); \
4062  } while(false)
4063  */
4064 #endif
4065 
4066 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
4067 #if VMA_STATS_STRING_ENABLED
4068  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
4069  {
4070  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
4071  }
4072  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
4073  {
4074  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
4075  }
4076  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
4077  {
4078  snprintf(outStr, strLen, "%p", ptr);
4079  }
4080 #endif
4081 
4082 #ifndef VMA_MUTEX
4083  class VmaMutex
4084  {
4085  public:
4086  void Lock() { m_Mutex.lock(); }
4087  void Unlock() { m_Mutex.unlock(); }
4088  bool TryLock() { return m_Mutex.try_lock(); }
4089  private:
4090  std::mutex m_Mutex;
4091  };
4092  #define VMA_MUTEX VmaMutex
4093 #endif
4094 
4095 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
4096 #ifndef VMA_RW_MUTEX
4097  #if VMA_USE_STL_SHARED_MUTEX
4098  // Use std::shared_mutex from C++17.
4099  #include <shared_mutex>
4100  class VmaRWMutex
4101  {
4102  public:
4103  void LockRead() { m_Mutex.lock_shared(); }
4104  void UnlockRead() { m_Mutex.unlock_shared(); }
4105  bool TryLockRead() { return m_Mutex.try_lock_shared(); }
4106  void LockWrite() { m_Mutex.lock(); }
4107  void UnlockWrite() { m_Mutex.unlock(); }
4108  bool TryLockWrite() { return m_Mutex.try_lock(); }
4109  private:
4110  std::shared_mutex m_Mutex;
4111  };
4112  #define VMA_RW_MUTEX VmaRWMutex
4113  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
4114  // Use SRWLOCK from WinAPI.
4115  // Minimum supported client = Windows Vista, server = Windows Server 2008.
4116  class VmaRWMutex
4117  {
4118  public:
4119  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
4120  void LockRead() { AcquireSRWLockShared(&m_Lock); }
4121  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
4122  bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
4123  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
4124  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
4125  bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
4126  private:
4127  SRWLOCK m_Lock;
4128  };
4129  #define VMA_RW_MUTEX VmaRWMutex
4130  #else
4131  // Less efficient fallback: Use normal mutex.
4132  class VmaRWMutex
4133  {
4134  public:
4135  void LockRead() { m_Mutex.Lock(); }
4136  void UnlockRead() { m_Mutex.Unlock(); }
4137  bool TryLockRead() { return m_Mutex.TryLock(); }
4138  void LockWrite() { m_Mutex.Lock(); }
4139  void UnlockWrite() { m_Mutex.Unlock(); }
4140  bool TryLockWrite() { return m_Mutex.TryLock(); }
4141  private:
4142  VMA_MUTEX m_Mutex;
4143  };
4144  #define VMA_RW_MUTEX VmaRWMutex
4145  #endif // #if VMA_USE_STL_SHARED_MUTEX
4146 #endif // #ifndef VMA_RW_MUTEX
4147 
4148 /*
4149 If providing your own implementation, you need to implement a subset of std::atomic.
4150 */
4151 #ifndef VMA_ATOMIC_UINT32
4152  #include <atomic>
4153  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
4154 #endif
4155 
4156 #ifndef VMA_ATOMIC_UINT64
4157  #include <atomic>
4158  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
4159 #endif
4160 
4161 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
4162 
4166  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
4167 #endif
4168 
4169 #ifndef VMA_DEBUG_ALIGNMENT
4170 
4174  #define VMA_DEBUG_ALIGNMENT (1)
4175 #endif
4176 
4177 #ifndef VMA_DEBUG_MARGIN
4178 
4182  #define VMA_DEBUG_MARGIN (0)
4183 #endif
4184 
4185 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
4186 
4190  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
4191 #endif
4192 
4193 #ifndef VMA_DEBUG_DETECT_CORRUPTION
4194 
4199  #define VMA_DEBUG_DETECT_CORRUPTION (0)
4200 #endif
4201 
4202 #ifndef VMA_DEBUG_GLOBAL_MUTEX
4203 
4207  #define VMA_DEBUG_GLOBAL_MUTEX (0)
4208 #endif
4209 
4210 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
4211 
4215  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
4216 #endif
4217 
4218 #ifndef VMA_SMALL_HEAP_MAX_SIZE
4219  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
4221 #endif
4222 
4223 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
4224  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
4226 #endif
4227 
4228 #ifndef VMA_CLASS_NO_COPY
4229  #define VMA_CLASS_NO_COPY(className) \
4230  private: \
4231  className(const className&) = delete; \
4232  className& operator=(const className&) = delete;
4233 #endif
4234 
4235 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
4236 
4237 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
4238 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
4239 
4240 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
4241 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
4242 
4243 /*******************************************************************************
4244 END OF CONFIGURATION
4245 */
4246 
4247 // # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
4248 
4249 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
4250 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
4251 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
4252 
4253 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
4254 
4255 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
4256  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
4257 
4258 // Returns number of bits set to 1 in (v).
4259 static inline uint32_t VmaCountBitsSet(uint32_t v)
4260 {
4261  uint32_t c = v - ((v >> 1) & 0x55555555);
4262  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
4263  c = ((c >> 4) + c) & 0x0F0F0F0F;
4264  c = ((c >> 8) + c) & 0x00FF00FF;
4265  c = ((c >> 16) + c) & 0x0000FFFF;
4266  return c;
4267 }
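// For example, VmaCountBitsSet(0x0000000Bu) == 3 (binary 1011).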
4268 
4269 /*
4270 Returns true if the given number is a power of two.
4271 T must be an unsigned integer type, or a signed integer holding a nonnegative value.
4272 For 0 it returns true.
4273 */
4274 template <typename T>
4275 inline bool VmaIsPow2(T x)
4276 {
4277  return (x & (x-1)) == 0;
4278 }
4279 
4280 // Aligns the given value up to the nearest multiple of the alignment. For example: VmaAlignUp(11, 8) = 16.
4281 // Use types like uint32_t, uint64_t as T.
4282 template <typename T>
4283 static inline T VmaAlignUp(T val, T alignment)
4284 {
4285  VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
4286  return (val + alignment - 1) & ~(alignment - 1);
4287 }
4288 // Aligns the given value down to the nearest multiple of the alignment. For example: VmaAlignDown(11, 8) = 8.
4289 // Use types like uint32_t, uint64_t as T.
4290 template <typename T>
4291 static inline T VmaAlignDown(T val, T alignment)
4292 {
4293  VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
4294  return val & ~(alignment - 1);
4295 }
4296 
4297 // Division with mathematical rounding to nearest number.
4298 template <typename T>
4299 static inline T VmaRoundDiv(T x, T y)
4300 {
4301  return (x + (y / (T)2)) / y;
4302 }
4303 
4304 // Returns smallest power of 2 greater or equal to v.
4305 static inline uint32_t VmaNextPow2(uint32_t v)
4306 {
4307  v--;
4308  v |= v >> 1;
4309  v |= v >> 2;
4310  v |= v >> 4;
4311  v |= v >> 8;
4312  v |= v >> 16;
4313  v++;
4314  return v;
4315 }
4316 static inline uint64_t VmaNextPow2(uint64_t v)
4317 {
4318  v--;
4319  v |= v >> 1;
4320  v |= v >> 2;
4321  v |= v >> 4;
4322  v |= v >> 8;
4323  v |= v >> 16;
4324  v |= v >> 32;
4325  v++;
4326  return v;
4327 }
4328 
4329 // Returns largest power of 2 less or equal to v.
4330 static inline uint32_t VmaPrevPow2(uint32_t v)
4331 {
4332  v |= v >> 1;
4333  v |= v >> 2;
4334  v |= v >> 4;
4335  v |= v >> 8;
4336  v |= v >> 16;
4337  v = v ^ (v >> 1);
4338  return v;
4339 }
4340 static inline uint64_t VmaPrevPow2(uint64_t v)
4341 {
4342  v |= v >> 1;
4343  v |= v >> 2;
4344  v |= v >> 4;
4345  v |= v >> 8;
4346  v |= v >> 16;
4347  v |= v >> 32;
4348  v = v ^ (v >> 1);
4349  return v;
4350 }
4351 
4352 static inline bool VmaStrIsEmpty(const char* pStr)
4353 {
4354  return pStr == VMA_NULL || *pStr == '\0';
4355 }
4356 
4357 #if VMA_STATS_STRING_ENABLED
4358 
4359 static const char* VmaAlgorithmToStr(uint32_t algorithm)
4360 {
4361  switch(algorithm)
4362  {
4363  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
4364  return "Linear";
4365  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
4366  return "Buddy";
4367  case 0:
4368  return "Default";
4369  default:
4370  VMA_ASSERT(0);
4371  return "";
4372  }
4373 }
4374 
4375 #endif // #if VMA_STATS_STRING_ENABLED
4376 
4377 #ifndef VMA_SORT
4378 
4379 template<typename Iterator, typename Compare>
4380 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
4381 {
4382  Iterator centerValue = end; --centerValue;
4383  Iterator insertIndex = beg;
4384  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
4385  {
4386  if(cmp(*memTypeIndex, *centerValue))
4387  {
4388  if(insertIndex != memTypeIndex)
4389  {
4390  VMA_SWAP(*memTypeIndex, *insertIndex);
4391  }
4392  ++insertIndex;
4393  }
4394  }
4395  if(insertIndex != centerValue)
4396  {
4397  VMA_SWAP(*insertIndex, *centerValue);
4398  }
4399  return insertIndex;
4400 }
4401 
4402 template<typename Iterator, typename Compare>
4403 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
4404 {
4405  if(beg < end)
4406  {
4407  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
4408  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
4409  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
4410  }
4411 }
4412 
4413 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
4414 
4415 #endif // #ifndef VMA_SORT
4416 
4417 /*
4418 Returns true if two memory blocks occupy overlapping pages.
4419 ResourceA must be at a lower memory offset than ResourceB.
4420 
4421 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
4422 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
4423 */
4424 static inline bool VmaBlocksOnSamePage(
4425  VkDeviceSize resourceAOffset,
4426  VkDeviceSize resourceASize,
4427  VkDeviceSize resourceBOffset,
4428  VkDeviceSize pageSize)
4429 {
4430  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
4431  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
4432  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
4433  VkDeviceSize resourceBStart = resourceBOffset;
4434  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
4435  return resourceAEndPage == resourceBStartPage;
4436 }
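// Worked example: with pageSize (bufferImageGranularity) = 1024, a resource at
// offset 0 with size 512 ends at byte 511, which lies on page 0; a resource
// starting at offset 512 also begins on page 0, so the function returns true.
// Starting it at offset 1024 instead would put it on page 1 and return false.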
4437 
4438 enum VmaSuballocationType
4439 {
4440  VMA_SUBALLOCATION_TYPE_FREE = 0,
4441  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
4442  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
4443  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
4444  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
4445  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
4446  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
4447 };
4448 
4449 /*
4450 Returns true if given suballocation types could conflict and must respect
4451 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4452 or linear image and another one is optimal image. If type is unknown, behave
4453 conservatively.
4454 */
4455 static inline bool VmaIsBufferImageGranularityConflict(
4456  VmaSuballocationType suballocType1,
4457  VmaSuballocationType suballocType2)
4458 {
4459  if(suballocType1 > suballocType2)
4460  {
4461  VMA_SWAP(suballocType1, suballocType2);
4462  }
4463 
4464  switch(suballocType1)
4465  {
4466  case VMA_SUBALLOCATION_TYPE_FREE:
4467  return false;
4468  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4469  return true;
4470  case VMA_SUBALLOCATION_TYPE_BUFFER:
4471  return
4472  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4473  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4474  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4475  return
4476  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4477  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4478  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4479  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4480  return
4481  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4482  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4483  return false;
4484  default:
4485  VMA_ASSERT(0);
4486  return true;
4487  }
4488 }
4489 
4490 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
4491 {
4492 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4493  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
4494  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4495  for(size_t i = 0; i < numberCount; ++i, ++pDst)
4496  {
4497  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
4498  }
4499 #else
4500  // no-op
4501 #endif
4502 }
4503 
4504 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
4505 {
4506 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4507  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
4508  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4509  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4510  {
4511  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4512  {
4513  return false;
4514  }
4515  }
4516 #endif
4517  return true;
4518 }
4519 
4520 /*
4521 Fills structure with parameters of an example buffer to be used for transfers
4522 during GPU memory defragmentation.
4523 */
4524 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4525 {
4526  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4527  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4528  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4529  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4530 }
4531 
4532 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4533 struct VmaMutexLock
4534 {
4535  VMA_CLASS_NO_COPY(VmaMutexLock)
4536 public:
4537  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4538  m_pMutex(useMutex ? &mutex : VMA_NULL)
4539  { if(m_pMutex) { m_pMutex->Lock(); } }
4540  ~VmaMutexLock()
4541  { if(m_pMutex) { m_pMutex->Unlock(); } }
4542 private:
4543  VMA_MUTEX* m_pMutex;
4544 };
4545 
4546 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4547 struct VmaMutexLockRead
4548 {
4549  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4550 public:
4551  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4552  m_pMutex(useMutex ? &mutex : VMA_NULL)
4553  { if(m_pMutex) { m_pMutex->LockRead(); } }
4554  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4555 private:
4556  VMA_RW_MUTEX* m_pMutex;
4557 };
4558 
4559 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4560 struct VmaMutexLockWrite
4561 {
4562  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4563 public:
4564  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4565  m_pMutex(useMutex ? &mutex : VMA_NULL)
4566  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4567  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4568 private:
4569  VMA_RW_MUTEX* m_pMutex;
4570 };
4571 
4572 #if VMA_DEBUG_GLOBAL_MUTEX
4573  static VMA_MUTEX gDebugGlobalMutex;
4574  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4575 #else
4576  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4577 #endif
4578 
4579 // Minimum size of a free suballocation to register it in the free suballocation collection.
4580 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4581 
4582 /*
4583 Performs binary search and returns iterator to first element that is greater or
4584 equal to (key), according to comparison (cmp).
4585 
4586 Cmp should return true if first argument is less than second argument.
4587 
4588 The returned iterator points to the found element, if present in the collection,
4589 or to the place where a new element with value (key) should be inserted.
4590 */
4591 template <typename CmpLess, typename IterT, typename KeyT>
4592 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
4593 {
4594  size_t down = 0, up = (end - beg);
4595  while(down < up)
4596  {
4597  const size_t mid = (down + up) / 2;
4598  if(cmp(*(beg+mid), key))
4599  {
4600  down = mid + 1;
4601  }
4602  else
4603  {
4604  up = mid;
4605  }
4606  }
4607  return beg + down;
4608 }
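
// Illustrative usage (behaves like std::lower_bound):
//
//     VkDeviceSize offsets[] = { 0, 64, 256 };
//     VkDeviceSize* it = VmaBinaryFindFirstNotLess(
//         offsets, offsets + 3, (VkDeviceSize)100,
//         [](VkDeviceSize a, VkDeviceSize b) { return a < b; });
//     // it points to offsets[2] == 256, the insertion position for key 100.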
4609 
4610 template<typename CmpLess, typename IterT, typename KeyT>
4611 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4612 {
4613  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4614  beg, end, value, cmp);
4615  if(it == end ||
4616  (!cmp(*it, value) && !cmp(value, *it)))
4617  {
4618  return it;
4619  }
4620  return end;
4621 }
4622 
4623 /*
4624 Returns true if all pointers in the array are non-null and unique.
4625 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4626 T must be pointer type, e.g. VmaAllocation, VmaPool.
4627 */
4628 template<typename T>
4629 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4630 {
4631  for(uint32_t i = 0; i < count; ++i)
4632  {
4633  const T iPtr = arr[i];
4634  if(iPtr == VMA_NULL)
4635  {
4636  return false;
4637  }
4638  for(uint32_t j = i + 1; j < count; ++j)
4639  {
4640  if(iPtr == arr[j])
4641  {
4642  return false;
4643  }
4644  }
4645  }
4646  return true;
4647 }
4648 
4649 template<typename MainT, typename NewT>
4650 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
4651 {
4652  newStruct->pNext = mainStruct->pNext;
4653  mainStruct->pNext = newStruct;
4654 }
4655 
4657 // Memory allocation
4658 
4659 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4660 {
4661  void* result = VMA_NULL;
4662  if((pAllocationCallbacks != VMA_NULL) &&
4663  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4664  {
4665  result = (*pAllocationCallbacks->pfnAllocation)(
4666  pAllocationCallbacks->pUserData,
4667  size,
4668  alignment,
4669  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4670  }
4671  else
4672  {
4673  result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4674  }
4675  VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
4676  return result;
4677 }
4678 
4679 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4680 {
4681  if((pAllocationCallbacks != VMA_NULL) &&
4682  (pAllocationCallbacks->pfnFree != VMA_NULL))
4683  {
4684  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4685  }
4686  else
4687  {
4688  VMA_SYSTEM_FREE(ptr);
4689  }
4690 }
4691 
4692 template<typename T>
4693 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4694 {
4695  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4696 }
4697 
4698 template<typename T>
4699 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4700 {
4701  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4702 }
4703 
4704 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4705 
4706 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4707 
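
// Illustrative usage of the placement-new helpers, assuming MyType is some
// internal class:
//
//     MyType* p = vma_new(pAllocationCallbacks, MyType)(constructorArg);
//     vma_delete(pAllocationCallbacks, p);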
4708 template<typename T>
4709 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4710 {
4711  ptr->~T();
4712  VmaFree(pAllocationCallbacks, ptr);
4713 }
4714 
4715 template<typename T>
4716 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4717 {
4718  if(ptr != VMA_NULL)
4719  {
4720  for(size_t i = count; i--; )
4721  {
4722  ptr[i].~T();
4723  }
4724  VmaFree(pAllocationCallbacks, ptr);
4725  }
4726 }
4727 
4728 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4729 {
4730  if(srcStr != VMA_NULL)
4731  {
4732  const size_t len = strlen(srcStr);
4733  char* const result = vma_new_array(allocs, char, len + 1);
4734  memcpy(result, srcStr, len + 1);
4735  return result;
4736  }
4737  else
4738  {
4739  return VMA_NULL;
4740  }
4741 }
4742 
4743 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4744 {
4745  if(str != VMA_NULL)
4746  {
4747  const size_t len = strlen(str);
4748  vma_delete_array(allocs, str, len + 1);
4749  }
4750 }
4751 
4752 // STL-compatible allocator.
4753 template<typename T>
4754 class VmaStlAllocator
4755 {
4756 public:
4757  const VkAllocationCallbacks* const m_pCallbacks;
4758  typedef T value_type;
4759 
4760  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4761  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4762 
4763  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4764  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4765 
4766  template<typename U>
4767  bool operator==(const VmaStlAllocator<U>& rhs) const
4768  {
4769  return m_pCallbacks == rhs.m_pCallbacks;
4770  }
4771  template<typename U>
4772  bool operator!=(const VmaStlAllocator<U>& rhs) const
4773  {
4774  return m_pCallbacks != rhs.m_pCallbacks;
4775  }
4776 
4777  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4778 };
4779 
4780 #if VMA_USE_STL_VECTOR
4781 
4782 #define VmaVector std::vector
4783 
4784 template<typename T, typename allocatorT>
4785 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4786 {
4787  vec.insert(vec.begin() + index, item);
4788 }
4789 
4790 template<typename T, typename allocatorT>
4791 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4792 {
4793  vec.erase(vec.begin() + index);
4794 }
4795 
4796 #else // #if VMA_USE_STL_VECTOR
4797 
4798 /* Class with interface compatible with subset of std::vector.
4799 T must be POD because constructors and destructors are not called and memcpy is
4800 used for these objects. */
4801 template<typename T, typename AllocatorT>
4802 class VmaVector
4803 {
4804 public:
4805  typedef T value_type;
4806 
4807  VmaVector(const AllocatorT& allocator) :
4808  m_Allocator(allocator),
4809  m_pArray(VMA_NULL),
4810  m_Count(0),
4811  m_Capacity(0)
4812  {
4813  }
4814 
4815  VmaVector(size_t count, const AllocatorT& allocator) :
4816  m_Allocator(allocator),
4817  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4818  m_Count(count),
4819  m_Capacity(count)
4820  {
4821  }
4822 
4823  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4824  // value is unused.
4825  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4826  : VmaVector(count, allocator) {}
4827 
4828  VmaVector(const VmaVector<T, AllocatorT>& src) :
4829  m_Allocator(src.m_Allocator),
4830  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4831  m_Count(src.m_Count),
4832  m_Capacity(src.m_Count)
4833  {
4834  if(m_Count != 0)
4835  {
4836  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4837  }
4838  }
4839 
4840  ~VmaVector()
4841  {
4842  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4843  }
4844 
4845  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4846  {
4847  if(&rhs != this)
4848  {
4849  resize(rhs.m_Count);
4850  if(m_Count != 0)
4851  {
4852  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4853  }
4854  }
4855  return *this;
4856  }
4857 
4858  bool empty() const { return m_Count == 0; }
4859  size_t size() const { return m_Count; }
4860  T* data() { return m_pArray; }
4861  const T* data() const { return m_pArray; }
4862 
4863  T& operator[](size_t index)
4864  {
4865  VMA_HEAVY_ASSERT(index < m_Count);
4866  return m_pArray[index];
4867  }
4868  const T& operator[](size_t index) const
4869  {
4870  VMA_HEAVY_ASSERT(index < m_Count);
4871  return m_pArray[index];
4872  }
4873 
4874  T& front()
4875  {
4876  VMA_HEAVY_ASSERT(m_Count > 0);
4877  return m_pArray[0];
4878  }
4879  const T& front() const
4880  {
4881  VMA_HEAVY_ASSERT(m_Count > 0);
4882  return m_pArray[0];
4883  }
4884  T& back()
4885  {
4886  VMA_HEAVY_ASSERT(m_Count > 0);
4887  return m_pArray[m_Count - 1];
4888  }
4889  const T& back() const
4890  {
4891  VMA_HEAVY_ASSERT(m_Count > 0);
4892  return m_pArray[m_Count - 1];
4893  }
4894 
4895  void reserve(size_t newCapacity, bool freeMemory = false)
4896  {
4897  newCapacity = VMA_MAX(newCapacity, m_Count);
4898 
4899  if((newCapacity < m_Capacity) && !freeMemory)
4900  {
4901  newCapacity = m_Capacity;
4902  }
4903 
4904  if(newCapacity != m_Capacity)
4905  {
4906  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4907  if(m_Count != 0)
4908  {
4909  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4910  }
4911  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4912  m_Capacity = newCapacity;
4913  m_pArray = newArray;
4914  }
4915  }
4916 
4917  void resize(size_t newCount, bool freeMemory = false)
4918  {
4919  size_t newCapacity = m_Capacity;
4920  if(newCount > m_Capacity)
4921  {
4922  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4923  }
4924  else if(freeMemory)
4925  {
4926  newCapacity = newCount;
4927  }
4928 
4929  if(newCapacity != m_Capacity)
4930  {
4931  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4932  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4933  if(elementsToCopy != 0)
4934  {
4935  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4936  }
4937  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4938  m_Capacity = newCapacity;
4939  m_pArray = newArray;
4940  }
4941 
4942  m_Count = newCount;
4943  }
4944 
4945  void clear(bool freeMemory = false)
4946  {
4947  resize(0, freeMemory);
4948  }
4949 
4950  void insert(size_t index, const T& src)
4951  {
4952  VMA_HEAVY_ASSERT(index <= m_Count);
4953  const size_t oldCount = size();
4954  resize(oldCount + 1);
4955  if(index < oldCount)
4956  {
4957  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4958  }
4959  m_pArray[index] = src;
4960  }
4961 
4962  void remove(size_t index)
4963  {
4964  VMA_HEAVY_ASSERT(index < m_Count);
4965  const size_t oldCount = size();
4966  if(index < oldCount - 1)
4967  {
4968  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4969  }
4970  resize(oldCount - 1);
4971  }
4972 
4973  void push_back(const T& src)
4974  {
4975  const size_t newIndex = size();
4976  resize(newIndex + 1);
4977  m_pArray[newIndex] = src;
4978  }
4979 
4980  void pop_back()
4981  {
4982  VMA_HEAVY_ASSERT(m_Count > 0);
4983  resize(size() - 1);
4984  }
4985 
4986  void push_front(const T& src)
4987  {
4988  insert(0, src);
4989  }
4990 
4991  void pop_front()
4992  {
4993  VMA_HEAVY_ASSERT(m_Count > 0);
4994  remove(0);
4995  }
4996 
4997  typedef T* iterator;
4998 
4999  iterator begin() { return m_pArray; }
5000  iterator end() { return m_pArray + m_Count; }
5001 
5002 private:
5003  AllocatorT m_Allocator;
5004  T* m_pArray;
5005  size_t m_Count;
5006  size_t m_Capacity;
5007 };
5008 
5009 template<typename T, typename allocatorT>
5010 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5011 {
5012  vec.insert(index, item);
5013 }
5014 
5015 template<typename T, typename allocatorT>
5016 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5017 {
5018  vec.remove(index);
5019 }
5020 
5021 #endif // #if VMA_USE_STL_VECTOR
5022 
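/*
Illustrative sketch (not part of the library source): whichever branch above is
active, calling code looks the same - that is the point of keeping VmaVector
interface-compatible with the used subset of std::vector:

    VmaStlAllocator<uint32_t> alloc(VMA_NULL);
    VmaVector<uint32_t, VmaStlAllocator<uint32_t> > vec(alloc);
    vec.push_back(7);             // vec = {7}
    VmaVectorInsert(vec, 0, 10u); // vec = {10, 7}
    VmaVectorRemove(vec, 1);      // vec = {10}
*/
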
5023 template<typename CmpLess, typename VectorT>
5024 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5025 {
5026  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5027  vector.data(),
5028  vector.data() + vector.size(),
5029  value,
5030  CmpLess()) - vector.data();
5031  VmaVectorInsert(vector, indexToInsert, value);
5032  return indexToInsert;
5033 }
5034 
5035 template<typename CmpLess, typename VectorT>
5036 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5037 {
5038  CmpLess comparator;
5039  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5040  vector.begin(),
5041  vector.end(),
5042  value,
5043  comparator);
5044  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5045  {
5046  size_t indexToRemove = it - vector.begin();
5047  VmaVectorRemove(vector, indexToRemove);
5048  return true;
5049  }
5050  return false;
5051 }
5052 
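/*
Illustrative sketch (not part of the library source): these helpers keep a vector
sorted using only a "less" comparator. Note how VmaVectorRemoveSorted detects
equivalence as !(a < b) && !(b < a), the standard idiom for strict weak orderings.

    struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };
    VmaVector<uint32_t, VmaStlAllocator<uint32_t> > v((VmaStlAllocator<uint32_t>(VMA_NULL)));
    VmaVectorInsertSorted<UintLess>(v, 5u);
    VmaVectorInsertSorted<UintLess>(v, 2u); // v = {2, 5}
    VmaVectorRemoveSorted<UintLess>(v, 5u); // v = {2}, returns true
*/
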
5053 ////////////////////////////////////////////////////////////////////////////////
5054 // class VmaSmallVector
5055 
5056 /*
5057 This is a vector (a variable-sized array), optimized for the case when the array is small.
5058 
5059 It stores up to N elements in-place, which allows it to avoid heap allocation
5060 when the actual number of elements is at or below that threshold. This keeps the
5061 common "small" cases fast without losing generality for large inputs.
5062 */
5063 
5064 template<typename T, typename AllocatorT, size_t N>
5065 class VmaSmallVector
5066 {
5067 public:
5068  typedef T value_type;
5069 
5070  VmaSmallVector(const AllocatorT& allocator) :
5071  m_Count(0),
5072  m_DynamicArray(allocator)
5073  {
5074  }
5075  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5076  m_Count(count),
5077  m_DynamicArray(count > N ? count : 0, allocator)
5078  {
5079  }
5080  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5081  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5082  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5083  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5084 
5085  bool empty() const { return m_Count == 0; }
5086  size_t size() const { return m_Count; }
5087  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5088  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5089 
5090  T& operator[](size_t index)
5091  {
5092  VMA_HEAVY_ASSERT(index < m_Count);
5093  return data()[index];
5094  }
5095  const T& operator[](size_t index) const
5096  {
5097  VMA_HEAVY_ASSERT(index < m_Count);
5098  return data()[index];
5099  }
5100 
5101  T& front()
5102  {
5103  VMA_HEAVY_ASSERT(m_Count > 0);
5104  return data()[0];
5105  }
5106  const T& front() const
5107  {
5108  VMA_HEAVY_ASSERT(m_Count > 0);
5109  return data()[0];
5110  }
5111  T& back()
5112  {
5113  VMA_HEAVY_ASSERT(m_Count > 0);
5114  return data()[m_Count - 1];
5115  }
5116  const T& back() const
5117  {
5118  VMA_HEAVY_ASSERT(m_Count > 0);
5119  return data()[m_Count - 1];
5120  }
5121 
5122  void resize(size_t newCount, bool freeMemory = false)
5123  {
5124  if(newCount > N && m_Count > N)
5125  {
5126  // Any direction, staying in m_DynamicArray
5127  m_DynamicArray.resize(newCount, freeMemory);
5128  }
5129  else if(newCount > N && m_Count <= N)
5130  {
5131  // Growing, moving from m_StaticArray to m_DynamicArray
5132  m_DynamicArray.resize(newCount, freeMemory);
5133  if(m_Count > 0)
5134  {
5135  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5136  }
5137  }
5138  else if(newCount <= N && m_Count > N)
5139  {
5140  // Shrinking, moving from m_DynamicArray to m_StaticArray
5141  if(newCount > 0)
5142  {
5143  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5144  }
5145  m_DynamicArray.resize(0, freeMemory);
5146  }
5147  else
5148  {
5149  // Any direction, staying in m_StaticArray - nothing to do here
5150  }
5151  m_Count = newCount;
5152  }
5153 
5154  void clear(bool freeMemory = false)
5155  {
5156  m_DynamicArray.clear(freeMemory);
5157  m_Count = 0;
5158  }
5159 
5160  void insert(size_t index, const T& src)
5161  {
5162  VMA_HEAVY_ASSERT(index <= m_Count);
5163  const size_t oldCount = size();
5164  resize(oldCount + 1);
5165  T* const dataPtr = data();
5166  if(index < oldCount)
5167  {
5168  // Not optimal: when the resize above has just moved the elements from m_StaticArray to m_DynamicArray, this memmove could be folded into a single direct memcpy.
5169  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5170  }
5171  dataPtr[index] = src;
5172  }
5173 
5174  void remove(size_t index)
5175  {
5176  VMA_HEAVY_ASSERT(index < m_Count);
5177  const size_t oldCount = size();
5178  if(index < oldCount - 1)
5179  {
5180  // Not optimal: when the resize below moves the elements from m_DynamicArray back to m_StaticArray, this memmove could be folded into a single direct memcpy.
5181  T* const dataPtr = data();
5182  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5183  }
5184  resize(oldCount - 1);
5185  }
5186 
5187  void push_back(const T& src)
5188  {
5189  const size_t newIndex = size();
5190  resize(newIndex + 1);
5191  data()[newIndex] = src;
5192  }
5193 
5194  void pop_back()
5195  {
5196  VMA_HEAVY_ASSERT(m_Count > 0);
5197  resize(size() - 1);
5198  }
5199 
5200  void push_front(const T& src)
5201  {
5202  insert(0, src);
5203  }
5204 
5205  void pop_front()
5206  {
5207  VMA_HEAVY_ASSERT(m_Count > 0);
5208  remove(0);
5209  }
5210 
5211  typedef T* iterator;
5212 
5213  iterator begin() { return data(); }
5214  iterator end() { return data() + m_Count; }
5215 
5216 private:
5217  size_t m_Count;
5218  T m_StaticArray[N]; // Used when m_Count <= N
5219  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5220 };
5221 
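/*
Illustrative sketch (not part of the library source): with N = 4, the first four
push_back calls stay inside m_StaticArray and cause no heap traffic; the fifth
crosses the threshold, so resize() allocates m_DynamicArray and memcpy-s the
existing elements into it.

    VmaSmallVector<int, VmaStlAllocator<int>, 4> sv((VmaStlAllocator<int>(VMA_NULL)));
    for(int i = 0; i < 4; ++i)
        sv.push_back(i); // in-place storage
    sv.push_back(4);     // first heap allocation happens here
*/
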
5222 ////////////////////////////////////////////////////////////////////////////////
5223 // class VmaPoolAllocator
5224 
5225 /*
5226 Allocator for objects of type T, using a list of arrays (pools) to speed up
5227 allocation. The number of elements that can be allocated is not bounded, because
5228 the allocator can create multiple blocks.
5229 */
5230 template<typename T>
5231 class VmaPoolAllocator
5232 {
5233  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5234 public:
5235  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5236  ~VmaPoolAllocator();
5237  template<typename... Types> T* Alloc(Types... args);
5238  void Free(T* ptr);
5239 
5240 private:
5241  union Item
5242  {
5243  uint32_t NextFreeIndex;
5244  alignas(T) char Value[sizeof(T)];
5245  };
5246 
5247  struct ItemBlock
5248  {
5249  Item* pItems;
5250  uint32_t Capacity;
5251  uint32_t FirstFreeIndex;
5252  };
5253 
5254  const VkAllocationCallbacks* m_pAllocationCallbacks;
5255  const uint32_t m_FirstBlockCapacity;
5256  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5257 
5258  ItemBlock& CreateNewBlock();
5259 };
5260 
5261 template<typename T>
5262 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5263  m_pAllocationCallbacks(pAllocationCallbacks),
5264  m_FirstBlockCapacity(firstBlockCapacity),
5265  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5266 {
5267  VMA_ASSERT(m_FirstBlockCapacity > 1);
5268 }
5269 
5270 template<typename T>
5271 VmaPoolAllocator<T>::~VmaPoolAllocator()
5272 {
5273  for(size_t i = m_ItemBlocks.size(); i--; )
5274  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5275  m_ItemBlocks.clear();
5276 }
5277 
5278 template<typename T>
5279 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5280 {
5281  for(size_t i = m_ItemBlocks.size(); i--; )
5282  {
5283  ItemBlock& block = m_ItemBlocks[i];
5284  // This block has some free items: use the first one.
5285  if(block.FirstFreeIndex != UINT32_MAX)
5286  {
5287  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5288  block.FirstFreeIndex = pItem->NextFreeIndex;
5289  T* result = (T*)&pItem->Value;
5290  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5291  return result;
5292  }
5293  }
5294 
5295  // No block has a free item: create a new block and use its first item.
5296  ItemBlock& newBlock = CreateNewBlock();
5297  Item* const pItem = &newBlock.pItems[0];
5298  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5299  T* result = (T*)&pItem->Value;
5300  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5301  return result;
5302 }
5303 
5304 template<typename T>
5305 void VmaPoolAllocator<T>::Free(T* ptr)
5306 {
5307  // Search all memory blocks to find ptr.
5308  for(size_t i = m_ItemBlocks.size(); i--; )
5309  {
5310  ItemBlock& block = m_ItemBlocks[i];
5311 
5312  // Reinterpret the T* as a pointer to the Item union by copying the pointer bytes, avoiding a strict-aliasing violation.
5313  Item* pItemPtr;
5314  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5315 
5316  // Check if pItemPtr is in address range of this block.
5317  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5318  {
5319  ptr->~T(); // Explicit destructor call.
5320  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5321  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5322  block.FirstFreeIndex = index;
5323  return;
5324  }
5325  }
5326  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5327 }
5328 
5329 template<typename T>
5330 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5331 {
5332  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5333  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5334 
5335  const ItemBlock newBlock = {
5336  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5337  newBlockCapacity,
5338  0 };
5339 
5340  m_ItemBlocks.push_back(newBlock);
5341 
5342  // Set up a singly-linked list of all free items in this block.
5343  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5344  newBlock.pItems[i].NextFreeIndex = i + 1;
5345  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5346  return m_ItemBlocks.back();
5347 }
5348 
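/*
Illustrative sketch (not part of the library source): while an Item is free, its
bytes store the index of the next free item, so the per-block free list needs no
extra memory; FirstFreeIndex == UINT32_MAX marks a fully used block. Successive
blocks grow by a factor of 1.5, starting from firstBlockCapacity. Foo below is a
hypothetical payload type:

    struct Foo { int x; Foo() : x(0) {} Foo(int x_) : x(x_) {} };
    VmaPoolAllocator<Foo> pool(VMA_NULL, 32); // first block holds 32 items
    Foo* item = pool.Alloc(123);              // placement-new Foo(123) into a block
    pool.Free(item);                          // ~Foo() + push onto the free list
*/
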
5349 ////////////////////////////////////////////////////////////////////////////////
5350 // class VmaRawList, VmaList
5351 
5352 #if VMA_USE_STL_LIST
5353 
5354 #define VmaList std::list
5355 
5356 #else // #if VMA_USE_STL_LIST
5357 
5358 template<typename T>
5359 struct VmaListItem
5360 {
5361  VmaListItem* pPrev;
5362  VmaListItem* pNext;
5363  T Value;
5364 };
5365 
5366 // Doubly linked list.
5367 template<typename T>
5368 class VmaRawList
5369 {
5370  VMA_CLASS_NO_COPY(VmaRawList)
5371 public:
5372  typedef VmaListItem<T> ItemType;
5373 
5374  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5375  ~VmaRawList();
5376  void Clear();
5377 
5378  size_t GetCount() const { return m_Count; }
5379  bool IsEmpty() const { return m_Count == 0; }
5380 
5381  ItemType* Front() { return m_pFront; }
5382  const ItemType* Front() const { return m_pFront; }
5383  ItemType* Back() { return m_pBack; }
5384  const ItemType* Back() const { return m_pBack; }
5385 
5386  ItemType* PushBack();
5387  ItemType* PushFront();
5388  ItemType* PushBack(const T& value);
5389  ItemType* PushFront(const T& value);
5390  void PopBack();
5391  void PopFront();
5392 
5393  // Item can be null - it means PushBack.
5394  ItemType* InsertBefore(ItemType* pItem);
5395  // Item can be null - it means PushFront.
5396  ItemType* InsertAfter(ItemType* pItem);
5397 
5398  ItemType* InsertBefore(ItemType* pItem, const T& value);
5399  ItemType* InsertAfter(ItemType* pItem, const T& value);
5400 
5401  void Remove(ItemType* pItem);
5402 
5403 private:
5404  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5405  VmaPoolAllocator<ItemType> m_ItemAllocator;
5406  ItemType* m_pFront;
5407  ItemType* m_pBack;
5408  size_t m_Count;
5409 };
5410 
5411 template<typename T>
5412 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5413  m_pAllocationCallbacks(pAllocationCallbacks),
5414  m_ItemAllocator(pAllocationCallbacks, 128),
5415  m_pFront(VMA_NULL),
5416  m_pBack(VMA_NULL),
5417  m_Count(0)
5418 {
5419 }
5420 
5421 template<typename T>
5422 VmaRawList<T>::~VmaRawList()
5423 {
5424  // Intentionally not calling Clear, because returning every item to m_ItemAllocator
5425  // as free would be unnecessary work - m_ItemAllocator releases its blocks wholesale in its own destructor.
5426 }
5427 
5428 template<typename T>
5429 void VmaRawList<T>::Clear()
5430 {
5431  if(IsEmpty() == false)
5432  {
5433  ItemType* pItem = m_pBack;
5434  while(pItem != VMA_NULL)
5435  {
5436  ItemType* const pPrevItem = pItem->pPrev;
5437  m_ItemAllocator.Free(pItem);
5438  pItem = pPrevItem;
5439  }
5440  m_pFront = VMA_NULL;
5441  m_pBack = VMA_NULL;
5442  m_Count = 0;
5443  }
5444 }
5445 
5446 template<typename T>
5447 VmaListItem<T>* VmaRawList<T>::PushBack()
5448 {
5449  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5450  pNewItem->pNext = VMA_NULL;
5451  if(IsEmpty())
5452  {
5453  pNewItem->pPrev = VMA_NULL;
5454  m_pFront = pNewItem;
5455  m_pBack = pNewItem;
5456  m_Count = 1;
5457  }
5458  else
5459  {
5460  pNewItem->pPrev = m_pBack;
5461  m_pBack->pNext = pNewItem;
5462  m_pBack = pNewItem;
5463  ++m_Count;
5464  }
5465  return pNewItem;
5466 }
5467 
5468 template<typename T>
5469 VmaListItem<T>* VmaRawList<T>::PushFront()
5470 {
5471  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5472  pNewItem->pPrev = VMA_NULL;
5473  if(IsEmpty())
5474  {
5475  pNewItem->pNext = VMA_NULL;
5476  m_pFront = pNewItem;
5477  m_pBack = pNewItem;
5478  m_Count = 1;
5479  }
5480  else
5481  {
5482  pNewItem->pNext = m_pFront;
5483  m_pFront->pPrev = pNewItem;
5484  m_pFront = pNewItem;
5485  ++m_Count;
5486  }
5487  return pNewItem;
5488 }
5489 
5490 template<typename T>
5491 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5492 {
5493  ItemType* const pNewItem = PushBack();
5494  pNewItem->Value = value;
5495  return pNewItem;
5496 }
5497 
5498 template<typename T>
5499 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5500 {
5501  ItemType* const pNewItem = PushFront();
5502  pNewItem->Value = value;
5503  return pNewItem;
5504 }
5505 
5506 template<typename T>
5507 void VmaRawList<T>::PopBack()
5508 {
5509  VMA_HEAVY_ASSERT(m_Count > 0);
5510  ItemType* const pBackItem = m_pBack;
5511  ItemType* const pPrevItem = pBackItem->pPrev;
5512  if(pPrevItem != VMA_NULL)
5513  {
5514  pPrevItem->pNext = VMA_NULL;
5515  }
5516  m_pBack = pPrevItem;
5517  m_ItemAllocator.Free(pBackItem);
5518  --m_Count;
5519 }
5520 
5521 template<typename T>
5522 void VmaRawList<T>::PopFront()
5523 {
5524  VMA_HEAVY_ASSERT(m_Count > 0);
5525  ItemType* const pFrontItem = m_pFront;
5526  ItemType* const pNextItem = pFrontItem->pNext;
5527  if(pNextItem != VMA_NULL)
5528  {
5529  pNextItem->pPrev = VMA_NULL;
5530  }
5531  m_pFront = pNextItem;
5532  m_ItemAllocator.Free(pFrontItem);
5533  --m_Count;
5534 }
5535 
5536 template<typename T>
5537 void VmaRawList<T>::Remove(ItemType* pItem)
5538 {
5539  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5540  VMA_HEAVY_ASSERT(m_Count > 0);
5541 
5542  if(pItem->pPrev != VMA_NULL)
5543  {
5544  pItem->pPrev->pNext = pItem->pNext;
5545  }
5546  else
5547  {
5548  VMA_HEAVY_ASSERT(m_pFront == pItem);
5549  m_pFront = pItem->pNext;
5550  }
5551 
5552  if(pItem->pNext != VMA_NULL)
5553  {
5554  pItem->pNext->pPrev = pItem->pPrev;
5555  }
5556  else
5557  {
5558  VMA_HEAVY_ASSERT(m_pBack == pItem);
5559  m_pBack = pItem->pPrev;
5560  }
5561 
5562  m_ItemAllocator.Free(pItem);
5563  --m_Count;
5564 }
5565 
5566 template<typename T>
5567 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5568 {
5569  if(pItem != VMA_NULL)
5570  {
5571  ItemType* const prevItem = pItem->pPrev;
5572  ItemType* const newItem = m_ItemAllocator.Alloc();
5573  newItem->pPrev = prevItem;
5574  newItem->pNext = pItem;
5575  pItem->pPrev = newItem;
5576  if(prevItem != VMA_NULL)
5577  {
5578  prevItem->pNext = newItem;
5579  }
5580  else
5581  {
5582  VMA_HEAVY_ASSERT(m_pFront == pItem);
5583  m_pFront = newItem;
5584  }
5585  ++m_Count;
5586  return newItem;
5587  }
5588  else
5589  return PushBack();
5590 }
5591 
5592 template<typename T>
5593 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5594 {
5595  if(pItem != VMA_NULL)
5596  {
5597  ItemType* const nextItem = pItem->pNext;
5598  ItemType* const newItem = m_ItemAllocator.Alloc();
5599  newItem->pNext = nextItem;
5600  newItem->pPrev = pItem;
5601  pItem->pNext = newItem;
5602  if(nextItem != VMA_NULL)
5603  {
5604  nextItem->pPrev = newItem;
5605  }
5606  else
5607  {
5608  VMA_HEAVY_ASSERT(m_pBack == pItem);
5609  m_pBack = newItem;
5610  }
5611  ++m_Count;
5612  return newItem;
5613  }
5614  else
5615  return PushFront();
5616 }
5617 
5618 template<typename T>
5619 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5620 {
5621  ItemType* const newItem = InsertBefore(pItem);
5622  newItem->Value = value;
5623  return newItem;
5624 }
5625 
5626 template<typename T>
5627 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5628 {
5629  ItemType* const newItem = InsertAfter(pItem);
5630  newItem->Value = value;
5631  return newItem;
5632 }
5633 
5634 template<typename T, typename AllocatorT>
5635 class VmaList
5636 {
5637  VMA_CLASS_NO_COPY(VmaList)
5638 public:
5639  class iterator
5640  {
5641  public:
5642  iterator() :
5643  m_pList(VMA_NULL),
5644  m_pItem(VMA_NULL)
5645  {
5646  }
5647 
5648  T& operator*() const
5649  {
5650  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5651  return m_pItem->Value;
5652  }
5653  T* operator->() const
5654  {
5655  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5656  return &m_pItem->Value;
5657  }
5658 
5659  iterator& operator++()
5660  {
5661  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5662  m_pItem = m_pItem->pNext;
5663  return *this;
5664  }
5665  iterator& operator--()
5666  {
5667  if(m_pItem != VMA_NULL)
5668  {
5669  m_pItem = m_pItem->pPrev;
5670  }
5671  else
5672  {
5673  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5674  m_pItem = m_pList->Back();
5675  }
5676  return *this;
5677  }
5678 
5679  iterator operator++(int)
5680  {
5681  iterator result = *this;
5682  ++*this;
5683  return result;
5684  }
5685  iterator operator--(int)
5686  {
5687  iterator result = *this;
5688  --*this;
5689  return result;
5690  }
5691 
5692  bool operator==(const iterator& rhs) const
5693  {
5694  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5695  return m_pItem == rhs.m_pItem;
5696  }
5697  bool operator!=(const iterator& rhs) const
5698  {
5699  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5700  return m_pItem != rhs.m_pItem;
5701  }
5702 
5703  private:
5704  VmaRawList<T>* m_pList;
5705  VmaListItem<T>* m_pItem;
5706 
5707  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5708  m_pList(pList),
5709  m_pItem(pItem)
5710  {
5711  }
5712 
5713  friend class VmaList<T, AllocatorT>;
5714  };
5715 
5716  class const_iterator
5717  {
5718  public:
5719  const_iterator() :
5720  m_pList(VMA_NULL),
5721  m_pItem(VMA_NULL)
5722  {
5723  }
5724 
5725  const_iterator(const iterator& src) :
5726  m_pList(src.m_pList),
5727  m_pItem(src.m_pItem)
5728  {
5729  }
5730 
5731  const T& operator*() const
5732  {
5733  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5734  return m_pItem->Value;
5735  }
5736  const T* operator->() const
5737  {
5738  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5739  return &m_pItem->Value;
5740  }
5741 
5742  const_iterator& operator++()
5743  {
5744  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5745  m_pItem = m_pItem->pNext;
5746  return *this;
5747  }
5748  const_iterator& operator--()
5749  {
5750  if(m_pItem != VMA_NULL)
5751  {
5752  m_pItem = m_pItem->pPrev;
5753  }
5754  else
5755  {
5756  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5757  m_pItem = m_pList->Back();
5758  }
5759  return *this;
5760  }
5761 
5762  const_iterator operator++(int)
5763  {
5764  const_iterator result = *this;
5765  ++*this;
5766  return result;
5767  }
5768  const_iterator operator--(int)
5769  {
5770  const_iterator result = *this;
5771  --*this;
5772  return result;
5773  }
5774 
5775  bool operator==(const const_iterator& rhs) const
5776  {
5777  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5778  return m_pItem == rhs.m_pItem;
5779  }
5780  bool operator!=(const const_iterator& rhs) const
5781  {
5782  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5783  return m_pItem != rhs.m_pItem;
5784  }
5785 
5786  private:
5787  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5788  m_pList(pList),
5789  m_pItem(pItem)
5790  {
5791  }
5792 
5793  const VmaRawList<T>* m_pList;
5794  const VmaListItem<T>* m_pItem;
5795 
5796  friend class VmaList<T, AllocatorT>;
5797  };
5798 
5799  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5800 
5801  bool empty() const { return m_RawList.IsEmpty(); }
5802  size_t size() const { return m_RawList.GetCount(); }
5803 
5804  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5805  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5806 
5807  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5808  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5809 
5810  void clear() { m_RawList.Clear(); }
5811  void push_back(const T& value) { m_RawList.PushBack(value); }
5812  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5813  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5814 
5815 private:
5816  VmaRawList<T> m_RawList;
5817 };
5818 
5819 #endif // #if VMA_USE_STL_LIST
5820 
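/*
Illustrative sketch (not part of the library source): VmaList exposes just the
std::list subset the allocator needs - push_back, insert-before, erase and
bidirectional iteration. In the custom implementation above, nodes come from a
VmaPoolAllocator created with a first-block capacity of 128 items.

    VmaList<int, VmaStlAllocator<int> > list((VmaStlAllocator<int>(VMA_NULL)));
    list.push_back(1);
    list.push_back(2);
    for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
        *it += 10; // list = {11, 12}
*/
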
5821 ////////////////////////////////////////////////////////////////////////////////
5822 // class VmaMap
5823 
5824 // Unused in this version.
5825 #if 0
5826 
5827 #if VMA_USE_STL_UNORDERED_MAP
5828 
5829 #define VmaPair std::pair
5830 
5831 #define VMA_MAP_TYPE(KeyT, ValueT) \
5832  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5833 
5834 #else // #if VMA_USE_STL_UNORDERED_MAP
5835 
5836 template<typename T1, typename T2>
5837 struct VmaPair
5838 {
5839  T1 first;
5840  T2 second;
5841 
5842  VmaPair() : first(), second() { }
5843  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5844 };
5845 
5846 /* Class compatible with subset of interface of std::unordered_map.
5847 KeyT, ValueT must be POD because they will be stored in VmaVector.
5848 */
5849 template<typename KeyT, typename ValueT>
5850 class VmaMap
5851 {
5852 public:
5853  typedef VmaPair<KeyT, ValueT> PairType;
5854  typedef PairType* iterator;
5855 
5856  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5857 
5858  iterator begin() { return m_Vector.begin(); }
5859  iterator end() { return m_Vector.end(); }
5860 
5861  void insert(const PairType& pair);
5862  iterator find(const KeyT& key);
5863  void erase(iterator it);
5864 
5865 private:
5866  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5867 };
5868 
5869 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5870 
5871 template<typename FirstT, typename SecondT>
5872 struct VmaPairFirstLess
5873 {
5874  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5875  {
5876  return lhs.first < rhs.first;
5877  }
5878  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5879  {
5880  return lhs.first < rhsFirst;
5881  }
5882 };
5883 
5884 template<typename KeyT, typename ValueT>
5885 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5886 {
5887  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5888  m_Vector.data(),
5889  m_Vector.data() + m_Vector.size(),
5890  pair,
5891  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5892  VmaVectorInsert(m_Vector, indexToInsert, pair);
5893 }
5894 
5895 template<typename KeyT, typename ValueT>
5896 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5897 {
5898  PairType* it = VmaBinaryFindFirstNotLess(
5899  m_Vector.data(),
5900  m_Vector.data() + m_Vector.size(),
5901  key,
5902  VmaPairFirstLess<KeyT, ValueT>());
5903  if((it != m_Vector.end()) && (it->first == key))
5904  {
5905  return it;
5906  }
5907  else
5908  {
5909  return m_Vector.end();
5910  }
5911 }
5912 
5913 template<typename KeyT, typename ValueT>
5914 void VmaMap<KeyT, ValueT>::erase(iterator it)
5915 {
5916  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5917 }
5918 
5919 #endif // #if VMA_USE_STL_UNORDERED_MAP
5920 
5921 #endif // #if 0
5922 
5923 ////////////////////////////////////////////////////////////////////////////////
5924 
5925 class VmaDeviceMemoryBlock;
5926 
5927 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5928 
5929 struct VmaAllocation_T
5930 {
5931 private:
5932  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5933 
5934  enum FLAGS
5935  {
5936  FLAG_USER_DATA_STRING = 0x01,
5937  };
5938 
5939 public:
5940  enum ALLOCATION_TYPE
5941  {
5942  ALLOCATION_TYPE_NONE,
5943  ALLOCATION_TYPE_BLOCK,
5944  ALLOCATION_TYPE_DEDICATED,
5945  };
5946 
5947  /*
5948  This struct is allocated using VmaPoolAllocator.
5949  */
5950 
5951  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
5952  m_Alignment{1},
5953  m_Size{0},
5954  m_pUserData{VMA_NULL},
5955  m_LastUseFrameIndex{currentFrameIndex},
5956  m_MemoryTypeIndex{0},
5957  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
5958  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
5959  m_MapCount{0},
5960  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
5961  {
5962 #if VMA_STATS_STRING_ENABLED
5963  m_CreationFrameIndex = currentFrameIndex;
5964  m_BufferImageUsage = 0;
5965 #endif
5966  }
5967 
5968  ~VmaAllocation_T()
5969  {
5970  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5971 
5972  // Check if owned string was freed.
5973  VMA_ASSERT(m_pUserData == VMA_NULL);
5974  }
5975 
5976  void InitBlockAllocation(
5977  VmaDeviceMemoryBlock* block,
5978  VkDeviceSize offset,
5979  VkDeviceSize alignment,
5980  VkDeviceSize size,
5981  uint32_t memoryTypeIndex,
5982  VmaSuballocationType suballocationType,
5983  bool mapped,
5984  bool canBecomeLost)
5985  {
5986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5987  VMA_ASSERT(block != VMA_NULL);
5988  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5989  m_Alignment = alignment;
5990  m_Size = size;
5991  m_MemoryTypeIndex = memoryTypeIndex;
5992  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5993  m_SuballocationType = (uint8_t)suballocationType;
5994  m_BlockAllocation.m_Block = block;
5995  m_BlockAllocation.m_Offset = offset;
5996  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5997  }
5998 
5999  void InitLost()
6000  {
6001  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6002  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6003  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6004  m_MemoryTypeIndex = 0;
6005  m_BlockAllocation.m_Block = VMA_NULL;
6006  m_BlockAllocation.m_Offset = 0;
6007  m_BlockAllocation.m_CanBecomeLost = true;
6008  }
6009 
6010  void ChangeBlockAllocation(
6011  VmaAllocator hAllocator,
6012  VmaDeviceMemoryBlock* block,
6013  VkDeviceSize offset);
6014 
6015  void ChangeOffset(VkDeviceSize newOffset);
6016 
6017  // pMappedData not null means the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6018  void InitDedicatedAllocation(
6019  uint32_t memoryTypeIndex,
6020  VkDeviceMemory hMemory,
6021  VmaSuballocationType suballocationType,
6022  void* pMappedData,
6023  VkDeviceSize size)
6024  {
6025  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6026  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6027  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6028  m_Alignment = 0;
6029  m_Size = size;
6030  m_MemoryTypeIndex = memoryTypeIndex;
6031  m_SuballocationType = (uint8_t)suballocationType;
6032  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6033  m_DedicatedAllocation.m_hMemory = hMemory;
6034  m_DedicatedAllocation.m_pMappedData = pMappedData;
6035  }
6036 
6037  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6038  VkDeviceSize GetAlignment() const { return m_Alignment; }
6039  VkDeviceSize GetSize() const { return m_Size; }
6040  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6041  void* GetUserData() const { return m_pUserData; }
6042  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6043  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6044 
6045  VmaDeviceMemoryBlock* GetBlock() const
6046  {
6047  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6048  return m_BlockAllocation.m_Block;
6049  }
6050  VkDeviceSize GetOffset() const;
6051  VkDeviceMemory GetMemory() const;
6052  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6053  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6054  void* GetMappedData() const;
6055  bool CanBecomeLost() const;
6056 
6057  uint32_t GetLastUseFrameIndex() const
6058  {
6059  return m_LastUseFrameIndex.load();
6060  }
6061  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6062  {
6063  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6064  }
6065  /*
6066  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6067  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6068  - Else, returns false.
6069 
6070  If hAllocation is already lost, asserts - this function should not be called then.
6071  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
6072  */
6073  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6074 
6075  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6076  {
6077  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6078  outInfo.blockCount = 1;
6079  outInfo.allocationCount = 1;
6080  outInfo.unusedRangeCount = 0;
6081  outInfo.usedBytes = m_Size;
6082  outInfo.unusedBytes = 0;
6083  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6084  outInfo.unusedRangeSizeMin = UINT64_MAX;
6085  outInfo.unusedRangeSizeMax = 0;
6086  }
6087 
6088  void BlockAllocMap();
6089  void BlockAllocUnmap();
6090  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6091  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6092 
6093 #if VMA_STATS_STRING_ENABLED
6094  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6095  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6096 
6097  void InitBufferImageUsage(uint32_t bufferImageUsage)
6098  {
6099  VMA_ASSERT(m_BufferImageUsage == 0);
6100  m_BufferImageUsage = bufferImageUsage;
6101  }
6102 
6103  void PrintParameters(class VmaJsonWriter& json) const;
6104 #endif
6105 
6106 private:
6107  VkDeviceSize m_Alignment;
6108  VkDeviceSize m_Size;
6109  void* m_pUserData;
6110  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6111  uint32_t m_MemoryTypeIndex;
6112  uint8_t m_Type; // ALLOCATION_TYPE
6113  uint8_t m_SuballocationType; // VmaSuballocationType
6114  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6115  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6116  uint8_t m_MapCount;
6117  uint8_t m_Flags; // enum FLAGS
6118 
6119  // Allocation out of VmaDeviceMemoryBlock.
6120  struct BlockAllocation
6121  {
6122  VmaDeviceMemoryBlock* m_Block;
6123  VkDeviceSize m_Offset;
6124  bool m_CanBecomeLost;
6125  };
6126 
6127  // Allocation for an object that has its own private VkDeviceMemory.
6128  struct DedicatedAllocation
6129  {
6130  VkDeviceMemory m_hMemory;
6131  void* m_pMappedData; // Not null means memory is mapped.
6132  };
6133 
6134  union
6135  {
6136  // Allocation out of VmaDeviceMemoryBlock.
6137  BlockAllocation m_BlockAllocation;
6138  // Allocation for an object that has its own private VkDeviceMemory.
6139  DedicatedAllocation m_DedicatedAllocation;
6140  };
6141 
6142 #if VMA_STATS_STRING_ENABLED
6143  uint32_t m_CreationFrameIndex;
6144  uint32_t m_BufferImageUsage; // 0 if unknown.
6145 #endif
6146 
6147  void FreeUserDataString(VmaAllocator hAllocator);
6148 };
6149 
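/*
Illustrative sketch (not part of the library source): m_MapCount packs two facts
into one byte. Bit 0x80 (MAP_COUNT_FLAG_PERSISTENT_MAP) records that the
allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT; the low 7 bits
count vmaMapMemory()/vmaUnmapMemory() nesting.

    const uint8_t mapCount = 0x81; // hypothetical value: persistently mapped + 1 explicit map
    const bool persistentlyMapped = (mapCount & 0x80) != 0; // true
    const uint8_t userMapRefs = mapCount & 0x7F;            // 1
*/
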
6150 /*
6151 Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
6152 and returned as an allocated memory block.
6153 */
6154 struct VmaSuballocation
6155 {
6156  VkDeviceSize offset;
6157  VkDeviceSize size;
6158  VmaAllocation hAllocation;
6159  VmaSuballocationType type;
6160 };
6161 
6162 // Comparator for offsets.
6163 struct VmaSuballocationOffsetLess
6164 {
6165  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6166  {
6167  return lhs.offset < rhs.offset;
6168  }
6169 };
6170 struct VmaSuballocationOffsetGreater
6171 {
6172  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6173  {
6174  return lhs.offset > rhs.offset;
6175  }
6176 };
6177 
6178 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6179 
6180 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
6181 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6182 
6183 enum class VmaAllocationRequestType
6184 {
6185  Normal,
6186  // Used by "Linear" algorithm.
6187  UpperAddress,
6188  EndOf1st,
6189  EndOf2nd,
6190 };
6191 
6192 /*
6193 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6194 
6195 If canMakeOtherLost was false:
6196 - item points to a FREE suballocation.
6197 - itemsToMakeLostCount is 0.
6198 
6199 If canMakeOtherLost was true:
6200 - item points to the first of a sequence of suballocations, which are either FREE,
6201  or point to VmaAllocations that can become lost.
6202 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6203  the requested allocation to succeed.
6204 */
6205 struct VmaAllocationRequest
6206 {
6207  VkDeviceSize offset;
6208  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6209  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6210  VmaSuballocationList::iterator item;
6211  size_t itemsToMakeLostCount;
6212  void* customData;
6213  VmaAllocationRequestType type;
6214 
6215  VkDeviceSize CalcCost() const
6216  {
6217  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6218  }
6219 };
6220 
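/*
Illustrative arithmetic (not from the source): with VMA_LOST_ALLOCATION_COST =
1048576, a request overlapping two lost-able allocations of 4096 bytes each has
CalcCost() = (4096 + 4096) + 2 * 1048576 = 2105344, so plans that sacrifice
existing allocations are heavily penalized compared to ones that only consume
free space.
*/
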
6221 /*
6222 Data structure used for bookkeeping of allocations and unused ranges of memory
6223 in a single VkDeviceMemory block.
6224 */
6225 class VmaBlockMetadata
6226 {
6227 public:
6228  VmaBlockMetadata(VmaAllocator hAllocator);
6229  virtual ~VmaBlockMetadata() { }
6230  virtual void Init(VkDeviceSize size) { m_Size = size; }
6231 
6232  // Validates all data structures inside this object. If not valid, returns false.
6233  virtual bool Validate() const = 0;
6234  VkDeviceSize GetSize() const { return m_Size; }
6235  virtual size_t GetAllocationCount() const = 0;
6236  virtual VkDeviceSize GetSumFreeSize() const = 0;
6237  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6238  // Returns true if this block is empty - contains only a single free suballocation.
6239  virtual bool IsEmpty() const = 0;
6240 
6241  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6242  // Shouldn't modify blockCount.
6243  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6244 
6245 #if VMA_STATS_STRING_ENABLED
6246  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6247 #endif
6248 
6249  // Tries to find a place for a suballocation with the given parameters inside this block.
6250  // If it succeeds, fills pAllocationRequest and returns true.
6251  // If it fails, returns false.
6252  virtual bool CreateAllocationRequest(
6253  uint32_t currentFrameIndex,
6254  uint32_t frameInUseCount,
6255  VkDeviceSize bufferImageGranularity,
6256  VkDeviceSize allocSize,
6257  VkDeviceSize allocAlignment,
6258  bool upperAddress,
6259  VmaSuballocationType allocType,
6260  bool canMakeOtherLost,
6261  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6262  uint32_t strategy,
6263  VmaAllocationRequest* pAllocationRequest) = 0;
6264 
6265  virtual bool MakeRequestedAllocationsLost(
6266  uint32_t currentFrameIndex,
6267  uint32_t frameInUseCount,
6268  VmaAllocationRequest* pAllocationRequest) = 0;
6269 
6270  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6271 
6272  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6273 
6274  // Makes actual allocation based on request. Request must already be checked and valid.
6275  virtual void Alloc(
6276  const VmaAllocationRequest& request,
6277  VmaSuballocationType type,
6278  VkDeviceSize allocSize,
6279  VmaAllocation hAllocation) = 0;
6280 
6281  // Frees suballocation assigned to given memory region.
6282  virtual void Free(const VmaAllocation allocation) = 0;
6283  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6284 
6285 protected:
6286  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6287 
6288 #if VMA_STATS_STRING_ENABLED
6289  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6290  VkDeviceSize unusedBytes,
6291  size_t allocationCount,
6292  size_t unusedRangeCount) const;
6293  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6294  VkDeviceSize offset,
6295  VmaAllocation hAllocation) const;
6296  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6297  VkDeviceSize offset,
6298  VkDeviceSize size) const;
6299  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6300 #endif
6301 
6302 private:
6303  VkDeviceSize m_Size;
6304  const VkAllocationCallbacks* m_pAllocationCallbacks;
6305 };
6306 
6307 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6308  VMA_ASSERT(0 && "Validation failed: " #cond); \
6309  return false; \
6310  } } while(false)
6311 
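/*
Illustrative sketch (not part of the library source): VMA_VALIDATE turns every
failed condition into an assert plus an early "return false", which keeps the
Validate() implementations below flat. A hypothetical checker:

    static bool ValidateCounts(size_t count, size_t capacity)
    {
        VMA_VALIDATE(count <= capacity);
        return true;
    }
*/
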
6312 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6313 {
6314  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6315 public:
6316  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6317  virtual ~VmaBlockMetadata_Generic();
6318  virtual void Init(VkDeviceSize size);
6319 
6320  virtual bool Validate() const;
6321  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6322  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6323  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6324  virtual bool IsEmpty() const;
6325 
6326  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6327  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6328 
6329 #if VMA_STATS_STRING_ENABLED
6330  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6331 #endif
6332 
6333  virtual bool CreateAllocationRequest(
6334  uint32_t currentFrameIndex,
6335  uint32_t frameInUseCount,
6336  VkDeviceSize bufferImageGranularity,
6337  VkDeviceSize allocSize,
6338  VkDeviceSize allocAlignment,
6339  bool upperAddress,
6340  VmaSuballocationType allocType,
6341  bool canMakeOtherLost,
6342  uint32_t strategy,
6343  VmaAllocationRequest* pAllocationRequest);
6344 
6345  virtual bool MakeRequestedAllocationsLost(
6346  uint32_t currentFrameIndex,
6347  uint32_t frameInUseCount,
6348  VmaAllocationRequest* pAllocationRequest);
6349 
6350  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6351 
6352  virtual VkResult CheckCorruption(const void* pBlockData);
6353 
6354  virtual void Alloc(
6355  const VmaAllocationRequest& request,
6356  VmaSuballocationType type,
6357  VkDeviceSize allocSize,
6358  VmaAllocation hAllocation);
6359 
6360  virtual void Free(const VmaAllocation allocation);
6361  virtual void FreeAtOffset(VkDeviceSize offset);
6362 
6363  ////////////////////////////////////////////////////////////////////////////////
6364  // For defragmentation
6365 
6366  bool IsBufferImageGranularityConflictPossible(
6367  VkDeviceSize bufferImageGranularity,
6368  VmaSuballocationType& inOutPrevSuballocType) const;
6369 
6370 private:
6371  friend class VmaDefragmentationAlgorithm_Generic;
6372  friend class VmaDefragmentationAlgorithm_Fast;
6373 
6374  uint32_t m_FreeCount;
6375  VkDeviceSize m_SumFreeSize;
6376  VmaSuballocationList m_Suballocations;
6377  // Suballocations that are free and have a size greater than a certain threshold.
6378  // Sorted by size, ascending.
6379  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6380 
6381  bool ValidateFreeSuballocationList() const;
6382 
6383  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
6384  // If yes, fills pOffset and returns true. If no, returns false.
6385  bool CheckAllocation(
6386  uint32_t currentFrameIndex,
6387  uint32_t frameInUseCount,
6388  VkDeviceSize bufferImageGranularity,
6389  VkDeviceSize allocSize,
6390  VkDeviceSize allocAlignment,
6391  VmaSuballocationType allocType,
6392  VmaSuballocationList::const_iterator suballocItem,
6393  bool canMakeOtherLost,
6394  VkDeviceSize* pOffset,
6395  size_t* itemsToMakeLostCount,
6396  VkDeviceSize* pSumFreeSize,
6397  VkDeviceSize* pSumItemSize) const;
6398  // Given a free suballocation, merges it with the following one, which must also be free.
6399  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6400  // Releases the given suballocation, making it free.
6401  // Merges it with adjacent free suballocations if applicable.
6402  // Returns an iterator to the new free suballocation at this place.
6403  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6404  // Given a free suballocation, inserts it into the sorted list
6405  // m_FreeSuballocationsBySize if it's suitable.
6406  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6407  // Given a free suballocation, removes it from the sorted list
6408  // m_FreeSuballocationsBySize if it's suitable.
6409  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6410 };
6411 
6412 /*
6413 Allocations and their references in internal data structure look like this:
6414 
6415 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6416 
6417  0 +-------+
6418  | |
6419  | |
6420  | |
6421  +-------+
6422  | Alloc | 1st[m_1stNullItemsBeginCount]
6423  +-------+
6424  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6425  +-------+
6426  | ... |
6427  +-------+
6428  | Alloc | 1st[1st.size() - 1]
6429  +-------+
6430  | |
6431  | |
6432  | |
6433 GetSize() +-------+
6434 
6435 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6436 
6437  0 +-------+
6438  | Alloc | 2nd[0]
6439  +-------+
6440  | Alloc | 2nd[1]
6441  +-------+
6442  | ... |
6443  +-------+
6444  | Alloc | 2nd[2nd.size() - 1]
6445  +-------+
6446  | |
6447  | |
6448  | |
6449  +-------+
6450  | Alloc | 1st[m_1stNullItemsBeginCount]
6451  +-------+
6452  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6453  +-------+
6454  | ... |
6455  +-------+
6456  | Alloc | 1st[1st.size() - 1]
6457  +-------+
6458  | |
6459 GetSize() +-------+
6460 
6461 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6462 
6463  0 +-------+
6464  | |
6465  | |
6466  | |
6467  +-------+
6468  | Alloc | 1st[m_1stNullItemsBeginCount]
6469  +-------+
6470  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6471  +-------+
6472  | ... |
6473  +-------+
6474  | Alloc | 1st[1st.size() - 1]
6475  +-------+
6476  | |
6477  | |
6478  | |
6479  +-------+
6480  | Alloc | 2nd[2nd.size() - 1]
6481  +-------+
6482  | ... |
6483  +-------+
6484  | Alloc | 2nd[1]
6485  +-------+
6486  | Alloc | 2nd[0]
6487 GetSize() +-------+
6488 
6489 */
6490 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6491 {
6492  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6493 public:
6494  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6495  virtual ~VmaBlockMetadata_Linear();
6496  virtual void Init(VkDeviceSize size);
6497 
6498  virtual bool Validate() const;
6499  virtual size_t GetAllocationCount() const;
6500  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6501  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6502  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6503 
6504  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6505  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6506 
6507 #if VMA_STATS_STRING_ENABLED
6508  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6509 #endif
6510 
6511  virtual bool CreateAllocationRequest(
6512  uint32_t currentFrameIndex,
6513  uint32_t frameInUseCount,
6514  VkDeviceSize bufferImageGranularity,
6515  VkDeviceSize allocSize,
6516  VkDeviceSize allocAlignment,
6517  bool upperAddress,
6518  VmaSuballocationType allocType,
6519  bool canMakeOtherLost,
6520  uint32_t strategy,
6521  VmaAllocationRequest* pAllocationRequest);
6522 
6523  virtual bool MakeRequestedAllocationsLost(
6524  uint32_t currentFrameIndex,
6525  uint32_t frameInUseCount,
6526  VmaAllocationRequest* pAllocationRequest);
6527 
6528  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6529 
6530  virtual VkResult CheckCorruption(const void* pBlockData);
6531 
6532  virtual void Alloc(
6533  const VmaAllocationRequest& request,
6534  VmaSuballocationType type,
6535  VkDeviceSize allocSize,
6536  VmaAllocation hAllocation);
6537 
6538  virtual void Free(const VmaAllocation allocation);
6539  virtual void FreeAtOffset(VkDeviceSize offset);
6540 
6541 private:
6542  /*
6543  There are two suballocation vectors, used in a ping-pong fashion.
6544  The one with index m_1stVectorIndex is called 1st.
6545  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6546  2nd can be non-empty only when 1st is not empty.
6547  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6548  */
6549  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6550 
6551  enum SECOND_VECTOR_MODE
6552  {
6553  SECOND_VECTOR_EMPTY,
6554  /*
6555  Suballocations in the 2nd vector are created later than the ones in the 1st,
6556  but they all have smaller offsets.
6557  */
6558  SECOND_VECTOR_RING_BUFFER,
6559  /*
6560  Suballocations in the 2nd vector form the upper side of a double stack.
6561  They all have offsets higher than those in the 1st vector.
6562  The top of this stack means smaller offsets, but higher indices in this vector.
6563  */
6564  SECOND_VECTOR_DOUBLE_STACK,
6565  };
6566 
6567  VkDeviceSize m_SumFreeSize;
6568  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6569  uint32_t m_1stVectorIndex;
6570  SECOND_VECTOR_MODE m_2ndVectorMode;
6571 
6572  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6573  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6574  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6575  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6576 
6577  // Number of items in 1st vector with hAllocation = null at the beginning.
6578  size_t m_1stNullItemsBeginCount;
6579  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6580  size_t m_1stNullItemsMiddleCount;
6581  // Number of items in 2nd vector with hAllocation = null.
6582  size_t m_2ndNullItemsCount;
6583 
6584  bool ShouldCompact1st() const;
6585  void CleanupAfterFree();
6586 
6587  bool CreateAllocationRequest_LowerAddress(
6588  uint32_t currentFrameIndex,
6589  uint32_t frameInUseCount,
6590  VkDeviceSize bufferImageGranularity,
6591  VkDeviceSize allocSize,
6592  VkDeviceSize allocAlignment,
6593  VmaSuballocationType allocType,
6594  bool canMakeOtherLost,
6595  uint32_t strategy,
6596  VmaAllocationRequest* pAllocationRequest);
6597  bool CreateAllocationRequest_UpperAddress(
6598  uint32_t currentFrameIndex,
6599  uint32_t frameInUseCount,
6600  VkDeviceSize bufferImageGranularity,
6601  VkDeviceSize allocSize,
6602  VkDeviceSize allocAlignment,
6603  VmaSuballocationType allocType,
6604  bool canMakeOtherLost,
6605  uint32_t strategy,
6606  VmaAllocationRequest* pAllocationRequest);
6607 };
6608 
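/*
Illustrative trace (hypothetical, not from the source) of SECOND_VECTOR_RING_BUFFER
on a 100-byte block, showing how the 2nd vector picks up wrapped-around
allocations at low offsets once the end of the block is reached:

    alloc A(40) -> 1st = {A@0}
    alloc B(40) -> 1st = {A@0, B@40}
    free  A     -> 1st = {null@0, B@40}, m_1stNullItemsBeginCount = 1
    alloc C(30) -> no room after B; 2nd = {C@0}, mode = SECOND_VECTOR_RING_BUFFER
*/
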
6609 /*
6610 - GetSize() is the original size of the allocated memory block.
6611 - m_UsableSize is this size aligned down to a power of two.
6612  All allocations and calculations happen relative to m_UsableSize.
6613 - GetUnusableSize() is the difference between them.
6614  It is reported as a separate, unused range, not available for allocations.
6615 
6616 Node at level 0 has size = m_UsableSize.
6617 Each next level contains nodes half the size of those at the previous level.
6618 m_LevelCount is the maximum number of levels to use in the current object.
6619 */
6620 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6621 {
6622  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6623 public:
6624  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6625  virtual ~VmaBlockMetadata_Buddy();
6626  virtual void Init(VkDeviceSize size);
6627 
6628  virtual bool Validate() const;
6629  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6630  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6631  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6632  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6633 
6634  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6635  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6636 
6637 #if VMA_STATS_STRING_ENABLED
6638  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6639 #endif
6640 
6641  virtual bool CreateAllocationRequest(
6642  uint32_t currentFrameIndex,
6643  uint32_t frameInUseCount,
6644  VkDeviceSize bufferImageGranularity,
6645  VkDeviceSize allocSize,
6646  VkDeviceSize allocAlignment,
6647  bool upperAddress,
6648  VmaSuballocationType allocType,
6649  bool canMakeOtherLost,
6650  uint32_t strategy,
6651  VmaAllocationRequest* pAllocationRequest);
6652 
6653  virtual bool MakeRequestedAllocationsLost(
6654  uint32_t currentFrameIndex,
6655  uint32_t frameInUseCount,
6656  VmaAllocationRequest* pAllocationRequest);
6657 
6658  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6659 
6660  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6661 
6662  virtual void Alloc(
6663  const VmaAllocationRequest& request,
6664  VmaSuballocationType type,
6665  VkDeviceSize allocSize,
6666  VmaAllocation hAllocation);
6667 
6668  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6669  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6670 
6671 private:
6672  static const VkDeviceSize MIN_NODE_SIZE = 32;
6673  static const size_t MAX_LEVELS = 30;
6674 
6675  struct ValidationContext
6676  {
6677  size_t calculatedAllocationCount;
6678  size_t calculatedFreeCount;
6679  VkDeviceSize calculatedSumFreeSize;
6680 
6681  ValidationContext() :
6682  calculatedAllocationCount(0),
6683  calculatedFreeCount(0),
6684  calculatedSumFreeSize(0) { }
6685  };
6686 
6687  struct Node
6688  {
6689  VkDeviceSize offset;
6690  enum TYPE
6691  {
6692  TYPE_FREE,
6693  TYPE_ALLOCATION,
6694  TYPE_SPLIT,
6695  TYPE_COUNT
6696  } type;
6697  Node* parent;
6698  Node* buddy;
6699 
6700  union
6701  {
6702  struct
6703  {
6704  Node* prev;
6705  Node* next;
6706  } free;
6707  struct
6708  {
6709  VmaAllocation alloc;
6710  } allocation;
6711  struct
6712  {
6713  Node* leftChild;
6714  } split;
6715  };
6716  };
6717 
6718  // Size of the memory block aligned down to a power of two.
6719  VkDeviceSize m_UsableSize;
6720  uint32_t m_LevelCount;
6721 
6722  Node* m_Root;
6723  struct {
6724  Node* front;
6725  Node* back;
6726  } m_FreeList[MAX_LEVELS];
6727  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6728  size_t m_AllocationCount;
6729  // Number of nodes in the tree with type == TYPE_FREE.
6730  size_t m_FreeCount;
6731  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6732  VkDeviceSize m_SumFreeSize;
6733 
6734  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6735  void DeleteNode(Node* node);
6736  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6737  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6738  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6739  // Alloc passed just for validation. Can be null.
6740  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6741  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6742  // Adds node to the front of FreeList at given level.
6743  // node->type must be FREE.
6744  // node->free.prev, next can be undefined.
6745  void AddToFreeListFront(uint32_t level, Node* node);
6746  // Removes node from FreeList at given level.
6747  // node->type must be FREE.
6748  // node->free.prev, next stay untouched.
6749  void RemoveFromFreeList(uint32_t level, Node* node);
6750 
6751 #if VMA_STATS_STRING_ENABLED
6752  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6753 #endif
6754 };
6755 
6756 /*
6757 Represents a single block of device memory (`VkDeviceMemory`) with all the
6758 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6759 
6760 Thread-safety: This class must be externally synchronized.
6761 */
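
// A hedged usage sketch of the Map()/Unmap() reference counting declared
// below: the memory is mapped only on the 0 -> 1 transition of m_MapCount
// and unmapped only when the count returns to 0, so nested users must pass
// matching counts. Illustrative only; error handling omitted.
//
//   void* pData = VMA_NULL;
//   block.Map(hAllocator, 1, &pData);   // m_MapCount 0 -> 1: maps the memory.
//   block.Map(hAllocator, 1, VMA_NULL); // m_MapCount 1 -> 2: reuses the mapping.
//   block.Unmap(hAllocator, 2);         // m_MapCount 2 -> 0: unmaps the memory.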
6762 class VmaDeviceMemoryBlock
6763 {
6764  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6765 public:
6766  VmaBlockMetadata* m_pMetadata;
6767 
6768  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6769 
6770  ~VmaDeviceMemoryBlock()
6771  {
6772  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6773  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6774  }
6775 
6776  // Always call after construction.
6777  void Init(
6778  VmaAllocator hAllocator,
6779  VmaPool hParentPool,
6780  uint32_t newMemoryTypeIndex,
6781  VkDeviceMemory newMemory,
6782  VkDeviceSize newSize,
6783  uint32_t id,
6784  uint32_t algorithm);
6785  // Always call before destruction.
6786  void Destroy(VmaAllocator allocator);
6787 
6788  VmaPool GetParentPool() const { return m_hParentPool; }
6789  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6790  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6791  uint32_t GetId() const { return m_Id; }
6792  void* GetMappedData() const { return m_pMappedData; }
6793 
6794  // Validates all data structures inside this object. If not valid, returns false.
6795  bool Validate() const;
6796 
6797  VkResult CheckCorruption(VmaAllocator hAllocator);
6798 
6799  // ppData can be null.
6800  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6801  void Unmap(VmaAllocator hAllocator, uint32_t count);
6802 
6803  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6804  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6805 
6806  VkResult BindBufferMemory(
6807  const VmaAllocator hAllocator,
6808  const VmaAllocation hAllocation,
6809  VkDeviceSize allocationLocalOffset,
6810  VkBuffer hBuffer,
6811  const void* pNext);
6812  VkResult BindImageMemory(
6813  const VmaAllocator hAllocator,
6814  const VmaAllocation hAllocation,
6815  VkDeviceSize allocationLocalOffset,
6816  VkImage hImage,
6817  const void* pNext);
6818 
6819 private:
6820  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
6821  uint32_t m_MemoryTypeIndex;
6822  uint32_t m_Id;
6823  VkDeviceMemory m_hMemory;
6824 
6825  /*
6826  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6827  Also protects m_MapCount, m_pMappedData.
6828  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
6829  */
6830  VMA_MUTEX m_Mutex;
6831  uint32_t m_MapCount;
6832  void* m_pMappedData;
6833 };
6834 
6835 struct VmaPointerLess
6836 {
6837  bool operator()(const void* lhs, const void* rhs) const
6838  {
6839  return lhs < rhs;
6840  }
6841 };
6842 
6843 struct VmaDefragmentationMove
6844 {
6845  size_t srcBlockIndex;
6846  size_t dstBlockIndex;
6847  VkDeviceSize srcOffset;
6848  VkDeviceSize dstOffset;
6849  VkDeviceSize size;
6850  VmaAllocation hAllocation;
6851  VmaDeviceMemoryBlock* pSrcBlock;
6852  VmaDeviceMemoryBlock* pDstBlock;
6853 };
6854 
6855 class VmaDefragmentationAlgorithm;
6856 
6857 /*
6858 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6859 Vulkan memory type.
6860 
6861 Synchronized internally with a mutex.
6862 */
6863 struct VmaBlockVector
6864 {
6865  VMA_CLASS_NO_COPY(VmaBlockVector)
6866 public:
6867  VmaBlockVector(
6868  VmaAllocator hAllocator,
6869  VmaPool hParentPool,
6870  uint32_t memoryTypeIndex,
6871  VkDeviceSize preferredBlockSize,
6872  size_t minBlockCount,
6873  size_t maxBlockCount,
6874  VkDeviceSize bufferImageGranularity,
6875  uint32_t frameInUseCount,
6876  bool explicitBlockSize,
6877  uint32_t algorithm);
6878  ~VmaBlockVector();
6879 
6880  VkResult CreateMinBlocks();
6881 
6882  VmaAllocator GetAllocator() const { return m_hAllocator; }
6883  VmaPool GetParentPool() const { return m_hParentPool; }
6884  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6885  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6886  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6887  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6888  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6889  uint32_t GetAlgorithm() const { return m_Algorithm; }
6890 
6891  void GetPoolStats(VmaPoolStats* pStats);
6892 
6893  bool IsEmpty();
6894  bool IsCorruptionDetectionEnabled() const;
6895 
6896  VkResult Allocate(
6897  uint32_t currentFrameIndex,
6898  VkDeviceSize size,
6899  VkDeviceSize alignment,
6900  const VmaAllocationCreateInfo& createInfo,
6901  VmaSuballocationType suballocType,
6902  size_t allocationCount,
6903  VmaAllocation* pAllocations);
6904 
6905  void Free(const VmaAllocation hAllocation);
6906 
6907  // Adds statistics of this BlockVector to pStats.
6908  void AddStats(VmaStats* pStats);
6909 
6910 #if VMA_STATS_STRING_ENABLED
6911  void PrintDetailedMap(class VmaJsonWriter& json);
6912 #endif
6913 
6914  void MakePoolAllocationsLost(
6915  uint32_t currentFrameIndex,
6916  size_t* pLostAllocationCount);
6917  VkResult CheckCorruption();
6918 
6919  // Saves results in pCtx->res.
6920  void Defragment(
6921  class VmaBlockVectorDefragmentationContext* pCtx,
6922  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
6923  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6924  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6925  VkCommandBuffer commandBuffer);
6926  void DefragmentationEnd(
6927  class VmaBlockVectorDefragmentationContext* pCtx,
6928  uint32_t flags,
6929  VmaDefragmentationStats* pStats);
6930 
6931  uint32_t ProcessDefragmentations(
6932  class VmaBlockVectorDefragmentationContext *pCtx,
6933  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
6934 
6935  void CommitDefragmentations(
6936  class VmaBlockVectorDefragmentationContext *pCtx,
6937  VmaDefragmentationStats* pStats);
6938 
6940  // To be used only while the m_Mutex is locked. Used during defragmentation.
6941 
6942  size_t GetBlockCount() const { return m_Blocks.size(); }
6943  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6944  size_t CalcAllocationCount() const;
6945  bool IsBufferImageGranularityConflictPossible() const;
6946 
6947 private:
6948  friend class VmaDefragmentationAlgorithm_Generic;
6949 
6950  const VmaAllocator m_hAllocator;
6951  const VmaPool m_hParentPool;
6952  const uint32_t m_MemoryTypeIndex;
6953  const VkDeviceSize m_PreferredBlockSize;
6954  const size_t m_MinBlockCount;
6955  const size_t m_MaxBlockCount;
6956  const VkDeviceSize m_BufferImageGranularity;
6957  const uint32_t m_FrameInUseCount;
6958  const bool m_ExplicitBlockSize;
6959  const uint32_t m_Algorithm;
6960  VMA_RW_MUTEX m_Mutex;
6961 
6962  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6963  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6964  bool m_HasEmptyBlock;
6965  // Incrementally sorted by sumFreeSize, ascending.
6966  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6967  uint32_t m_NextBlockId;
6968 
6969  VkDeviceSize CalcMaxBlockSize() const;
6970 
6971  // Finds and removes given block from vector.
6972  void Remove(VmaDeviceMemoryBlock* pBlock);
6973 
6974  // Performs a single step in sorting m_Blocks. They may not be fully sorted
6975  // after this call.
6976  void IncrementallySortBlocks();
6977 
6978  VkResult AllocatePage(
6979  uint32_t currentFrameIndex,
6980  VkDeviceSize size,
6981  VkDeviceSize alignment,
6982  const VmaAllocationCreateInfo& createInfo,
6983  VmaSuballocationType suballocType,
6984  VmaAllocation* pAllocation);
6985 
6986  // To be used only without CAN_MAKE_OTHER_LOST flag.
6987  VkResult AllocateFromBlock(
6988  VmaDeviceMemoryBlock* pBlock,
6989  uint32_t currentFrameIndex,
6990  VkDeviceSize size,
6991  VkDeviceSize alignment,
6992  VmaAllocationCreateFlags allocFlags,
6993  void* pUserData,
6994  VmaSuballocationType suballocType,
6995  uint32_t strategy,
6996  VmaAllocation* pAllocation);
6997 
6998  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6999 
7000  // Saves result to pCtx->res.
7001  void ApplyDefragmentationMovesCpu(
7002  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7003  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7004  // Saves result to pCtx->res.
7005  void ApplyDefragmentationMovesGpu(
7006  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7007  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7008  VkCommandBuffer commandBuffer);
7009 
7010  /*
7011  Used during defragmentation. pDefragmentationStats is optional. It's in/out
7012  - updated with new data.
7013  */
7014  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7015 
7016  void UpdateHasEmptyBlock();
7017 };
7018 
7019 struct VmaPool_T
7020 {
7021  VMA_CLASS_NO_COPY(VmaPool_T)
7022 public:
7023  VmaBlockVector m_BlockVector;
7024 
7025  VmaPool_T(
7026  VmaAllocator hAllocator,
7027  const VmaPoolCreateInfo& createInfo,
7028  VkDeviceSize preferredBlockSize);
7029  ~VmaPool_T();
7030 
7031  uint32_t GetId() const { return m_Id; }
7032  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7033 
7034  const char* GetName() const { return m_Name; }
7035  void SetName(const char* pName);
7036 
7037 #if VMA_STATS_STRING_ENABLED
7038  //void PrintDetailedMap(class VmaStringBuilder& sb);
7039 #endif
7040 
7041 private:
7042  uint32_t m_Id;
7043  char* m_Name;
7044 };
7045 
7046 /*
7047 Performs defragmentation:
7048 
7049 - Updates `pBlockVector->m_pMetadata`.
7050 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7051 - Does not move actual data, only returns requested moves as `moves`.
7052 */
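
// A hedged sketch of the contract above: Defragment() only records planned
// moves and updates metadata; copying the bytes is the caller's job (see
// ApplyDefragmentationMovesCpu/Gpu in VmaBlockVector). Illustrative only:
//
//   VkResult res = pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove, flags);
//   for(size_t i = 0; i < moves.size(); ++i)
//   {
//       const VmaDefragmentationMove& move = moves[i];
//       // Copy move.size bytes from (move.srcBlockIndex, move.srcOffset)
//       // to (move.dstBlockIndex, move.dstOffset). The handle
//       // move.hAllocation already refers to the destination.
//   }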
7053 class VmaDefragmentationAlgorithm
7054 {
7055  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7056 public:
7057  VmaDefragmentationAlgorithm(
7058  VmaAllocator hAllocator,
7059  VmaBlockVector* pBlockVector,
7060  uint32_t currentFrameIndex) :
7061  m_hAllocator(hAllocator),
7062  m_pBlockVector(pBlockVector),
7063  m_CurrentFrameIndex(currentFrameIndex)
7064  {
7065  }
7066  virtual ~VmaDefragmentationAlgorithm()
7067  {
7068  }
7069 
7070  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7071  virtual void AddAll() = 0;
7072 
7073  virtual VkResult Defragment(
7074  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7075  VkDeviceSize maxBytesToMove,
7076  uint32_t maxAllocationsToMove,
7077  VmaDefragmentationFlags flags) = 0;
7078 
7079  virtual VkDeviceSize GetBytesMoved() const = 0;
7080  virtual uint32_t GetAllocationsMoved() const = 0;
7081 
7082 protected:
7083  VmaAllocator const m_hAllocator;
7084  VmaBlockVector* const m_pBlockVector;
7085  const uint32_t m_CurrentFrameIndex;
7086 
7087  struct AllocationInfo
7088  {
7089  VmaAllocation m_hAllocation;
7090  VkBool32* m_pChanged;
7091 
7092  AllocationInfo() :
7093  m_hAllocation(VK_NULL_HANDLE),
7094  m_pChanged(VMA_NULL)
7095  {
7096  }
7097  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7098  m_hAllocation(hAlloc),
7099  m_pChanged(pChanged)
7100  {
7101  }
7102  };
7103 };
7104 
7105 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7106 {
7107  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7108 public:
7109  VmaDefragmentationAlgorithm_Generic(
7110  VmaAllocator hAllocator,
7111  VmaBlockVector* pBlockVector,
7112  uint32_t currentFrameIndex,
7113  bool overlappingMoveSupported);
7114  virtual ~VmaDefragmentationAlgorithm_Generic();
7115 
7116  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7117  virtual void AddAll() { m_AllAllocations = true; }
7118 
7119  virtual VkResult Defragment(
7120  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7121  VkDeviceSize maxBytesToMove,
7122  uint32_t maxAllocationsToMove,
7123  VmaDefragmentationFlags flags);
7124 
7125  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7126  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7127 
7128 private:
7129  uint32_t m_AllocationCount;
7130  bool m_AllAllocations;
7131 
7132  VkDeviceSize m_BytesMoved;
7133  uint32_t m_AllocationsMoved;
7134 
7135  struct AllocationInfoSizeGreater
7136  {
7137  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7138  {
7139  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7140  }
7141  };
7142 
7143  struct AllocationInfoOffsetGreater
7144  {
7145  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7146  {
7147  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7148  }
7149  };
7150 
7151  struct BlockInfo
7152  {
7153  size_t m_OriginalBlockIndex;
7154  VmaDeviceMemoryBlock* m_pBlock;
7155  bool m_HasNonMovableAllocations;
7156  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7157 
7158  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7159  m_OriginalBlockIndex(SIZE_MAX),
7160  m_pBlock(VMA_NULL),
7161  m_HasNonMovableAllocations(true),
7162  m_Allocations(pAllocationCallbacks)
7163  {
7164  }
7165 
7166  void CalcHasNonMovableAllocations()
7167  {
7168  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7169  const size_t defragmentAllocCount = m_Allocations.size();
7170  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7171  }
7172 
7173  void SortAllocationsBySizeDescending()
7174  {
7175  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7176  }
7177 
7178  void SortAllocationsByOffsetDescending()
7179  {
7180  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7181  }
7182  };
7183 
7184  struct BlockPointerLess
7185  {
7186  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7187  {
7188  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7189  }
7190  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7191  {
7192  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7193  }
7194  };
7195 
7196  // 1. Blocks with some non-movable allocations go first.
7197  // 2. Blocks with smaller sumFreeSize go first.
7198  struct BlockInfoCompareMoveDestination
7199  {
7200  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7201  {
7202  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7203  {
7204  return true;
7205  }
7206  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7207  {
7208  return false;
7209  }
7210  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7211  {
7212  return true;
7213  }
7214  return false;
7215  }
7216  };
7217 
7218  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7219  BlockInfoVector m_Blocks;
7220 
7221  VkResult DefragmentRound(
7222  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7223  VkDeviceSize maxBytesToMove,
7224  uint32_t maxAllocationsToMove,
7225  bool freeOldAllocations);
7226 
7227  size_t CalcBlocksWithNonMovableCount() const;
7228 
7229  static bool MoveMakesSense(
7230  size_t dstBlockIndex, VkDeviceSize dstOffset,
7231  size_t srcBlockIndex, VkDeviceSize srcOffset);
7232 };
7233 
7234 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7235 {
7236  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7237 public:
7238  VmaDefragmentationAlgorithm_Fast(
7239  VmaAllocator hAllocator,
7240  VmaBlockVector* pBlockVector,
7241  uint32_t currentFrameIndex,
7242  bool overlappingMoveSupported);
7243  virtual ~VmaDefragmentationAlgorithm_Fast();
7244 
7245  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7246  virtual void AddAll() { m_AllAllocations = true; }
7247 
7248  virtual VkResult Defragment(
7249  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7250  VkDeviceSize maxBytesToMove,
7251  uint32_t maxAllocationsToMove,
7252  VmaDefragmentationFlags flags);
7253 
7254  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7255  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7256 
7257 private:
7258  struct BlockInfo
7259  {
7260  size_t origBlockIndex;
7261  };
7262 
7263  class FreeSpaceDatabase
7264  {
7265  public:
7266  FreeSpaceDatabase()
7267  {
7268  FreeSpace s = {};
7269  s.blockInfoIndex = SIZE_MAX;
7270  for(size_t i = 0; i < MAX_COUNT; ++i)
7271  {
7272  m_FreeSpaces[i] = s;
7273  }
7274  }
7275 
7276  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7277  {
7278  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7279  {
7280  return;
7281  }
7282 
7283  // Find the first invalid structure, or else the smallest one that is smaller than the new free space (to replace it).
7284  size_t bestIndex = SIZE_MAX;
7285  for(size_t i = 0; i < MAX_COUNT; ++i)
7286  {
7287  // Empty structure.
7288  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7289  {
7290  bestIndex = i;
7291  break;
7292  }
7293  if(m_FreeSpaces[i].size < size &&
7294  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7295  {
7296  bestIndex = i;
7297  }
7298  }
7299 
7300  if(bestIndex != SIZE_MAX)
7301  {
7302  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7303  m_FreeSpaces[bestIndex].offset = offset;
7304  m_FreeSpaces[bestIndex].size = size;
7305  }
7306  }
7307 
7308  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7309  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7310  {
7311  size_t bestIndex = SIZE_MAX;
7312  VkDeviceSize bestFreeSpaceAfter = 0;
7313  for(size_t i = 0; i < MAX_COUNT; ++i)
7314  {
7315  // Structure is valid.
7316  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7317  {
7318  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7319  // Allocation fits into this structure.
7320  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7321  {
7322  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7323  (dstOffset + size);
7324  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7325  {
7326  bestIndex = i;
7327  bestFreeSpaceAfter = freeSpaceAfter;
7328  }
7329  }
7330  }
7331  }
7332 
7333  if(bestIndex != SIZE_MAX)
7334  {
7335  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7336  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7337 
7338  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7339  {
7340  // Leave this structure for remaining empty space.
7341  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7342  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7343  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7344  }
7345  else
7346  {
7347  // This structure becomes invalid.
7348  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7349  }
7350 
7351  return true;
7352  }
7353 
7354  return false;
7355  }
7356 
7357  private:
7358  static const size_t MAX_COUNT = 4;
7359 
7360  struct FreeSpace
7361  {
7362  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7363  VkDeviceSize offset;
7364  VkDeviceSize size;
7365  } m_FreeSpaces[MAX_COUNT];
7366  };
7367 
7368  const bool m_OverlappingMoveSupported;
7369 
7370  uint32_t m_AllocationCount;
7371  bool m_AllAllocations;
7372 
7373  VkDeviceSize m_BytesMoved;
7374  uint32_t m_AllocationsMoved;
7375 
7376  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7377 
7378  void PreprocessMetadata();
7379  void PostprocessMetadata();
7380  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7381 };
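
// A hedged walk-through of the FreeSpaceDatabase above: Register() keeps up
// to MAX_COUNT candidate holes (when full, it overwrites the smallest entry
// that is smaller than the new hole), and Fetch() chooses the hole that
// leaves the most space after the aligned allocation, then shrinks or
// invalidates the chosen entry. Values below are illustrative:
//
//   db.Register(/*blockInfoIndex*/ 0, /*offset*/ 128, /*size*/ 1024);
//   size_t blockInfoIndex; VkDeviceSize dstOffset;
//   if(db.Fetch(/*alignment*/ 256, /*size*/ 512, blockInfoIndex, dstOffset))
//   {
//       // blockInfoIndex == 0, dstOffset == 256 (128 aligned up to 256).
//       // The entry shrinks to offset 768, size 384 for future fetches.
//   }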
7382 
7383 struct VmaBlockDefragmentationContext
7384 {
7385  enum BLOCK_FLAG
7386  {
7387  BLOCK_FLAG_USED = 0x00000001,
7388  };
7389  uint32_t flags;
7390  VkBuffer hBuffer;
7391 };
7392 
7393 class VmaBlockVectorDefragmentationContext
7394 {
7395  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7396 public:
7397  VkResult res;
7398  bool mutexLocked;
7399  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7400  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7401  uint32_t defragmentationMovesProcessed;
7402  uint32_t defragmentationMovesCommitted;
7403  bool hasDefragmentationPlan;
7404 
7405  VmaBlockVectorDefragmentationContext(
7406  VmaAllocator hAllocator,
7407  VmaPool hCustomPool, // Optional.
7408  VmaBlockVector* pBlockVector,
7409  uint32_t currFrameIndex);
7410  ~VmaBlockVectorDefragmentationContext();
7411 
7412  VmaPool GetCustomPool() const { return m_hCustomPool; }
7413  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7414  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7415 
7416  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7417  void AddAll() { m_AllAllocations = true; }
7418 
7419  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7420 
7421 private:
7422  const VmaAllocator m_hAllocator;
7423  // Null if not from a custom pool.
7424  const VmaPool m_hCustomPool;
7425  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7426  VmaBlockVector* const m_pBlockVector;
7427  const uint32_t m_CurrFrameIndex;
7428  // Owner of this object.
7429  VmaDefragmentationAlgorithm* m_pAlgorithm;
7430 
7431  struct AllocInfo
7432  {
7433  VmaAllocation hAlloc;
7434  VkBool32* pChanged;
7435  };
7436  // Used between constructor and Begin.
7437  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7438  bool m_AllAllocations;
7439 };
7440 
7441 struct VmaDefragmentationContext_T
7442 {
7443 private:
7444  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7445 public:
7446  VmaDefragmentationContext_T(
7447  VmaAllocator hAllocator,
7448  uint32_t currFrameIndex,
7449  uint32_t flags,
7450  VmaDefragmentationStats* pStats);
7451  ~VmaDefragmentationContext_T();
7452 
7453  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7454  void AddAllocations(
7455  uint32_t allocationCount,
7456  const VmaAllocation* pAllocations,
7457  VkBool32* pAllocationsChanged);
7458 
7459  /*
7460  Returns:
7461  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
7462  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7463  - Negative value if an error occurred and object can be destroyed immediately.
7464  */
7465  VkResult Defragment(
7466  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7467  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7468  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7469 
7470  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7471  VkResult DefragmentPassEnd();
7472 
7473 private:
7474  const VmaAllocator m_hAllocator;
7475  const uint32_t m_CurrFrameIndex;
7476  const uint32_t m_Flags;
7477  VmaDefragmentationStats* const m_pStats;
7478 
7479  VkDeviceSize m_MaxCpuBytesToMove;
7480  uint32_t m_MaxCpuAllocationsToMove;
7481  VkDeviceSize m_MaxGpuBytesToMove;
7482  uint32_t m_MaxGpuAllocationsToMove;
7483 
7484  // Owner of these objects.
7485  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7486  // Owner of these objects.
7487  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7488 };
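
// A hedged sketch of the VK_NOT_READY contract above, as seen through the
// public API (assuming `info` is a filled VmaDefragmentationInfo2 with a
// valid command buffer; error handling omitted):
//
//   VmaDefragmentationContext ctx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
//   if(res == VK_NOT_READY)
//   {
//       // GPU moves were recorded into info.commandBuffer: submit it,
//       // wait for completion, then release the context.
//       vmaDefragmentationEnd(allocator, ctx);
//   }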
7489 
7490 #if VMA_RECORDING_ENABLED
7491 
7492 class VmaRecorder
7493 {
7494 public:
7495  VmaRecorder();
7496  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7497  void WriteConfiguration(
7498  const VkPhysicalDeviceProperties& devProps,
7499  const VkPhysicalDeviceMemoryProperties& memProps,
7500  uint32_t vulkanApiVersion,
7501  bool dedicatedAllocationExtensionEnabled,
7502  bool bindMemory2ExtensionEnabled,
7503  bool memoryBudgetExtensionEnabled,
7504  bool deviceCoherentMemoryExtensionEnabled);
7505  ~VmaRecorder();
7506 
7507  void RecordCreateAllocator(uint32_t frameIndex);
7508  void RecordDestroyAllocator(uint32_t frameIndex);
7509  void RecordCreatePool(uint32_t frameIndex,
7510  const VmaPoolCreateInfo& createInfo,
7511  VmaPool pool);
7512  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7513  void RecordAllocateMemory(uint32_t frameIndex,
7514  const VkMemoryRequirements& vkMemReq,
7515  const VmaAllocationCreateInfo& createInfo,
7516  VmaAllocation allocation);
7517  void RecordAllocateMemoryPages(uint32_t frameIndex,
7518  const VkMemoryRequirements& vkMemReq,
7519  const VmaAllocationCreateInfo& createInfo,
7520  uint64_t allocationCount,
7521  const VmaAllocation* pAllocations);
7522  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7523  const VkMemoryRequirements& vkMemReq,
7524  bool requiresDedicatedAllocation,
7525  bool prefersDedicatedAllocation,
7526  const VmaAllocationCreateInfo& createInfo,
7527  VmaAllocation allocation);
7528  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7529  const VkMemoryRequirements& vkMemReq,
7530  bool requiresDedicatedAllocation,
7531  bool prefersDedicatedAllocation,
7532  const VmaAllocationCreateInfo& createInfo,
7533  VmaAllocation allocation);
7534  void RecordFreeMemory(uint32_t frameIndex,
7535  VmaAllocation allocation);
7536  void RecordFreeMemoryPages(uint32_t frameIndex,
7537  uint64_t allocationCount,
7538  const VmaAllocation* pAllocations);
7539  void RecordSetAllocationUserData(uint32_t frameIndex,
7540  VmaAllocation allocation,
7541  const void* pUserData);
7542  void RecordCreateLostAllocation(uint32_t frameIndex,
7543  VmaAllocation allocation);
7544  void RecordMapMemory(uint32_t frameIndex,
7545  VmaAllocation allocation);
7546  void RecordUnmapMemory(uint32_t frameIndex,
7547  VmaAllocation allocation);
7548  void RecordFlushAllocation(uint32_t frameIndex,
7549  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7550  void RecordInvalidateAllocation(uint32_t frameIndex,
7551  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7552  void RecordCreateBuffer(uint32_t frameIndex,
7553  const VkBufferCreateInfo& bufCreateInfo,
7554  const VmaAllocationCreateInfo& allocCreateInfo,
7555  VmaAllocation allocation);
7556  void RecordCreateImage(uint32_t frameIndex,
7557  const VkImageCreateInfo& imageCreateInfo,
7558  const VmaAllocationCreateInfo& allocCreateInfo,
7559  VmaAllocation allocation);
7560  void RecordDestroyBuffer(uint32_t frameIndex,
7561  VmaAllocation allocation);
7562  void RecordDestroyImage(uint32_t frameIndex,
7563  VmaAllocation allocation);
7564  void RecordTouchAllocation(uint32_t frameIndex,
7565  VmaAllocation allocation);
7566  void RecordGetAllocationInfo(uint32_t frameIndex,
7567  VmaAllocation allocation);
7568  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7569  VmaPool pool);
7570  void RecordDefragmentationBegin(uint32_t frameIndex,
7571  const VmaDefragmentationInfo2& info,
7572  VmaDefragmentationContext ctx);
7573  void RecordDefragmentationEnd(uint32_t frameIndex,
7574  VmaDefragmentationContext ctx);
7575  void RecordSetPoolName(uint32_t frameIndex,
7576  VmaPool pool,
7577  const char* name);
7578 
7579 private:
7580  struct CallParams
7581  {
7582  uint32_t threadId;
7583  double time;
7584  };
7585 
7586  class UserDataString
7587  {
7588  public:
7589  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7590  const char* GetString() const { return m_Str; }
7591 
7592  private:
7593  char m_PtrStr[17];
7594  const char* m_Str;
7595  };
7596 
7597  bool m_UseMutex;
7598  VmaRecordFlags m_Flags;
7599  FILE* m_File;
7600  VMA_MUTEX m_FileMutex;
7601  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
7602 
7603  void GetBasicParams(CallParams& outParams);
7604 
7605  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7606  template<typename T>
7607  void PrintPointerList(uint64_t count, const T* pItems)
7608  {
7609  if(count)
7610  {
7611  fprintf(m_File, "%p", pItems[0]);
7612  for(uint64_t i = 1; i < count; ++i)
7613  {
7614  fprintf(m_File, " %p", pItems[i]);
7615  }
7616  }
7617  }
7618 
7619  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7620  void Flush();
7621 };
7622 
7623 #endif // #if VMA_RECORDING_ENABLED
7624 
7625 /*
7626 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7627 */
7628 class VmaAllocationObjectAllocator
7629 {
7630  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7631 public:
7632  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7633 
7634  template<typename... Types> VmaAllocation Allocate(Types... args);
7635  void Free(VmaAllocation hAlloc);
7636 
7637 private:
7638  VMA_MUTEX m_Mutex;
7639  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7640 };
7641 
7642 struct VmaCurrentBudgetData
7643 {
7644  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7645  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7646 
7647 #if VMA_MEMORY_BUDGET
7648  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7649  VMA_RW_MUTEX m_BudgetMutex;
7650  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7651  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7652  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7653 #endif // #if VMA_MEMORY_BUDGET
7654 
7655  VmaCurrentBudgetData()
7656  {
7657  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7658  {
7659  m_BlockBytes[heapIndex] = 0;
7660  m_AllocationBytes[heapIndex] = 0;
7661 #if VMA_MEMORY_BUDGET
7662  m_VulkanUsage[heapIndex] = 0;
7663  m_VulkanBudget[heapIndex] = 0;
7664  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7665 #endif
7666  }
7667 
7668 #if VMA_MEMORY_BUDGET
7669  m_OperationsSinceBudgetFetch = 0;
7670 #endif
7671  }
7672 
7673  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7674  {
7675  m_AllocationBytes[heapIndex] += allocationSize;
7676 #if VMA_MEMORY_BUDGET
7677  ++m_OperationsSinceBudgetFetch;
7678 #endif
7679  }
7680 
7681  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7682  {
7683  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
7684  m_AllocationBytes[heapIndex] -= allocationSize;
7685 #if VMA_MEMORY_BUDGET
7686  ++m_OperationsSinceBudgetFetch;
7687 #endif
7688  }
7689 };
7690 
7691 // Main allocator object.
7692 struct VmaAllocator_T
7693 {
7694  VMA_CLASS_NO_COPY(VmaAllocator_T)
7695 public:
7696  bool m_UseMutex;
7697  uint32_t m_VulkanApiVersion;
7698  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7699  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7700  bool m_UseExtMemoryBudget;
7701  bool m_UseAmdDeviceCoherentMemory;
7702  bool m_UseKhrBufferDeviceAddress;
7703  VkDevice m_hDevice;
7704  VkInstance m_hInstance;
7705  bool m_AllocationCallbacksSpecified;
7706  VkAllocationCallbacks m_AllocationCallbacks;
7707  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7708  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7709 
7710  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
7711  uint32_t m_HeapSizeLimitMask;
7712 
7713  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7714  VkPhysicalDeviceMemoryProperties m_MemProps;
7715 
7716  // Default pools.
7717  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7718 
7719  // Each vector is sorted by memory (handle value).
7720  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7721  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7722  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7723 
7724  VmaCurrentBudgetData m_Budget;
7725 
7726  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7727  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7728  ~VmaAllocator_T();
7729 
7730  const VkAllocationCallbacks* GetAllocationCallbacks() const
7731  {
7732  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7733  }
7734  const VmaVulkanFunctions& GetVulkanFunctions() const
7735  {
7736  return m_VulkanFunctions;
7737  }
7738 
7739  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7740 
7741  VkDeviceSize GetBufferImageGranularity() const
7742  {
7743  return VMA_MAX(
7744  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7745  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7746  }
7747 
7748  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7749  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7750 
7751  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7752  {
7753  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7754  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7755  }
7756  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7757  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7758  {
7759  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7760  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7761  }
7762  // Minimum alignment for all allocations in specific memory type.
7763  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7764  {
7765  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7766  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7767  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7768  }
7769 
7770  bool IsIntegratedGpu() const
7771  {
7772  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7773  }
7774 
7775  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7776 
7777 #if VMA_RECORDING_ENABLED
7778  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7779 #endif
7780 
7781  void GetBufferMemoryRequirements(
7782  VkBuffer hBuffer,
7783  VkMemoryRequirements& memReq,
7784  bool& requiresDedicatedAllocation,
7785  bool& prefersDedicatedAllocation) const;
7786  void GetImageMemoryRequirements(
7787  VkImage hImage,
7788  VkMemoryRequirements& memReq,
7789  bool& requiresDedicatedAllocation,
7790  bool& prefersDedicatedAllocation) const;
7791 
7792  // Main allocation function.
7793  VkResult AllocateMemory(
7794  const VkMemoryRequirements& vkMemReq,
7795  bool requiresDedicatedAllocation,
7796  bool prefersDedicatedAllocation,
7797  VkBuffer dedicatedBuffer,
7798  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7799  VkImage dedicatedImage,
7800  const VmaAllocationCreateInfo& createInfo,
7801  VmaSuballocationType suballocType,
7802  size_t allocationCount,
7803  VmaAllocation* pAllocations);
7804 
7805  // Main deallocation function.
7806  void FreeMemory(
7807  size_t allocationCount,
7808  const VmaAllocation* pAllocations);
7809 
7810  VkResult ResizeAllocation(
7811  const VmaAllocation alloc,
7812  VkDeviceSize newSize);
7813 
7814  void CalculateStats(VmaStats* pStats);
7815 
7816  void GetBudget(
7817  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7818 
7819 #if VMA_STATS_STRING_ENABLED
7820  void PrintDetailedMap(class VmaJsonWriter& json);
7821 #endif
7822 
7823  VkResult DefragmentationBegin(
7824  const VmaDefragmentationInfo2& info,
7825  VmaDefragmentationStats* pStats,
7826  VmaDefragmentationContext* pContext);
7827  VkResult DefragmentationEnd(
7828  VmaDefragmentationContext context);
7829 
7830  VkResult DefragmentationPassBegin(
7831  VmaDefragmentationPassInfo* pInfo,
7832  VmaDefragmentationContext context);
7833  VkResult DefragmentationPassEnd(
7834  VmaDefragmentationContext context);
7835 
7836  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7837  bool TouchAllocation(VmaAllocation hAllocation);
7838 
7839  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7840  void DestroyPool(VmaPool pool);
7841  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7842 
7843  void SetCurrentFrameIndex(uint32_t frameIndex);
7844  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7845 
7846  void MakePoolAllocationsLost(
7847  VmaPool hPool,
7848  size_t* pLostAllocationCount);
7849  VkResult CheckPoolCorruption(VmaPool hPool);
7850  VkResult CheckCorruption(uint32_t memoryTypeBits);
7851 
7852  void CreateLostAllocation(VmaAllocation* pAllocation);
7853 
7854  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7855  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7856  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7857  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7858  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7859  VkResult BindVulkanBuffer(
7860  VkDeviceMemory memory,
7861  VkDeviceSize memoryOffset,
7862  VkBuffer buffer,
7863  const void* pNext);
7864  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7865  VkResult BindVulkanImage(
7866  VkDeviceMemory memory,
7867  VkDeviceSize memoryOffset,
7868  VkImage image,
7869  const void* pNext);
7870 
7871  VkResult Map(VmaAllocation hAllocation, void** ppData);
7872  void Unmap(VmaAllocation hAllocation);
7873 
7874  VkResult BindBufferMemory(
7875  VmaAllocation hAllocation,
7876  VkDeviceSize allocationLocalOffset,
7877  VkBuffer hBuffer,
7878  const void* pNext);
7879  VkResult BindImageMemory(
7880  VmaAllocation hAllocation,
7881  VkDeviceSize allocationLocalOffset,
7882  VkImage hImage,
7883  const void* pNext);
7884 
7885  VkResult FlushOrInvalidateAllocation(
7886  VmaAllocation hAllocation,
7887  VkDeviceSize offset, VkDeviceSize size,
7888  VMA_CACHE_OPERATION op);
7889  VkResult FlushOrInvalidateAllocations(
7890  uint32_t allocationCount,
7891  const VmaAllocation* allocations,
7892  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
7893  VMA_CACHE_OPERATION op);
7894 
7895  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7896 
7897  /*
7898  Returns a bit mask of memory types that can support defragmentation on GPU,
7899  as they allow creation of the buffer required for copy operations.
7900  */
7901  uint32_t GetGpuDefragmentationMemoryTypeBits();
7902 
7903 private:
7904  VkDeviceSize m_PreferredLargeHeapBlockSize;
7905 
7906  VkPhysicalDevice m_PhysicalDevice;
7907  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7908  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7909 
7910  VMA_RW_MUTEX m_PoolsMutex;
7911  // Protected by m_PoolsMutex. Sorted by pointer value.
7912  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7913  uint32_t m_NextPoolId;
7914 
7915  VmaVulkanFunctions m_VulkanFunctions;
7916 
7917  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7918  uint32_t m_GlobalMemoryTypeBits;
7919 
7920 #if VMA_RECORDING_ENABLED
7921  VmaRecorder* m_pRecorder;
7922 #endif
7923 
7924  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7925 
7926 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7927  void ImportVulkanFunctions_Static();
7928 #endif
7929 
7930  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
7931 
7932 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
7933  void ImportVulkanFunctions_Dynamic();
7934 #endif
7935 
7936  void ValidateVulkanFunctions();
7937 
7938  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7939 
7940  VkResult AllocateMemoryOfType(
7941  VkDeviceSize size,
7942  VkDeviceSize alignment,
7943  bool dedicatedAllocation,
7944  VkBuffer dedicatedBuffer,
7945  VkBufferUsageFlags dedicatedBufferUsage,
7946  VkImage dedicatedImage,
7947  const VmaAllocationCreateInfo& createInfo,
7948  uint32_t memTypeIndex,
7949  VmaSuballocationType suballocType,
7950  size_t allocationCount,
7951  VmaAllocation* pAllocations);
7952 
7953  // Helper function only to be used inside AllocateDedicatedMemory.
7954  VkResult AllocateDedicatedMemoryPage(
7955  VkDeviceSize size,
7956  VmaSuballocationType suballocType,
7957  uint32_t memTypeIndex,
7958  const VkMemoryAllocateInfo& allocInfo,
7959  bool map,
7960  bool isUserDataString,
7961  void* pUserData,
7962  VmaAllocation* pAllocation);
7963 
7964  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7965  VkResult AllocateDedicatedMemory(
7966  VkDeviceSize size,
7967  VmaSuballocationType suballocType,
7968  uint32_t memTypeIndex,
7969  bool withinBudget,
7970  bool map,
7971  bool isUserDataString,
7972  void* pUserData,
7973  VkBuffer dedicatedBuffer,
7974  VkBufferUsageFlags dedicatedBufferUsage,
7975  VkImage dedicatedImage,
7976  size_t allocationCount,
7977  VmaAllocation* pAllocations);
7978 
7979  void FreeDedicatedMemory(const VmaAllocation allocation);
7980 
7981  /*
7982  Calculates and returns a bit mask of memory types that can support defragmentation
7983  on GPU, as they allow creation of the buffer required for copy operations.
7984  */
7985  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7986 
7987  uint32_t CalculateGlobalMemoryTypeBits() const;
7988 
7989  bool GetFlushOrInvalidateRange(
7990  VmaAllocation allocation,
7991  VkDeviceSize offset, VkDeviceSize size,
7992  VkMappedMemoryRange& outRange) const;
7993 
7994 #if VMA_MEMORY_BUDGET
7995  void UpdateVulkanBudget();
7996 #endif // #if VMA_MEMORY_BUDGET
7997 };
7998 
8000 // Memory allocation #2 after VmaAllocator_T definition
8001 
8002 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8003 {
8004  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8005 }
8006 
8007 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8008 {
8009  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8010 }
8011 
8012 template<typename T>
8013 static T* VmaAllocate(VmaAllocator hAllocator)
8014 {
8015  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8016 }
8017 
8018 template<typename T>
8019 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8020 {
8021  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8022 }
8023 
8024 template<typename T>
8025 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8026 {
8027  if(ptr != VMA_NULL)
8028  {
8029  ptr->~T();
8030  VmaFree(hAllocator, ptr);
8031  }
8032 }
8033 
8034 template<typename T>
8035 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8036 {
8037  if(ptr != VMA_NULL)
8038  {
8039  for(size_t i = count; i--; )
8040  ptr[i].~T();
8041  VmaFree(hAllocator, ptr);
8042  }
8043 }
8044 
8046 // VmaStringBuilder
8047 
8048 #if VMA_STATS_STRING_ENABLED
8049 
8050 class VmaStringBuilder
8051 {
8052 public:
8053  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8054  size_t GetLength() const { return m_Data.size(); }
8055  const char* GetData() const { return m_Data.data(); }
8056 
8057  void Add(char ch) { m_Data.push_back(ch); }
8058  void Add(const char* pStr);
8059  void AddNewLine() { Add('\n'); }
8060  void AddNumber(uint32_t num);
8061  void AddNumber(uint64_t num);
8062  void AddPointer(const void* ptr);
8063 
8064 private:
8065  VmaVector< char, VmaStlAllocator<char> > m_Data;
8066 };
8067 
8068 void VmaStringBuilder::Add(const char* pStr)
8069 {
8070  const size_t strLen = strlen(pStr);
8071  if(strLen > 0)
8072  {
8073  const size_t oldCount = m_Data.size();
8074  m_Data.resize(oldCount + strLen);
8075  memcpy(m_Data.data() + oldCount, pStr, strLen);
8076  }
8077 }
8078 
8079 void VmaStringBuilder::AddNumber(uint32_t num)
8080 {
8081  char buf[11];
8082  buf[10] = '\0';
8083  char *p = &buf[10];
8084  do
8085  {
8086  *--p = '0' + (num % 10);
8087  num /= 10;
8088  }
8089  while(num);
8090  Add(p);
8091 }
8092 
8093 void VmaStringBuilder::AddNumber(uint64_t num)
8094 {
8095  char buf[21];
8096  buf[20] = '\0';
8097  char *p = &buf[20];
8098  do
8099  {
8100  *--p = '0' + (num % 10);
8101  num /= 10;
8102  }
8103  while(num);
8104  Add(p);
8105 }
8106 
8107 void VmaStringBuilder::AddPointer(const void* ptr)
8108 {
8109  char buf[21];
8110  VmaPtrToStr(buf, sizeof(buf), ptr);
8111  Add(buf);
8112 }
8113 
8114 #endif // #if VMA_STATS_STRING_ENABLED
8115 
8117 // VmaJsonWriter
8118 
8119 #if VMA_STATS_STRING_ENABLED
8120 
8121 class VmaJsonWriter
8122 {
8123  VMA_CLASS_NO_COPY(VmaJsonWriter)
8124 public:
8125  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8126  ~VmaJsonWriter();
8127 
8128  void BeginObject(bool singleLine = false);
8129  void EndObject();
8130 
8131  void BeginArray(bool singleLine = false);
8132  void EndArray();
8133 
8134  void WriteString(const char* pStr);
8135  void BeginString(const char* pStr = VMA_NULL);
8136  void ContinueString(const char* pStr);
8137  void ContinueString(uint32_t n);
8138  void ContinueString(uint64_t n);
8139  void ContinueString_Pointer(const void* ptr);
8140  void EndString(const char* pStr = VMA_NULL);
8141 
8142  void WriteNumber(uint32_t n);
8143  void WriteNumber(uint64_t n);
8144  void WriteBool(bool b);
8145  void WriteNull();
8146 
8147 private:
8148  static const char* const INDENT;
8149 
8150  enum COLLECTION_TYPE
8151  {
8152  COLLECTION_TYPE_OBJECT,
8153  COLLECTION_TYPE_ARRAY,
8154  };
8155  struct StackItem
8156  {
8157  COLLECTION_TYPE type;
8158  uint32_t valueCount;
8159  bool singleLineMode;
8160  };
8161 
8162  VmaStringBuilder& m_SB;
8163  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8164  bool m_InsideString;
8165 
8166  void BeginValue(bool isString);
8167  void WriteIndent(bool oneLess = false);
8168 };
8169 
8170 const char* const VmaJsonWriter::INDENT = " ";
8171 
8172 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8173  m_SB(sb),
8174  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8175  m_InsideString(false)
8176 {
8177 }
8178 
8179 VmaJsonWriter::~VmaJsonWriter()
8180 {
8181  VMA_ASSERT(!m_InsideString);
8182  VMA_ASSERT(m_Stack.empty());
8183 }
8184 
8185 void VmaJsonWriter::BeginObject(bool singleLine)
8186 {
8187  VMA_ASSERT(!m_InsideString);
8188 
8189  BeginValue(false);
8190  m_SB.Add('{');
8191 
8192  StackItem item;
8193  item.type = COLLECTION_TYPE_OBJECT;
8194  item.valueCount = 0;
8195  item.singleLineMode = singleLine;
8196  m_Stack.push_back(item);
8197 }
8198 
8199 void VmaJsonWriter::EndObject()
8200 {
8201  VMA_ASSERT(!m_InsideString);
8202 
8203  WriteIndent(true);
8204  m_SB.Add('}');
8205 
8206  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8207  m_Stack.pop_back();
8208 }
8209 
8210 void VmaJsonWriter::BeginArray(bool singleLine)
8211 {
8212  VMA_ASSERT(!m_InsideString);
8213 
8214  BeginValue(false);
8215  m_SB.Add('[');
8216 
8217  StackItem item;
8218  item.type = COLLECTION_TYPE_ARRAY;
8219  item.valueCount = 0;
8220  item.singleLineMode = singleLine;
8221  m_Stack.push_back(item);
8222 }
8223 
8224 void VmaJsonWriter::EndArray()
8225 {
8226  VMA_ASSERT(!m_InsideString);
8227 
8228  WriteIndent(true);
8229  m_SB.Add(']');
8230 
8231  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8232  m_Stack.pop_back();
8233 }
8234 
8235 void VmaJsonWriter::WriteString(const char* pStr)
8236 {
8237  BeginString(pStr);
8238  EndString();
8239 }
8240 
8241 void VmaJsonWriter::BeginString(const char* pStr)
8242 {
8243  VMA_ASSERT(!m_InsideString);
8244 
8245  BeginValue(true);
8246  m_SB.Add('"');
8247  m_InsideString = true;
8248  if(pStr != VMA_NULL && pStr[0] != '\0')
8249  {
8250  ContinueString(pStr);
8251  }
8252 }
8253 
8254 void VmaJsonWriter::ContinueString(const char* pStr)
8255 {
8256  VMA_ASSERT(m_InsideString);
8257 
8258  const size_t strLen = strlen(pStr);
8259  for(size_t i = 0; i < strLen; ++i)
8260  {
8261  char ch = pStr[i];
8262  if(ch == '\\')
8263  {
8264  m_SB.Add("\\\\");
8265  }
8266  else if(ch == '"')
8267  {
8268  m_SB.Add("\\\"");
8269  }
8270  else if(ch >= 32)
8271  {
8272  m_SB.Add(ch);
8273  }
8274  else switch(ch)
8275  {
8276  case '\b':
8277  m_SB.Add("\\b");
8278  break;
8279  case '\f':
8280  m_SB.Add("\\f");
8281  break;
8282  case '\n':
8283  m_SB.Add("\\n");
8284  break;
8285  case '\r':
8286  m_SB.Add("\\r");
8287  break;
8288  case '\t':
8289  m_SB.Add("\\t");
8290  break;
8291  default:
8292  VMA_ASSERT(0 && "Character not currently supported.");
8293  break;
8294  }
8295  }
8296 }
8297 
8298 void VmaJsonWriter::ContinueString(uint32_t n)
8299 {
8300  VMA_ASSERT(m_InsideString);
8301  m_SB.AddNumber(n);
8302 }
8303 
8304 void VmaJsonWriter::ContinueString(uint64_t n)
8305 {
8306  VMA_ASSERT(m_InsideString);
8307  m_SB.AddNumber(n);
8308 }
8309 
8310 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8311 {
8312  VMA_ASSERT(m_InsideString);
8313  m_SB.AddPointer(ptr);
8314 }
8315 
8316 void VmaJsonWriter::EndString(const char* pStr)
8317 {
8318  VMA_ASSERT(m_InsideString);
8319  if(pStr != VMA_NULL && pStr[0] != '\0')
8320  {
8321  ContinueString(pStr);
8322  }
8323  m_SB.Add('"');
8324  m_InsideString = false;
8325 }
8326 
8327 void VmaJsonWriter::WriteNumber(uint32_t n)
8328 {
8329  VMA_ASSERT(!m_InsideString);
8330  BeginValue(false);
8331  m_SB.AddNumber(n);
8332 }
8333 
8334 void VmaJsonWriter::WriteNumber(uint64_t n)
8335 {
8336  VMA_ASSERT(!m_InsideString);
8337  BeginValue(false);
8338  m_SB.AddNumber(n);
8339 }
8340 
8341 void VmaJsonWriter::WriteBool(bool b)
8342 {
8343  VMA_ASSERT(!m_InsideString);
8344  BeginValue(false);
8345  m_SB.Add(b ? "true" : "false");
8346 }
8347 
8348 void VmaJsonWriter::WriteNull()
8349 {
8350  VMA_ASSERT(!m_InsideString);
8351  BeginValue(false);
8352  m_SB.Add("null");
8353 }
8354 
8355 void VmaJsonWriter::BeginValue(bool isString)
8356 {
8357  if(!m_Stack.empty())
8358  {
8359  StackItem& currItem = m_Stack.back();
8360  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8361  currItem.valueCount % 2 == 0)
8362  {
8363  VMA_ASSERT(isString);
8364  }
8365 
8366  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8367  currItem.valueCount % 2 != 0)
8368  {
8369  m_SB.Add(": ");
8370  }
8371  else if(currItem.valueCount > 0)
8372  {
8373  m_SB.Add(", ");
8374  WriteIndent();
8375  }
8376  else
8377  {
8378  WriteIndent();
8379  }
8380  ++currItem.valueCount;
8381  }
8382 }
8383 
8384 void VmaJsonWriter::WriteIndent(bool oneLess)
8385 {
8386  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8387  {
8388  m_SB.AddNewLine();
8389 
8390  size_t count = m_Stack.size();
8391  if(count > 0 && oneLess)
8392  {
8393  --count;
8394  }
8395  for(size_t i = 0; i < count; ++i)
8396  {
8397  m_SB.Add(INDENT);
8398  }
8399  }
8400 }
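
// A hedged usage sketch of the writer above. Inside an object, strings
// alternate between key and value; BeginValue()'s `valueCount % 2` test
// emits ": " before each value, ", " plus a newline before each new key,
// and asserts that every key is a string:
//
//   json.BeginObject();
//   json.WriteString("Size");   // key   -> newline, indent, "Size"
//   json.WriteNumber(256u);     // value -> ": 256"
//   json.WriteString("Type");   // key   -> ", ", newline, indent, "Type"
//   json.WriteString("BUFFER"); // value -> ": \"BUFFER\""
//   json.EndObject();           // -> newline, "}"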
8401 
8402 #endif // #if VMA_STATS_STRING_ENABLED
8403 
8405 
8406 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8407 {
8408  if(IsUserDataString())
8409  {
8410  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8411 
8412  FreeUserDataString(hAllocator);
8413 
8414  if(pUserData != VMA_NULL)
8415  {
8416  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8417  }
8418  }
8419  else
8420  {
8421  m_pUserData = pUserData;
8422  }
8423 }
8424 
8425 void VmaAllocation_T::ChangeBlockAllocation(
8426  VmaAllocator hAllocator,
8427  VmaDeviceMemoryBlock* block,
8428  VkDeviceSize offset)
8429 {
8430  VMA_ASSERT(block != VMA_NULL);
8431  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8432 
8433  // Move mapping reference counter from old block to new block.
8434  if(block != m_BlockAllocation.m_Block)
8435  {
8436  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8437  if(IsPersistentMap())
8438  ++mapRefCount;
8439  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8440  block->Map(hAllocator, mapRefCount, VMA_NULL);
8441  }
8442 
8443  m_BlockAllocation.m_Block = block;
8444  m_BlockAllocation.m_Offset = offset;
8445 }
8446 
8447 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8448 {
8449  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8450  m_BlockAllocation.m_Offset = newOffset;
8451 }
8452 
8453 VkDeviceSize VmaAllocation_T::GetOffset() const
8454 {
8455  switch(m_Type)
8456  {
8457  case ALLOCATION_TYPE_BLOCK:
8458  return m_BlockAllocation.m_Offset;
8459  case ALLOCATION_TYPE_DEDICATED:
8460  return 0;
8461  default:
8462  VMA_ASSERT(0);
8463  return 0;
8464  }
8465 }
8466 
8467 VkDeviceMemory VmaAllocation_T::GetMemory() const
8468 {
8469  switch(m_Type)
8470  {
8471  case ALLOCATION_TYPE_BLOCK:
8472  return m_BlockAllocation.m_Block->GetDeviceMemory();
8473  case ALLOCATION_TYPE_DEDICATED:
8474  return m_DedicatedAllocation.m_hMemory;
8475  default:
8476  VMA_ASSERT(0);
8477  return VK_NULL_HANDLE;
8478  }
8479 }
8480 
8481 void* VmaAllocation_T::GetMappedData() const
8482 {
8483  switch(m_Type)
8484  {
8485  case ALLOCATION_TYPE_BLOCK:
8486  if(m_MapCount != 0)
8487  {
8488  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8489  VMA_ASSERT(pBlockData != VMA_NULL);
8490  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8491  }
8492  else
8493  {
8494  return VMA_NULL;
8495  }
8496  break;
8497  case ALLOCATION_TYPE_DEDICATED:
8498  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8499  return m_DedicatedAllocation.m_pMappedData;
8500  default:
8501  VMA_ASSERT(0);
8502  return VMA_NULL;
8503  }
8504 }
8505 
8506 bool VmaAllocation_T::CanBecomeLost() const
8507 {
8508  switch(m_Type)
8509  {
8510  case ALLOCATION_TYPE_BLOCK:
8511  return m_BlockAllocation.m_CanBecomeLost;
8512  case ALLOCATION_TYPE_DEDICATED:
8513  return false;
8514  default:
8515  VMA_ASSERT(0);
8516  return false;
8517  }
8518 }
8519 
8520 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8521 {
8522  VMA_ASSERT(CanBecomeLost());
8523 
8524  /*
8525  Warning: This is a carefully designed algorithm.
8526  Do not modify unless you really know what you're doing :)
8527  */
8528  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8529  for(;;)
8530  {
8531  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8532  {
8533  VMA_ASSERT(0);
8534  return false;
8535  }
8536  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8537  {
8538  return false;
8539  }
8540  else // Last use time earlier than current time.
8541  {
8542  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8543  {
8544  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8545  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8546  return true;
8547  }
8548  }
8549  }
8550 }
8551 
8552 #if VMA_STATS_STRING_ENABLED
8553 
8554 // Correspond to values of enum VmaSuballocationType.
8555 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8556  "FREE",
8557  "UNKNOWN",
8558  "BUFFER",
8559  "IMAGE_UNKNOWN",
8560  "IMAGE_LINEAR",
8561  "IMAGE_OPTIMAL",
8562 };
8563 
8564 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8565 {
8566  json.WriteString("Type");
8567  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8568 
8569  json.WriteString("Size");
8570  json.WriteNumber(m_Size);
8571 
8572  if(m_pUserData != VMA_NULL)
8573  {
8574  json.WriteString("UserData");
8575  if(IsUserDataString())
8576  {
8577  json.WriteString((const char*)m_pUserData);
8578  }
8579  else
8580  {
8581  json.BeginString();
8582  json.ContinueString_Pointer(m_pUserData);
8583  json.EndString();
8584  }
8585  }
8586 
8587  json.WriteString("CreationFrameIndex");
8588  json.WriteNumber(m_CreationFrameIndex);
8589 
8590  json.WriteString("LastUseFrameIndex");
8591  json.WriteNumber(GetLastUseFrameIndex());
8592 
8593  if(m_BufferImageUsage != 0)
8594  {
8595  json.WriteString("Usage");
8596  json.WriteNumber(m_BufferImageUsage);
8597  }
8598 }
8599 
8600 #endif
8601 
8602 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8603 {
8604  VMA_ASSERT(IsUserDataString());
8605  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8606  m_pUserData = VMA_NULL;
8607 }
8608 
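// Reference-counted mapping for block-based allocations. The actual
// vkMapMemory/vkUnmapMemory calls happen in the owning VmaDeviceMemoryBlock;
// these methods only maintain the per-allocation map count, keeping the
// persistent-map flag bit out of the counted value.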
8609 void VmaAllocation_T::BlockAllocMap()
8610 {
8611  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8612 
8613  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8614  {
8615  ++m_MapCount;
8616  }
8617  else
8618  {
8619  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8620  }
8621 }
8622 
8623 void VmaAllocation_T::BlockAllocUnmap()
8624 {
8625  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8626 
8627  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8628  {
8629  --m_MapCount;
8630  }
8631  else
8632  {
8633  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8634  }
8635 }
8636 
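// Maps the whole VkDeviceMemory of a dedicated allocation. Only the first call
// performs vkMapMemory; subsequent calls return the cached pointer and bump the
// map count.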
8637 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8638 {
8639  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8640 
8641  if(m_MapCount != 0)
8642  {
8643  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8644  {
8645  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8646  *ppData = m_DedicatedAllocation.m_pMappedData;
8647  ++m_MapCount;
8648  return VK_SUCCESS;
8649  }
8650  else
8651  {
8652  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8653  return VK_ERROR_MEMORY_MAP_FAILED;
8654  }
8655  }
8656  else
8657  {
8658  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8659  hAllocator->m_hDevice,
8660  m_DedicatedAllocation.m_hMemory,
8661  0, // offset
8662  VK_WHOLE_SIZE,
8663  0, // flags
8664  ppData);
8665  if(result == VK_SUCCESS)
8666  {
8667  m_DedicatedAllocation.m_pMappedData = *ppData;
8668  m_MapCount = 1;
8669  }
8670  return result;
8671  }
8672 }
8673 
8674 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8675 {
8676  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8677 
8678  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8679  {
8680  --m_MapCount;
8681  if(m_MapCount == 0)
8682  {
8683  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8684  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8685  hAllocator->m_hDevice,
8686  m_DedicatedAllocation.m_hMemory);
8687  }
8688  }
8689  else
8690  {
8691  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8692  }
8693 }
8694 
8695 #if VMA_STATS_STRING_ENABLED
8696 
8697 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8698 {
8699  json.BeginObject();
8700 
8701  json.WriteString("Blocks");
8702  json.WriteNumber(stat.blockCount);
8703 
8704  json.WriteString("Allocations");
8705  json.WriteNumber(stat.allocationCount);
8706 
8707  json.WriteString("UnusedRanges");
8708  json.WriteNumber(stat.unusedRangeCount);
8709 
8710  json.WriteString("UsedBytes");
8711  json.WriteNumber(stat.usedBytes);
8712 
8713  json.WriteString("UnusedBytes");
8714  json.WriteNumber(stat.unusedBytes);
8715 
8716  if(stat.allocationCount > 1)
8717  {
8718  json.WriteString("AllocationSize");
8719  json.BeginObject(true);
8720  json.WriteString("Min");
8721  json.WriteNumber(stat.allocationSizeMin);
8722  json.WriteString("Avg");
8723  json.WriteNumber(stat.allocationSizeAvg);
8724  json.WriteString("Max");
8725  json.WriteNumber(stat.allocationSizeMax);
8726  json.EndObject();
8727  }
8728 
8729  if(stat.unusedRangeCount > 1)
8730  {
8731  json.WriteString("UnusedRangeSize");
8732  json.BeginObject(true);
8733  json.WriteString("Min");
8734  json.WriteNumber(stat.unusedRangeSizeMin);
8735  json.WriteString("Avg");
8736  json.WriteNumber(stat.unusedRangeSizeAvg);
8737  json.WriteString("Max");
8738  json.WriteNumber(stat.unusedRangeSizeMax);
8739  json.EndObject();
8740  }
8741 
8742  json.EndObject();
8743 }
8744 
8745 #endif // #if VMA_STATS_STRING_ENABLED
8746 
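// Comparator for iterators into the suballocation list, ordering by size.
// Used to keep m_FreeSuballocationsBySize sorted ascending and to binary-search
// it, e.g. (usage sketch, with vec standing for the sorted vector):
//   VmaSuballocationList::iterator* it = VmaBinaryFindFirstNotLess(
//       vec.data(), vec.data() + vec.size(),
//       requiredSize, VmaSuballocationItemSizeLess());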
8747 struct VmaSuballocationItemSizeLess
8748 {
8749  bool operator()(
8750  const VmaSuballocationList::iterator lhs,
8751  const VmaSuballocationList::iterator rhs) const
8752  {
8753  return lhs->size < rhs->size;
8754  }
8755  bool operator()(
8756  const VmaSuballocationList::iterator lhs,
8757  VkDeviceSize rhsSize) const
8758  {
8759  return lhs->size < rhsSize;
8760  }
8761 };
8762 
8763 
8765 // class VmaBlockMetadata
8766 
8767 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8768  m_Size(0),
8769  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8770 {
8771 }
8772 
8773 #if VMA_STATS_STRING_ENABLED
8774 
8775 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8776  VkDeviceSize unusedBytes,
8777  size_t allocationCount,
8778  size_t unusedRangeCount) const
8779 {
8780  json.BeginObject();
8781 
8782  json.WriteString("TotalBytes");
8783  json.WriteNumber(GetSize());
8784 
8785  json.WriteString("UnusedBytes");
8786  json.WriteNumber(unusedBytes);
8787 
8788  json.WriteString("Allocations");
8789  json.WriteNumber((uint64_t)allocationCount);
8790 
8791  json.WriteString("UnusedRanges");
8792  json.WriteNumber((uint64_t)unusedRangeCount);
8793 
8794  json.WriteString("Suballocations");
8795  json.BeginArray();
8796 }
8797 
8798 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8799  VkDeviceSize offset,
8800  VmaAllocation hAllocation) const
8801 {
8802  json.BeginObject(true);
8803 
8804  json.WriteString("Offset");
8805  json.WriteNumber(offset);
8806 
8807  hAllocation->PrintParameters(json);
8808 
8809  json.EndObject();
8810 }
8811 
8812 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8813  VkDeviceSize offset,
8814  VkDeviceSize size) const
8815 {
8816  json.BeginObject(true);
8817 
8818  json.WriteString("Offset");
8819  json.WriteNumber(offset);
8820 
8821  json.WriteString("Type");
8822  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8823 
8824  json.WriteString("Size");
8825  json.WriteNumber(size);
8826 
8827  json.EndObject();
8828 }
8829 
8830 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8831 {
8832  json.EndArray();
8833  json.EndObject();
8834 }
8835 
8836 #endif // #if VMA_STATS_STRING_ENABLED
8837 
8839 // class VmaBlockMetadata_Generic
8840 
8841 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8842  VmaBlockMetadata(hAllocator),
8843  m_FreeCount(0),
8844  m_SumFreeSize(0),
8845  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8846  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8847 {
8848 }
8849 
8850 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8851 {
8852 }
8853 
8854 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8855 {
8856  VmaBlockMetadata::Init(size);
8857 
8858  m_FreeCount = 1;
8859  m_SumFreeSize = size;
8860 
8861  VmaSuballocation suballoc = {};
8862  suballoc.offset = 0;
8863  suballoc.size = size;
8864  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8865  suballoc.hAllocation = VK_NULL_HANDLE;
8866 
8867  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8868  m_Suballocations.push_back(suballoc);
8869  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8870  --suballocItem;
8871  m_FreeSuballocationsBySize.push_back(suballocItem);
8872 }
8873 
8874 bool VmaBlockMetadata_Generic::Validate() const
8875 {
8876  VMA_VALIDATE(!m_Suballocations.empty());
8877 
8878  // Expected offset of new suballocation as calculated from previous ones.
8879  VkDeviceSize calculatedOffset = 0;
8880  // Expected number of free suballocations as calculated from traversing their list.
8881  uint32_t calculatedFreeCount = 0;
8882  // Expected sum size of free suballocations as calculated from traversing their list.
8883  VkDeviceSize calculatedSumFreeSize = 0;
8884  // Expected number of free suballocations that should be registered in
8885  // m_FreeSuballocationsBySize calculated from traversing their list.
8886  size_t freeSuballocationsToRegister = 0;
8887  // True if previous visited suballocation was free.
8888  bool prevFree = false;
8889 
8890  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8891  suballocItem != m_Suballocations.cend();
8892  ++suballocItem)
8893  {
8894  const VmaSuballocation& subAlloc = *suballocItem;
8895 
8896  // Actual offset of this suballocation doesn't match expected one.
8897  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8898 
8899  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8900  // Two adjacent free suballocations are invalid. They should be merged.
8901  VMA_VALIDATE(!prevFree || !currFree);
8902 
8903  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8904 
8905  if(currFree)
8906  {
8907  calculatedSumFreeSize += subAlloc.size;
8908  ++calculatedFreeCount;
8909  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8910  {
8911  ++freeSuballocationsToRegister;
8912  }
8913 
8914  // Margin required between allocations - every free space must be at least that large.
 8915  // Margin required between allocations - every free range must be at least that large.
8916  }
8917  else
8918  {
8919  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8920  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8921 
8922  // Margin required between allocations - previous allocation must be free.
8923  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8924  }
8925 
8926  calculatedOffset += subAlloc.size;
8927  prevFree = currFree;
8928  }
8929 
8930  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8931  // match expected one.
8932  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8933 
8934  VkDeviceSize lastSize = 0;
8935  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8936  {
8937  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8938 
8939  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8940  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8941  // They must be sorted by size ascending.
8942  VMA_VALIDATE(suballocItem->size >= lastSize);
8943 
8944  lastSize = suballocItem->size;
8945  }
8946 
 8947  // Check if totals match calculated values.
8948  VMA_VALIDATE(ValidateFreeSuballocationList());
8949  VMA_VALIDATE(calculatedOffset == GetSize());
8950  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8951  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8952 
8953  return true;
8954 }
8955 
8956 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8957 {
8958  if(!m_FreeSuballocationsBySize.empty())
8959  {
8960  return m_FreeSuballocationsBySize.back()->size;
8961  }
8962  else
8963  {
8964  return 0;
8965  }
8966 }
8967 
8968 bool VmaBlockMetadata_Generic::IsEmpty() const
8969 {
8970  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8971 }
8972 
8973 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8974 {
8975  outInfo.blockCount = 1;
8976 
8977  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8978  outInfo.allocationCount = rangeCount - m_FreeCount;
8979  outInfo.unusedRangeCount = m_FreeCount;
8980 
8981  outInfo.unusedBytes = m_SumFreeSize;
8982  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8983 
8984  outInfo.allocationSizeMin = UINT64_MAX;
8985  outInfo.allocationSizeMax = 0;
8986  outInfo.unusedRangeSizeMin = UINT64_MAX;
8987  outInfo.unusedRangeSizeMax = 0;
8988 
8989  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8990  suballocItem != m_Suballocations.cend();
8991  ++suballocItem)
8992  {
8993  const VmaSuballocation& suballoc = *suballocItem;
8994  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8995  {
8996  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8997  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8998  }
8999  else
9000  {
9001  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9002  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9003  }
9004  }
9005 }
9006 
9007 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9008 {
9009  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9010 
9011  inoutStats.size += GetSize();
9012  inoutStats.unusedSize += m_SumFreeSize;
9013  inoutStats.allocationCount += rangeCount - m_FreeCount;
9014  inoutStats.unusedRangeCount += m_FreeCount;
9015  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9016 }
9017 
9018 #if VMA_STATS_STRING_ENABLED
9019 
9020 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9021 {
9022  PrintDetailedMap_Begin(json,
9023  m_SumFreeSize, // unusedBytes
9024  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9025  m_FreeCount); // unusedRangeCount
9026 
9027  size_t i = 0;
9028  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9029  suballocItem != m_Suballocations.cend();
9030  ++suballocItem, ++i)
9031  {
9032  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9033  {
9034  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
9035  }
9036  else
9037  {
9038  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
9039  }
9040  }
9041 
9042  PrintDetailedMap_End(json);
9043 }
9044 
9045 #endif // #if VMA_STATS_STRING_ENABLED
9046 
9047 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9048  uint32_t currentFrameIndex,
9049  uint32_t frameInUseCount,
9050  VkDeviceSize bufferImageGranularity,
9051  VkDeviceSize allocSize,
9052  VkDeviceSize allocAlignment,
9053  bool upperAddress,
9054  VmaSuballocationType allocType,
9055  bool canMakeOtherLost,
9056  uint32_t strategy,
9057  VmaAllocationRequest* pAllocationRequest)
9058 {
9059  VMA_ASSERT(allocSize > 0);
9060  VMA_ASSERT(!upperAddress);
9061  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9062  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9063  VMA_HEAVY_ASSERT(Validate());
9064 
9065  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9066 
 9067  // There is not enough total free space in this block to fulfill the request: Early return.
9068  if(canMakeOtherLost == false &&
9069  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9070  {
9071  return false;
9072  }
9073 
9074  // New algorithm, efficiently searching freeSuballocationsBySize.
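 // Strategy dispatch over the size-sorted free list: BEST_FIT binary-searches
 // for the smallest sufficient range, MIN_OFFSET scans the suballocation list
 // front to back, and WORST_FIT/FIRST_FIT walk the sorted list from the
 // largest range downwards.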
9075  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9076  if(freeSuballocCount > 0)
9077  {
 9078  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
 9079  {
9080  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9081  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9082  m_FreeSuballocationsBySize.data(),
9083  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9084  allocSize + 2 * VMA_DEBUG_MARGIN,
9085  VmaSuballocationItemSizeLess());
9086  size_t index = it - m_FreeSuballocationsBySize.data();
9087  for(; index < freeSuballocCount; ++index)
9088  {
9089  if(CheckAllocation(
9090  currentFrameIndex,
9091  frameInUseCount,
9092  bufferImageGranularity,
9093  allocSize,
9094  allocAlignment,
9095  allocType,
9096  m_FreeSuballocationsBySize[index],
9097  false, // canMakeOtherLost
9098  &pAllocationRequest->offset,
9099  &pAllocationRequest->itemsToMakeLostCount,
9100  &pAllocationRequest->sumFreeSize,
9101  &pAllocationRequest->sumItemSize))
9102  {
9103  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9104  return true;
9105  }
9106  }
9107  }
9108  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9109  {
9110  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9111  it != m_Suballocations.end();
9112  ++it)
9113  {
9114  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9115  currentFrameIndex,
9116  frameInUseCount,
9117  bufferImageGranularity,
9118  allocSize,
9119  allocAlignment,
9120  allocType,
9121  it,
9122  false, // canMakeOtherLost
9123  &pAllocationRequest->offset,
9124  &pAllocationRequest->itemsToMakeLostCount,
9125  &pAllocationRequest->sumFreeSize,
9126  &pAllocationRequest->sumItemSize))
9127  {
9128  pAllocationRequest->item = it;
9129  return true;
9130  }
9131  }
9132  }
9133  else // WORST_FIT, FIRST_FIT
9134  {
 9135  // Search starting from the biggest suballocations.
9136  for(size_t index = freeSuballocCount; index--; )
9137  {
9138  if(CheckAllocation(
9139  currentFrameIndex,
9140  frameInUseCount,
9141  bufferImageGranularity,
9142  allocSize,
9143  allocAlignment,
9144  allocType,
9145  m_FreeSuballocationsBySize[index],
9146  false, // canMakeOtherLost
9147  &pAllocationRequest->offset,
9148  &pAllocationRequest->itemsToMakeLostCount,
9149  &pAllocationRequest->sumFreeSize,
9150  &pAllocationRequest->sumItemSize))
9151  {
9152  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9153  return true;
9154  }
9155  }
9156  }
9157  }
9158 
9159  if(canMakeOtherLost)
9160  {
9161  // Brute-force algorithm. TODO: Come up with something better.
9162 
9163  bool found = false;
9164  VmaAllocationRequest tmpAllocRequest = {};
9165  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9166  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9167  suballocIt != m_Suballocations.end();
9168  ++suballocIt)
9169  {
9170  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9171  suballocIt->hAllocation->CanBecomeLost())
9172  {
9173  if(CheckAllocation(
9174  currentFrameIndex,
9175  frameInUseCount,
9176  bufferImageGranularity,
9177  allocSize,
9178  allocAlignment,
9179  allocType,
9180  suballocIt,
9181  canMakeOtherLost,
9182  &tmpAllocRequest.offset,
9183  &tmpAllocRequest.itemsToMakeLostCount,
9184  &tmpAllocRequest.sumFreeSize,
9185  &tmpAllocRequest.sumItemSize))
9186  {
 9187  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
 9188  {
9189  *pAllocationRequest = tmpAllocRequest;
9190  pAllocationRequest->item = suballocIt;
9191  break;
9192  }
9193  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9194  {
9195  *pAllocationRequest = tmpAllocRequest;
9196  pAllocationRequest->item = suballocIt;
9197  found = true;
9198  }
9199  }
9200  }
9201  }
9202 
9203  return found;
9204  }
9205 
9206  return false;
9207 }
9208 
9209 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9210  uint32_t currentFrameIndex,
9211  uint32_t frameInUseCount,
9212  VmaAllocationRequest* pAllocationRequest)
9213 {
9214  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9215 
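 // Starting at the chosen item, turn the required number of lost-capable
 // allocations into free ranges. FreeSuballocation may merge neighbors, so its
 // returned iterator replaces pAllocationRequest->item on every step.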
9216  while(pAllocationRequest->itemsToMakeLostCount > 0)
9217  {
9218  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9219  {
9220  ++pAllocationRequest->item;
9221  }
9222  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9223  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9224  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9225  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9226  {
9227  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9228  --pAllocationRequest->itemsToMakeLostCount;
9229  }
9230  else
9231  {
9232  return false;
9233  }
9234  }
9235 
9236  VMA_HEAVY_ASSERT(Validate());
9237  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9238  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9239 
9240  return true;
9241 }
9242 
9243 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9244 {
9245  uint32_t lostAllocationCount = 0;
9246  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9247  it != m_Suballocations.end();
9248  ++it)
9249  {
9250  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9251  it->hAllocation->CanBecomeLost() &&
9252  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9253  {
9254  it = FreeSuballocation(it);
9255  ++lostAllocationCount;
9256  }
9257  }
9258  return lostAllocationCount;
9259 }
9260 
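// Scans the magic-value margins written before and after every used
// suballocation (effective when VMA_DEBUG_MARGIN and corruption detection are
// enabled) and reports VK_ERROR_VALIDATION_FAILED_EXT on a mismatch.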
9261 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9262 {
9263  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9264  it != m_Suballocations.end();
9265  ++it)
9266  {
9267  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
9268  {
9269  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9270  {
9271  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9272  return VK_ERROR_VALIDATION_FAILED_EXT;
9273  }
9274  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9275  {
9276  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9277  return VK_ERROR_VALIDATION_FAILED_EXT;
9278  }
9279  }
9280  }
9281 
9282  return VK_SUCCESS;
9283 }
9284 
9285 void VmaBlockMetadata_Generic::Alloc(
9286  const VmaAllocationRequest& request,
9287  VmaSuballocationType type,
9288  VkDeviceSize allocSize,
9289  VmaAllocation hAllocation)
9290 {
9291  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9292  VMA_ASSERT(request.item != m_Suballocations.end());
9293  VmaSuballocation& suballoc = *request.item;
9294  // Given suballocation is a free block.
9295  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9296  // Given offset is inside this suballocation.
9297  VMA_ASSERT(request.offset >= suballoc.offset);
9298  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9299  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9300  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9301 
9302  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9303  // it to become used.
9304  UnregisterFreeSuballocation(request.item);
9305 
9306  suballoc.offset = request.offset;
9307  suballoc.size = allocSize;
9308  suballoc.type = type;
9309  suballoc.hAllocation = hAllocation;
9310 
9311  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9312  if(paddingEnd)
9313  {
9314  VmaSuballocation paddingSuballoc = {};
9315  paddingSuballoc.offset = request.offset + allocSize;
9316  paddingSuballoc.size = paddingEnd;
9317  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9318  VmaSuballocationList::iterator next = request.item;
9319  ++next;
9320  const VmaSuballocationList::iterator paddingEndItem =
9321  m_Suballocations.insert(next, paddingSuballoc);
9322  RegisterFreeSuballocation(paddingEndItem);
9323  }
9324 
9325  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9326  if(paddingBegin)
9327  {
9328  VmaSuballocation paddingSuballoc = {};
9329  paddingSuballoc.offset = request.offset - paddingBegin;
9330  paddingSuballoc.size = paddingBegin;
9331  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9332  const VmaSuballocationList::iterator paddingBeginItem =
9333  m_Suballocations.insert(request.item, paddingSuballoc);
9334  RegisterFreeSuballocation(paddingBeginItem);
9335  }
9336 
9337  // Update totals.
9338  m_FreeCount = m_FreeCount - 1;
9339  if(paddingBegin > 0)
9340  {
9341  ++m_FreeCount;
9342  }
9343  if(paddingEnd > 0)
9344  {
9345  ++m_FreeCount;
9346  }
9347  m_SumFreeSize -= allocSize;
9348 }
9349 
9350 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9351 {
9352  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9353  suballocItem != m_Suballocations.end();
9354  ++suballocItem)
9355  {
9356  VmaSuballocation& suballoc = *suballocItem;
9357  if(suballoc.hAllocation == allocation)
9358  {
9359  FreeSuballocation(suballocItem);
9360  VMA_HEAVY_ASSERT(Validate());
9361  return;
9362  }
9363  }
9364  VMA_ASSERT(0 && "Not found!");
9365 }
9366 
9367 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9368 {
9369  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9370  suballocItem != m_Suballocations.end();
9371  ++suballocItem)
9372  {
9373  VmaSuballocation& suballoc = *suballocItem;
9374  if(suballoc.offset == offset)
9375  {
9376  FreeSuballocation(suballocItem);
9377  return;
9378  }
9379  }
9380  VMA_ASSERT(0 && "Not found!");
9381 }
9382 
9383 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9384 {
9385  VkDeviceSize lastSize = 0;
9386  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9387  {
9388  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9389 
9390  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9391  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9392  VMA_VALIDATE(it->size >= lastSize);
9393  lastSize = it->size;
9394  }
9395  return true;
9396 }
9397 
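// Core fitting test: given a starting free suballocation, computes the aligned
// offset (honoring VMA_DEBUG_MARGIN and bufferImageGranularity) and, when
// canMakeOtherLost is true, also how many subsequent lost-capable allocations
// would have to be sacrificed to fit allocSize. Returns false if the request
// cannot start at this item.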
9398 bool VmaBlockMetadata_Generic::CheckAllocation(
9399  uint32_t currentFrameIndex,
9400  uint32_t frameInUseCount,
9401  VkDeviceSize bufferImageGranularity,
9402  VkDeviceSize allocSize,
9403  VkDeviceSize allocAlignment,
9404  VmaSuballocationType allocType,
9405  VmaSuballocationList::const_iterator suballocItem,
9406  bool canMakeOtherLost,
9407  VkDeviceSize* pOffset,
9408  size_t* itemsToMakeLostCount,
9409  VkDeviceSize* pSumFreeSize,
9410  VkDeviceSize* pSumItemSize) const
9411 {
9412  VMA_ASSERT(allocSize > 0);
9413  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9414  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9415  VMA_ASSERT(pOffset != VMA_NULL);
9416 
9417  *itemsToMakeLostCount = 0;
9418  *pSumFreeSize = 0;
9419  *pSumItemSize = 0;
9420 
9421  if(canMakeOtherLost)
9422  {
9423  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9424  {
9425  *pSumFreeSize = suballocItem->size;
9426  }
9427  else
9428  {
9429  if(suballocItem->hAllocation->CanBecomeLost() &&
9430  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9431  {
9432  ++*itemsToMakeLostCount;
9433  *pSumItemSize = suballocItem->size;
9434  }
9435  else
9436  {
9437  return false;
9438  }
9439  }
9440 
9441  // Remaining size is too small for this request: Early return.
9442  if(GetSize() - suballocItem->offset < allocSize)
9443  {
9444  return false;
9445  }
9446 
9447  // Start from offset equal to beginning of this suballocation.
9448  *pOffset = suballocItem->offset;
9449 
9450  // Apply VMA_DEBUG_MARGIN at the beginning.
9451  if(VMA_DEBUG_MARGIN > 0)
9452  {
9453  *pOffset += VMA_DEBUG_MARGIN;
9454  }
9455 
9456  // Apply alignment.
9457  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9458 
9459  // Check previous suballocations for BufferImageGranularity conflicts.
9460  // Make bigger alignment if necessary.
9461  if(bufferImageGranularity > 1)
9462  {
9463  bool bufferImageGranularityConflict = false;
9464  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9465  while(prevSuballocItem != m_Suballocations.cbegin())
9466  {
9467  --prevSuballocItem;
9468  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9469  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9470  {
9471  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9472  {
9473  bufferImageGranularityConflict = true;
9474  break;
9475  }
9476  }
9477  else
9478  // Already on previous page.
9479  break;
9480  }
9481  if(bufferImageGranularityConflict)
9482  {
9483  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9484  }
9485  }
9486 
9487  // Now that we have final *pOffset, check if we are past suballocItem.
9488  // If yes, return false - this function should be called for another suballocItem as starting point.
9489  if(*pOffset >= suballocItem->offset + suballocItem->size)
9490  {
9491  return false;
9492  }
9493 
9494  // Calculate padding at the beginning based on current offset.
9495  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9496 
9497  // Calculate required margin at the end.
9498  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9499 
9500  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9501  // Another early return check.
9502  if(suballocItem->offset + totalSize > GetSize())
9503  {
9504  return false;
9505  }
9506 
9507  // Advance lastSuballocItem until desired size is reached.
9508  // Update itemsToMakeLostCount.
9509  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9510  if(totalSize > suballocItem->size)
9511  {
9512  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9513  while(remainingSize > 0)
9514  {
9515  ++lastSuballocItem;
9516  if(lastSuballocItem == m_Suballocations.cend())
9517  {
9518  return false;
9519  }
9520  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9521  {
9522  *pSumFreeSize += lastSuballocItem->size;
9523  }
9524  else
9525  {
9526  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9527  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9528  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9529  {
9530  ++*itemsToMakeLostCount;
9531  *pSumItemSize += lastSuballocItem->size;
9532  }
9533  else
9534  {
9535  return false;
9536  }
9537  }
9538  remainingSize = (lastSuballocItem->size < remainingSize) ?
9539  remainingSize - lastSuballocItem->size : 0;
9540  }
9541  }
9542 
9543  // Check next suballocations for BufferImageGranularity conflicts.
9544  // If conflict exists, we must mark more allocations lost or fail.
9545  if(bufferImageGranularity > 1)
9546  {
9547  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9548  ++nextSuballocItem;
9549  while(nextSuballocItem != m_Suballocations.cend())
9550  {
9551  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9552  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9553  {
9554  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9555  {
9556  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9557  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9558  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9559  {
9560  ++*itemsToMakeLostCount;
9561  }
9562  else
9563  {
9564  return false;
9565  }
9566  }
9567  }
9568  else
9569  {
9570  // Already on next page.
9571  break;
9572  }
9573  ++nextSuballocItem;
9574  }
9575  }
9576  }
9577  else
9578  {
9579  const VmaSuballocation& suballoc = *suballocItem;
9580  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9581 
9582  *pSumFreeSize = suballoc.size;
9583 
9584  // Size of this suballocation is too small for this request: Early return.
9585  if(suballoc.size < allocSize)
9586  {
9587  return false;
9588  }
9589 
9590  // Start from offset equal to beginning of this suballocation.
9591  *pOffset = suballoc.offset;
9592 
9593  // Apply VMA_DEBUG_MARGIN at the beginning.
9594  if(VMA_DEBUG_MARGIN > 0)
9595  {
9596  *pOffset += VMA_DEBUG_MARGIN;
9597  }
9598 
9599  // Apply alignment.
9600  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9601 
9602  // Check previous suballocations for BufferImageGranularity conflicts.
9603  // Make bigger alignment if necessary.
9604  if(bufferImageGranularity > 1)
9605  {
9606  bool bufferImageGranularityConflict = false;
9607  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9608  while(prevSuballocItem != m_Suballocations.cbegin())
9609  {
9610  --prevSuballocItem;
9611  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9612  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9613  {
9614  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9615  {
9616  bufferImageGranularityConflict = true;
9617  break;
9618  }
9619  }
9620  else
9621  // Already on previous page.
9622  break;
9623  }
9624  if(bufferImageGranularityConflict)
9625  {
9626  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9627  }
9628  }
9629 
9630  // Calculate padding at the beginning based on current offset.
9631  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9632 
9633  // Calculate required margin at the end.
9634  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9635 
9636  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9637  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9638  {
9639  return false;
9640  }
9641 
9642  // Check next suballocations for BufferImageGranularity conflicts.
9643  // If conflict exists, allocation cannot be made here.
9644  if(bufferImageGranularity > 1)
9645  {
9646  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9647  ++nextSuballocItem;
9648  while(nextSuballocItem != m_Suballocations.cend())
9649  {
9650  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9651  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9652  {
9653  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9654  {
9655  return false;
9656  }
9657  }
9658  else
9659  {
9660  // Already on next page.
9661  break;
9662  }
9663  ++nextSuballocItem;
9664  }
9665  }
9666  }
9667 
9668  // All tests passed: Success. pOffset is already filled.
9669  return true;
9670 }
9671 
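// Coalesces a free suballocation with the free item immediately following it
// and erases the latter. The caller is responsible for first unregistering the
// removed item from m_FreeSuballocationsBySize.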
9672 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9673 {
9674  VMA_ASSERT(item != m_Suballocations.end());
9675  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9676 
9677  VmaSuballocationList::iterator nextItem = item;
9678  ++nextItem;
9679  VMA_ASSERT(nextItem != m_Suballocations.end());
9680  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9681 
9682  item->size += nextItem->size;
9683  --m_FreeCount;
9684  m_Suballocations.erase(nextItem);
9685 }
9686 
9687 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9688 {
9689  // Change this suballocation to be marked as free.
9690  VmaSuballocation& suballoc = *suballocItem;
9691  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9692  suballoc.hAllocation = VK_NULL_HANDLE;
9693 
9694  // Update totals.
9695  ++m_FreeCount;
9696  m_SumFreeSize += suballoc.size;
9697 
9698  // Merge with previous and/or next suballocation if it's also free.
9699  bool mergeWithNext = false;
9700  bool mergeWithPrev = false;
9701 
9702  VmaSuballocationList::iterator nextItem = suballocItem;
9703  ++nextItem;
9704  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9705  {
9706  mergeWithNext = true;
9707  }
9708 
9709  VmaSuballocationList::iterator prevItem = suballocItem;
9710  if(suballocItem != m_Suballocations.begin())
9711  {
9712  --prevItem;
9713  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9714  {
9715  mergeWithPrev = true;
9716  }
9717  }
9718 
9719  if(mergeWithNext)
9720  {
9721  UnregisterFreeSuballocation(nextItem);
9722  MergeFreeWithNext(suballocItem);
9723  }
9724 
9725  if(mergeWithPrev)
9726  {
9727  UnregisterFreeSuballocation(prevItem);
9728  MergeFreeWithNext(prevItem);
9729  RegisterFreeSuballocation(prevItem);
9730  return prevItem;
9731  }
9732  else
9733  {
9734  RegisterFreeSuballocation(suballocItem);
9735  return suballocItem;
9736  }
9737 }
9738 
9739 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9740 {
9741  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9742  VMA_ASSERT(item->size > 0);
9743 
9744  // You may want to enable this validation at the beginning or at the end of
 9745  // this function, depending on what you want to check.
9746  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9747 
9748  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9749  {
9750  if(m_FreeSuballocationsBySize.empty())
9751  {
9752  m_FreeSuballocationsBySize.push_back(item);
9753  }
9754  else
9755  {
9756  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9757  }
9758  }
9759 
9760  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9761 }
9762 
9763 
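// Removes a free suballocation from m_FreeSuballocationsBySize: binary-search
// the first entry of equal size, then scan forward through equal-sized entries
// to find the exact iterator.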
9764 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9765 {
9766  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9767  VMA_ASSERT(item->size > 0);
9768 
9769  // You may want to enable this validation at the beginning or at the end of
 9770  // this function, depending on what you want to check.
9771  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9772 
9773  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9774  {
9775  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9776  m_FreeSuballocationsBySize.data(),
9777  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9778  item,
9779  VmaSuballocationItemSizeLess());
9780  for(size_t index = it - m_FreeSuballocationsBySize.data();
9781  index < m_FreeSuballocationsBySize.size();
9782  ++index)
9783  {
9784  if(m_FreeSuballocationsBySize[index] == item)
9785  {
9786  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9787  return;
9788  }
9789  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9790  }
9791  VMA_ASSERT(0 && "Not found.");
9792  }
9793 
9794  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9795 }
9796 
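// Conservative helper: walks all used suballocations, tracking their minimum
// alignment and whether two consecutive allocations have types that conflict
// under bufferImageGranularity.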
9797 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9798  VkDeviceSize bufferImageGranularity,
9799  VmaSuballocationType& inOutPrevSuballocType) const
9800 {
9801  if(bufferImageGranularity == 1 || IsEmpty())
9802  {
9803  return false;
9804  }
9805 
9806  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9807  bool typeConflictFound = false;
9808  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9809  it != m_Suballocations.cend();
9810  ++it)
9811  {
9812  const VmaSuballocationType suballocType = it->type;
9813  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9814  {
9815  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9816  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9817  {
9818  typeConflictFound = true;
9819  }
9820  inOutPrevSuballocType = suballocType;
9821  }
9822  }
9823 
9824  return typeConflictFound || minAlignment >= bufferImageGranularity;
9825 }
9826 
9828 // class VmaBlockMetadata_Linear
9829 
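// Metadata for linear (stack/ring-buffer) blocks. Two suballocation vectors are
// kept: the 1st grows towards higher offsets, while the 2nd is either empty,
// forms a ring buffer wrapping around before the 1st (SECOND_VECTOR_RING_BUFFER),
// or is a second stack growing down from the end of the block
// (SECOND_VECTOR_DOUBLE_STACK). Freed entries linger as "null items", counted in
// m_1stNullItemsBeginCount/m_1stNullItemsMiddleCount/m_2ndNullItemsCount, until cleanup.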
9830 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9831  VmaBlockMetadata(hAllocator),
9832  m_SumFreeSize(0),
9833  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9834  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9835  m_1stVectorIndex(0),
9836  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9837  m_1stNullItemsBeginCount(0),
9838  m_1stNullItemsMiddleCount(0),
9839  m_2ndNullItemsCount(0)
9840 {
9841 }
9842 
9843 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9844 {
9845 }
9846 
9847 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9848 {
9849  VmaBlockMetadata::Init(size);
9850  m_SumFreeSize = size;
9851 }
9852 
9853 bool VmaBlockMetadata_Linear::Validate() const
9854 {
9855  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9856  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9857 
9858  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9859  VMA_VALIDATE(!suballocations1st.empty() ||
9860  suballocations2nd.empty() ||
9861  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9862 
9863  if(!suballocations1st.empty())
9864  {
 9865  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
9866  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9867  // Null item at the end should be just pop_back().
9868  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9869  }
9870  if(!suballocations2nd.empty())
9871  {
9872  // Null item at the end should be just pop_back().
9873  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9874  }
9875 
9876  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9877  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9878 
9879  VkDeviceSize sumUsedSize = 0;
9880  const size_t suballoc1stCount = suballocations1st.size();
9881  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9882 
9883  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9884  {
9885  const size_t suballoc2ndCount = suballocations2nd.size();
9886  size_t nullItem2ndCount = 0;
9887  for(size_t i = 0; i < suballoc2ndCount; ++i)
9888  {
9889  const VmaSuballocation& suballoc = suballocations2nd[i];
9890  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9891 
9892  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9893  VMA_VALIDATE(suballoc.offset >= offset);
9894 
9895  if(!currFree)
9896  {
9897  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9898  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9899  sumUsedSize += suballoc.size;
9900  }
9901  else
9902  {
9903  ++nullItem2ndCount;
9904  }
9905 
9906  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9907  }
9908 
9909  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9910  }
9911 
9912  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9913  {
9914  const VmaSuballocation& suballoc = suballocations1st[i];
9915  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9916  suballoc.hAllocation == VK_NULL_HANDLE);
9917  }
9918 
9919  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9920 
9921  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9922  {
9923  const VmaSuballocation& suballoc = suballocations1st[i];
9924  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9925 
9926  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9927  VMA_VALIDATE(suballoc.offset >= offset);
9928  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9929 
9930  if(!currFree)
9931  {
9932  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9933  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9934  sumUsedSize += suballoc.size;
9935  }
9936  else
9937  {
9938  ++nullItem1stCount;
9939  }
9940 
9941  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9942  }
9943  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9944 
9945  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9946  {
9947  const size_t suballoc2ndCount = suballocations2nd.size();
9948  size_t nullItem2ndCount = 0;
9949  for(size_t i = suballoc2ndCount; i--; )
9950  {
9951  const VmaSuballocation& suballoc = suballocations2nd[i];
9952  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9953 
9954  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9955  VMA_VALIDATE(suballoc.offset >= offset);
9956 
9957  if(!currFree)
9958  {
9959  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9960  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9961  sumUsedSize += suballoc.size;
9962  }
9963  else
9964  {
9965  ++nullItem2ndCount;
9966  }
9967 
9968  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9969  }
9970 
9971  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9972  }
9973 
9974  VMA_VALIDATE(offset <= GetSize());
9975  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9976 
9977  return true;
9978 }
9979 
9980 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9981 {
9982  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9983  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9984 }
9985 
9986 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9987 {
9988  const VkDeviceSize size = GetSize();
9989 
9990  /*
9991  We don't consider gaps inside allocation vectors with freed allocations because
 9992  they are not suitable for reuse in a linear allocator. We consider only space that
9993  is available for new allocations.
9994  */
9995  if(IsEmpty())
9996  {
9997  return size;
9998  }
9999 
10000  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10001 
10002  switch(m_2ndVectorMode)
10003  {
10004  case SECOND_VECTOR_EMPTY:
10005  /*
10006  Available space is after end of 1st, as well as before beginning of 1st (which
 10007  would make it a ring buffer).
10008  */
10009  {
10010  const size_t suballocations1stCount = suballocations1st.size();
10011  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10012  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10013  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10014  return VMA_MAX(
10015  firstSuballoc.offset,
10016  size - (lastSuballoc.offset + lastSuballoc.size));
10017  }
10018  break;
10019 
10020  case SECOND_VECTOR_RING_BUFFER:
10021  /*
10022  Available space is only between end of 2nd and beginning of 1st.
10023  */
10024  {
10025  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10026  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10027  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10028  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10029  }
10030  break;
10031 
10032  case SECOND_VECTOR_DOUBLE_STACK:
10033  /*
10034  Available space is only between end of 1st and top of 2nd.
10035  */
10036  {
10037  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10038  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10039  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10040  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10041  }
10042  break;
10043 
10044  default:
10045  VMA_ASSERT(0);
10046  return 0;
10047  }
10048 }
10049 
10050 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10051 {
10052  const VkDeviceSize size = GetSize();
10053  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10054  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10055  const size_t suballoc1stCount = suballocations1st.size();
10056  const size_t suballoc2ndCount = suballocations2nd.size();
10057 
10058  outInfo.blockCount = 1;
10059  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10060  outInfo.unusedRangeCount = 0;
10061  outInfo.usedBytes = 0;
10062  outInfo.allocationSizeMin = UINT64_MAX;
10063  outInfo.allocationSizeMax = 0;
10064  outInfo.unusedRangeSizeMin = UINT64_MAX;
10065  outInfo.unusedRangeSizeMax = 0;
10066 
10067  VkDeviceSize lastOffset = 0;
10068 
10069  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10070  {
10071  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10072  size_t nextAlloc2ndIndex = 0;
10073  while(lastOffset < freeSpace2ndTo1stEnd)
10074  {
10075  // Find next non-null allocation or move nextAllocIndex to the end.
10076  while(nextAlloc2ndIndex < suballoc2ndCount &&
10077  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10078  {
10079  ++nextAlloc2ndIndex;
10080  }
10081 
10082  // Found non-null allocation.
10083  if(nextAlloc2ndIndex < suballoc2ndCount)
10084  {
10085  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10086 
10087  // 1. Process free space before this allocation.
10088  if(lastOffset < suballoc.offset)
10089  {
10090  // There is free space from lastOffset to suballoc.offset.
10091  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10092  ++outInfo.unusedRangeCount;
10093  outInfo.unusedBytes += unusedRangeSize;
10094  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
 10095  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10096  }
10097 
10098  // 2. Process this allocation.
10099  // There is allocation with suballoc.offset, suballoc.size.
10100  outInfo.usedBytes += suballoc.size;
10101  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
 10102  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10103 
10104  // 3. Prepare for next iteration.
10105  lastOffset = suballoc.offset + suballoc.size;
10106  ++nextAlloc2ndIndex;
10107  }
10108  // We are at the end.
10109  else
10110  {
10111  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10112  if(lastOffset < freeSpace2ndTo1stEnd)
10113  {
10114  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10115  ++outInfo.unusedRangeCount;
10116  outInfo.unusedBytes += unusedRangeSize;
10117  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
 10118  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10119  }
10120 
10121  // End of loop.
10122  lastOffset = freeSpace2ndTo1stEnd;
10123  }
10124  }
10125  }
10126 
10127  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10128  const VkDeviceSize freeSpace1stTo2ndEnd =
10129  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10130  while(lastOffset < freeSpace1stTo2ndEnd)
10131  {
10132  // Find next non-null allocation or move nextAllocIndex to the end.
10133  while(nextAlloc1stIndex < suballoc1stCount &&
10134  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10135  {
10136  ++nextAlloc1stIndex;
10137  }
10138 
10139  // Found non-null allocation.
10140  if(nextAlloc1stIndex < suballoc1stCount)
10141  {
10142  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10143 
10144  // 1. Process free space before this allocation.
10145  if(lastOffset < suballoc.offset)
10146  {
10147  // There is free space from lastOffset to suballoc.offset.
10148  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10149  ++outInfo.unusedRangeCount;
10150  outInfo.unusedBytes += unusedRangeSize;
10151  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
 10152  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10153  }
10154 
10155  // 2. Process this allocation.
10156  // There is allocation with suballoc.offset, suballoc.size.
10157  outInfo.usedBytes += suballoc.size;
10158  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
 10159  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10160 
10161  // 3. Prepare for next iteration.
10162  lastOffset = suballoc.offset + suballoc.size;
10163  ++nextAlloc1stIndex;
10164  }
10165  // We are at the end.
10166  else
10167  {
10168  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10169  if(lastOffset < freeSpace1stTo2ndEnd)
10170  {
10171  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10172  ++outInfo.unusedRangeCount;
10173  outInfo.unusedBytes += unusedRangeSize;
10174  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
 10175  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10176  }
10177 
10178  // End of loop.
10179  lastOffset = freeSpace1stTo2ndEnd;
10180  }
10181  }
10182 
10183  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10184  {
10185  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10186  while(lastOffset < size)
10187  {
10188  // Find next non-null allocation or move nextAllocIndex to the end.
10189  while(nextAlloc2ndIndex != SIZE_MAX &&
10190  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10191  {
10192  --nextAlloc2ndIndex;
10193  }
10194 
10195  // Found non-null allocation.
10196  if(nextAlloc2ndIndex != SIZE_MAX)
10197  {
10198  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10199 
10200  // 1. Process free space before this allocation.
10201  if(lastOffset < suballoc.offset)
10202  {
10203  // There is free space from lastOffset to suballoc.offset.
10204  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10205  ++outInfo.unusedRangeCount;
10206  outInfo.unusedBytes += unusedRangeSize;
10207  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
 10208  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10209  }
10210 
10211  // 2. Process this allocation.
10212  // There is allocation with suballoc.offset, suballoc.size.
10213  outInfo.usedBytes += suballoc.size;
10214  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
 10215  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10216 
10217  // 3. Prepare for next iteration.
10218  lastOffset = suballoc.offset + suballoc.size;
10219  --nextAlloc2ndIndex;
10220  }
10221  // We are at the end.
10222  else
10223  {
10224  // There is free space from lastOffset to size.
10225  if(lastOffset < size)
10226  {
10227  const VkDeviceSize unusedRangeSize = size - lastOffset;
10228  ++outInfo.unusedRangeCount;
10229  outInfo.unusedBytes += unusedRangeSize;
10230  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
 10231  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10232  }
10233 
10234  // End of loop.
10235  lastOffset = size;
10236  }
10237  }
10238  }
10239 
10240  outInfo.unusedBytes = size - outInfo.usedBytes;
10241 }
10242 
10243 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10244 {
10245  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10246  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10247  const VkDeviceSize size = GetSize();
10248  const size_t suballoc1stCount = suballocations1st.size();
10249  const size_t suballoc2ndCount = suballocations2nd.size();
10250 
10251  inoutStats.size += size;
10252 
10253  VkDeviceSize lastOffset = 0;
10254 
10255  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10256  {
10257  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
 10258  size_t nextAlloc2ndIndex = 0;
10259  while(lastOffset < freeSpace2ndTo1stEnd)
10260  {
10261  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10262  while(nextAlloc2ndIndex < suballoc2ndCount &&
10263  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10264  {
10265  ++nextAlloc2ndIndex;
10266  }
10267 
10268  // Found non-null allocation.
10269  if(nextAlloc2ndIndex < suballoc2ndCount)
10270  {
10271  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10272 
10273  // 1. Process free space before this allocation.
10274  if(lastOffset < suballoc.offset)
10275  {
10276  // There is free space from lastOffset to suballoc.offset.
10277  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10278  inoutStats.unusedSize += unusedRangeSize;
10279  ++inoutStats.unusedRangeCount;
10280  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10281  }
10282 
10283  // 2. Process this allocation.
10284  // There is allocation with suballoc.offset, suballoc.size.
10285  ++inoutStats.allocationCount;
10286 
10287  // 3. Prepare for next iteration.
10288  lastOffset = suballoc.offset + suballoc.size;
10289  ++nextAlloc2ndIndex;
10290  }
10291  // We are at the end.
10292  else
10293  {
10294  if(lastOffset < freeSpace2ndTo1stEnd)
10295  {
10296  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10297  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10298  inoutStats.unusedSize += unusedRangeSize;
10299  ++inoutStats.unusedRangeCount;
10300  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10301  }
10302 
10303  // End of loop.
10304  lastOffset = freeSpace2ndTo1stEnd;
10305  }
10306  }
10307  }
10308 
10309  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10310  const VkDeviceSize freeSpace1stTo2ndEnd =
10311  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10312  while(lastOffset < freeSpace1stTo2ndEnd)
10313  {
10314  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10315  while(nextAlloc1stIndex < suballoc1stCount &&
10316  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10317  {
10318  ++nextAlloc1stIndex;
10319  }
10320 
10321  // Found non-null allocation.
10322  if(nextAlloc1stIndex < suballoc1stCount)
10323  {
10324  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10325 
10326  // 1. Process free space before this allocation.
10327  if(lastOffset < suballoc.offset)
10328  {
10329  // There is free space from lastOffset to suballoc.offset.
10330  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10331  inoutStats.unusedSize += unusedRangeSize;
10332  ++inoutStats.unusedRangeCount;
10333  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10334  }
10335 
10336  // 2. Process this allocation.
10337  // There is allocation with suballoc.offset, suballoc.size.
10338  ++inoutStats.allocationCount;
10339 
10340  // 3. Prepare for next iteration.
10341  lastOffset = suballoc.offset + suballoc.size;
10342  ++nextAlloc1stIndex;
10343  }
10344  // We are at the end.
10345  else
10346  {
10347  if(lastOffset < freeSpace1stTo2ndEnd)
10348  {
10349  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10350  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10351  inoutStats.unusedSize += unusedRangeSize;
10352  ++inoutStats.unusedRangeCount;
10353  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10354  }
10355 
10356  // End of loop.
10357  lastOffset = freeSpace1stTo2ndEnd;
10358  }
10359  }
10360 
10361  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10362  {
10363  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10364  while(lastOffset < size)
10365  {
10366  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10367  while(nextAlloc2ndIndex != SIZE_MAX &&
10368  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10369  {
10370  --nextAlloc2ndIndex;
10371  }
10372 
10373  // Found non-null allocation.
10374  if(nextAlloc2ndIndex != SIZE_MAX)
10375  {
10376  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10377 
10378  // 1. Process free space before this allocation.
10379  if(lastOffset < suballoc.offset)
10380  {
10381  // There is free space from lastOffset to suballoc.offset.
10382  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10383  inoutStats.unusedSize += unusedRangeSize;
10384  ++inoutStats.unusedRangeCount;
10385  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10386  }
10387 
10388  // 2. Process this allocation.
10389  // There is allocation with suballoc.offset, suballoc.size.
10390  ++inoutStats.allocationCount;
10391 
10392  // 3. Prepare for next iteration.
10393  lastOffset = suballoc.offset + suballoc.size;
10394  --nextAlloc2ndIndex;
10395  }
10396  // We are at the end.
10397  else
10398  {
10399  if(lastOffset < size)
10400  {
10401  // There is free space from lastOffset to size.
10402  const VkDeviceSize unusedRangeSize = size - lastOffset;
10403  inoutStats.unusedSize += unusedRangeSize;
10404  ++inoutStats.unusedRangeCount;
10405  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10406  }
10407 
10408  // End of loop.
10409  lastOffset = size;
10410  }
10411  }
10412  }
10413 }
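// Memory layout assumed by the walk above (sketch): the linear metadata keeps
// two suballocation vectors whose placement depends on m_2ndVectorMode:
//
//     SECOND_VECTOR_RING_BUFFER:  |--2nd--|...free...|----1st----|...free...|
//     SECOND_VECTOR_DOUBLE_STACK: |----1st---->...free...<--2nd (top-down)--|
//
// lastOffset advances monotonically from 0 to GetSize(), so every byte is
// classified exactly once as either an allocation or an unused range.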
10414 
10415 #if VMA_STATS_STRING_ENABLED
10416 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10417 {
10418  const VkDeviceSize size = GetSize();
10419  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10420  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10421  const size_t suballoc1stCount = suballocations1st.size();
10422  const size_t suballoc2ndCount = suballocations2nd.size();
10423 
10424  // FIRST PASS
10425 
10426  size_t unusedRangeCount = 0;
10427  VkDeviceSize usedBytes = 0;
10428 
10429  VkDeviceSize lastOffset = 0;
10430 
10431  size_t alloc2ndCount = 0;
10432  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10433  {
10434  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10435  size_t nextAlloc2ndIndex = 0;
10436  while(lastOffset < freeSpace2ndTo1stEnd)
10437  {
10438  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10439  while(nextAlloc2ndIndex < suballoc2ndCount &&
10440  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10441  {
10442  ++nextAlloc2ndIndex;
10443  }
10444 
10445  // Found non-null allocation.
10446  if(nextAlloc2ndIndex < suballoc2ndCount)
10447  {
10448  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10449 
10450  // 1. Process free space before this allocation.
10451  if(lastOffset < suballoc.offset)
10452  {
10453  // There is free space from lastOffset to suballoc.offset.
10454  ++unusedRangeCount;
10455  }
10456 
10457  // 2. Process this allocation.
10458  // There is allocation with suballoc.offset, suballoc.size.
10459  ++alloc2ndCount;
10460  usedBytes += suballoc.size;
10461 
10462  // 3. Prepare for next iteration.
10463  lastOffset = suballoc.offset + suballoc.size;
10464  ++nextAlloc2ndIndex;
10465  }
10466  // We are at the end.
10467  else
10468  {
10469  if(lastOffset < freeSpace2ndTo1stEnd)
10470  {
10471  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10472  ++unusedRangeCount;
10473  }
10474 
10475  // End of loop.
10476  lastOffset = freeSpace2ndTo1stEnd;
10477  }
10478  }
10479  }
10480 
10481  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10482  size_t alloc1stCount = 0;
10483  const VkDeviceSize freeSpace1stTo2ndEnd =
10484  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10485  while(lastOffset < freeSpace1stTo2ndEnd)
10486  {
10487  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10488  while(nextAlloc1stIndex < suballoc1stCount &&
10489  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10490  {
10491  ++nextAlloc1stIndex;
10492  }
10493 
10494  // Found non-null allocation.
10495  if(nextAlloc1stIndex < suballoc1stCount)
10496  {
10497  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10498 
10499  // 1. Process free space before this allocation.
10500  if(lastOffset < suballoc.offset)
10501  {
10502  // There is free space from lastOffset to suballoc.offset.
10503  ++unusedRangeCount;
10504  }
10505 
10506  // 2. Process this allocation.
10507  // There is allocation with suballoc.offset, suballoc.size.
10508  ++alloc1stCount;
10509  usedBytes += suballoc.size;
10510 
10511  // 3. Prepare for next iteration.
10512  lastOffset = suballoc.offset + suballoc.size;
10513  ++nextAlloc1stIndex;
10514  }
10515  // We are at the end.
10516  else
10517  {
10518  if(lastOffset < freeSpace1stTo2ndEnd)
10519  {
10520  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10521  ++unusedRangeCount;
10522  }
10523 
10524  // End of loop.
10525  lastOffset = freeSpace1stTo2ndEnd;
10526  }
10527  }
10528 
10529  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10530  {
10531  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10532  while(lastOffset < size)
10533  {
10534  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10535  while(nextAlloc2ndIndex != SIZE_MAX &&
10536  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10537  {
10538  --nextAlloc2ndIndex;
10539  }
10540 
10541  // Found non-null allocation.
10542  if(nextAlloc2ndIndex != SIZE_MAX)
10543  {
10544  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10545 
10546  // 1. Process free space before this allocation.
10547  if(lastOffset < suballoc.offset)
10548  {
10549  // There is free space from lastOffset to suballoc.offset.
10550  ++unusedRangeCount;
10551  }
10552 
10553  // 2. Process this allocation.
10554  // There is allocation with suballoc.offset, suballoc.size.
10555  ++alloc2ndCount;
10556  usedBytes += suballoc.size;
10557 
10558  // 3. Prepare for next iteration.
10559  lastOffset = suballoc.offset + suballoc.size;
10560  --nextAlloc2ndIndex;
10561  }
10562  // We are at the end.
10563  else
10564  {
10565  if(lastOffset < size)
10566  {
10567  // There is free space from lastOffset to size.
10568  ++unusedRangeCount;
10569  }
10570 
10571  // End of loop.
10572  lastOffset = size;
10573  }
10574  }
10575  }
10576 
10577  const VkDeviceSize unusedBytes = size - usedBytes;
10578  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10579 
10580  // SECOND PASS
10581  lastOffset = 0;
10582 
10583  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10584  {
10585  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10586  size_t nextAlloc2ndIndex = 0;
10587  while(lastOffset < freeSpace2ndTo1stEnd)
10588  {
10589  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10590  while(nextAlloc2ndIndex < suballoc2ndCount &&
10591  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10592  {
10593  ++nextAlloc2ndIndex;
10594  }
10595 
10596  // Found non-null allocation.
10597  if(nextAlloc2ndIndex < suballoc2ndCount)
10598  {
10599  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10600 
10601  // 1. Process free space before this allocation.
10602  if(lastOffset < suballoc.offset)
10603  {
10604  // There is free space from lastOffset to suballoc.offset.
10605  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10606  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10607  }
10608 
10609  // 2. Process this allocation.
10610  // There is allocation with suballoc.offset, suballoc.size.
10611  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10612 
10613  // 3. Prepare for next iteration.
10614  lastOffset = suballoc.offset + suballoc.size;
10615  ++nextAlloc2ndIndex;
10616  }
10617  // We are at the end.
10618  else
10619  {
10620  if(lastOffset < freeSpace2ndTo1stEnd)
10621  {
10622  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10623  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10624  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10625  }
10626 
10627  // End of loop.
10628  lastOffset = freeSpace2ndTo1stEnd;
10629  }
10630  }
10631  }
10632 
10633  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10634  while(lastOffset < freeSpace1stTo2ndEnd)
10635  {
10636  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10637  while(nextAlloc1stIndex < suballoc1stCount &&
10638  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10639  {
10640  ++nextAlloc1stIndex;
10641  }
10642 
10643  // Found non-null allocation.
10644  if(nextAlloc1stIndex < suballoc1stCount)
10645  {
10646  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10647 
10648  // 1. Process free space before this allocation.
10649  if(lastOffset < suballoc.offset)
10650  {
10651  // There is free space from lastOffset to suballoc.offset.
10652  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10653  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10654  }
10655 
10656  // 2. Process this allocation.
10657  // There is allocation with suballoc.offset, suballoc.size.
10658  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10659 
10660  // 3. Prepare for next iteration.
10661  lastOffset = suballoc.offset + suballoc.size;
10662  ++nextAlloc1stIndex;
10663  }
10664  // We are at the end.
10665  else
10666  {
10667  if(lastOffset < freeSpace1stTo2ndEnd)
10668  {
10669  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10670  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10671  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10672  }
10673 
10674  // End of loop.
10675  lastOffset = freeSpace1stTo2ndEnd;
10676  }
10677  }
10678 
10679  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10680  {
10681  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10682  while(lastOffset < size)
10683  {
10684  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10685  while(nextAlloc2ndIndex != SIZE_MAX &&
10686  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10687  {
10688  --nextAlloc2ndIndex;
10689  }
10690 
10691  // Found non-null allocation.
10692  if(nextAlloc2ndIndex != SIZE_MAX)
10693  {
10694  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10695 
10696  // 1. Process free space before this allocation.
10697  if(lastOffset < suballoc.offset)
10698  {
10699  // There is free space from lastOffset to suballoc.offset.
10700  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10701  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10702  }
10703 
10704  // 2. Process this allocation.
10705  // There is allocation with suballoc.offset, suballoc.size.
10706  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10707 
10708  // 3. Prepare for next iteration.
10709  lastOffset = suballoc.offset + suballoc.size;
10710  --nextAlloc2ndIndex;
10711  }
10712  // We are at the end.
10713  else
10714  {
10715  if(lastOffset < size)
10716  {
10717  // There is free space from lastOffset to size.
10718  const VkDeviceSize unusedRangeSize = size - lastOffset;
10719  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10720  }
10721 
10722  // End of loop.
10723  lastOffset = size;
10724  }
10725  }
10726  }
10727 
10728  PrintDetailedMap_End(json);
10729 }
10730 #endif // #if VMA_STATS_STRING_ENABLED
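// PrintDetailedMap() above is deliberately two-pass: PrintDetailedMap_Begin()
// needs the final totals up front, so pass 1 only counts allocations and
// unused ranges, and pass 2 replays the identical walk to emit one entry per
// range. A minimal sketch of the same pattern (hypothetical Range/Writer
// types, not the VMA JSON API):
//
//     size_t count = 0;
//     for(const Range& r : ranges) { (void)r; ++count; } // pass 1: totals
//     writer.BeginList(count);                           // header needs count
//     for(const Range& r : ranges) writer.WriteRange(r); // pass 2: payload
//     writer.EndList();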
10731 
10732 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10733  uint32_t currentFrameIndex,
10734  uint32_t frameInUseCount,
10735  VkDeviceSize bufferImageGranularity,
10736  VkDeviceSize allocSize,
10737  VkDeviceSize allocAlignment,
10738  bool upperAddress,
10739  VmaSuballocationType allocType,
10740  bool canMakeOtherLost,
10741  uint32_t strategy,
10742  VmaAllocationRequest* pAllocationRequest)
10743 {
10744  VMA_ASSERT(allocSize > 0);
10745  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10746  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10747  VMA_HEAVY_ASSERT(Validate());
10748  return upperAddress ?
10749  CreateAllocationRequest_UpperAddress(
10750  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10751  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10752  CreateAllocationRequest_LowerAddress(
10753  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10754  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10755 }
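// Usage note: upperAddress corresponds to
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, which turns a pool using the linear
// algorithm into a double stack. A hedged usage sketch (myLinearPool is a
// hypothetical pool created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT):
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.pool = myLinearPool;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//     // vmaCreateBuffer()/vmaAllocateMemory() now place the allocation at the
//     // top of the block, growing downward toward the lower stack.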
10756 
10757 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10758  uint32_t currentFrameIndex,
10759  uint32_t frameInUseCount,
10760  VkDeviceSize bufferImageGranularity,
10761  VkDeviceSize allocSize,
10762  VkDeviceSize allocAlignment,
10763  VmaSuballocationType allocType,
10764  bool canMakeOtherLost,
10765  uint32_t strategy,
10766  VmaAllocationRequest* pAllocationRequest)
10767 {
10768  const VkDeviceSize size = GetSize();
10769  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10770  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10771 
10772  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10773  {
10774  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10775  return false;
10776  }
10777 
10778  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10779  if(allocSize > size)
10780  {
10781  return false;
10782  }
10783  VkDeviceSize resultBaseOffset = size - allocSize;
10784  if(!suballocations2nd.empty())
10785  {
10786  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10787  resultBaseOffset = lastSuballoc.offset - allocSize;
10788  if(allocSize > lastSuballoc.offset)
10789  {
10790  return false;
10791  }
10792  }
10793 
10794  // Start from offset equal to end of free space.
10795  VkDeviceSize resultOffset = resultBaseOffset;
10796 
10797  // Apply VMA_DEBUG_MARGIN at the end.
10798  if(VMA_DEBUG_MARGIN > 0)
10799  {
10800  if(resultOffset < VMA_DEBUG_MARGIN)
10801  {
10802  return false;
10803  }
10804  resultOffset -= VMA_DEBUG_MARGIN;
10805  }
10806 
10807  // Apply alignment.
10808  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10809 
10810  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10811  // Make bigger alignment if necessary.
10812  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10813  {
10814  bool bufferImageGranularityConflict = false;
10815  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10816  {
10817  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10818  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10819  {
10820  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10821  {
10822  bufferImageGranularityConflict = true;
10823  break;
10824  }
10825  }
10826  else
10827  // Already on previous page.
10828  break;
10829  }
10830  if(bufferImageGranularityConflict)
10831  {
10832  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10833  }
10834  }
10835 
10836  // There is enough free space.
10837  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10838  suballocations1st.back().offset + suballocations1st.back().size :
10839  0;
10840  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10841  {
10842  // Check previous suballocations for BufferImageGranularity conflicts.
10843  // If conflict exists, allocation cannot be made here.
10844  if(bufferImageGranularity > 1)
10845  {
10846  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10847  {
10848  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10849  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10850  {
10851  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10852  {
10853  return false;
10854  }
10855  }
10856  else
10857  {
10858  // Already on next page.
10859  break;
10860  }
10861  }
10862  }
10863 
10864  // All tests passed: Success.
10865  pAllocationRequest->offset = resultOffset;
10866  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10867  pAllocationRequest->sumItemSize = 0;
10868  // pAllocationRequest->item unused.
10869  pAllocationRequest->itemsToMakeLostCount = 0;
10870  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10871  return true;
10872  }
10873 
10874  return false;
10875 }
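// The top-down placement above relies on VmaAlignDown() rounding an offset
// down to a multiple of the alignment. Equivalent sketch, assuming a
// power-of-two alignment (Vulkan reports alignments as powers of two):
//
//     static VkDeviceSize AlignDownSketch(VkDeviceSize x, VkDeviceSize a)
//     {
//         return x & ~(a - 1); // AlignDownSketch(1000, 256) == 768
//     }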
10876 
10877 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10878  uint32_t currentFrameIndex,
10879  uint32_t frameInUseCount,
10880  VkDeviceSize bufferImageGranularity,
10881  VkDeviceSize allocSize,
10882  VkDeviceSize allocAlignment,
10883  VmaSuballocationType allocType,
10884  bool canMakeOtherLost,
10885  uint32_t strategy,
10886  VmaAllocationRequest* pAllocationRequest)
10887 {
10888  const VkDeviceSize size = GetSize();
10889  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10890  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10891 
10892  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10893  {
10894  // Try to allocate at the end of 1st vector.
10895 
10896  VkDeviceSize resultBaseOffset = 0;
10897  if(!suballocations1st.empty())
10898  {
10899  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10900  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10901  }
10902 
10903  // Start from offset equal to beginning of free space.
10904  VkDeviceSize resultOffset = resultBaseOffset;
10905 
10906  // Apply VMA_DEBUG_MARGIN at the beginning.
10907  if(VMA_DEBUG_MARGIN > 0)
10908  {
10909  resultOffset += VMA_DEBUG_MARGIN;
10910  }
10911 
10912  // Apply alignment.
10913  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10914 
10915  // Check previous suballocations for BufferImageGranularity conflicts.
10916  // Make bigger alignment if necessary.
10917  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10918  {
10919  bool bufferImageGranularityConflict = false;
10920  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10921  {
10922  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10923  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10924  {
10925  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10926  {
10927  bufferImageGranularityConflict = true;
10928  break;
10929  }
10930  }
10931  else
10932  // Already on previous page.
10933  break;
10934  }
10935  if(bufferImageGranularityConflict)
10936  {
10937  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10938  }
10939  }
10940 
10941  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10942  suballocations2nd.back().offset : size;
10943 
10944  // There is enough free space at the end after alignment.
10945  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10946  {
10947  // Check next suballocations for BufferImageGranularity conflicts.
10948  // If conflict exists, allocation cannot be made here.
10949  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10950  {
10951  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10952  {
10953  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10954  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10955  {
10956  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10957  {
10958  return false;
10959  }
10960  }
10961  else
10962  {
10963  // Already on previous page.
10964  break;
10965  }
10966  }
10967  }
10968 
10969  // All tests passed: Success.
10970  pAllocationRequest->offset = resultOffset;
10971  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10972  pAllocationRequest->sumItemSize = 0;
10973  // pAllocationRequest->item, customData unused.
10974  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10975  pAllocationRequest->itemsToMakeLostCount = 0;
10976  return true;
10977  }
10978  }
10979 
10980  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10981  // beginning of 1st vector as the end of free space.
10982  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10983  {
10984  VMA_ASSERT(!suballocations1st.empty());
10985 
10986  VkDeviceSize resultBaseOffset = 0;
10987  if(!suballocations2nd.empty())
10988  {
10989  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10990  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10991  }
10992 
10993  // Start from offset equal to beginning of free space.
10994  VkDeviceSize resultOffset = resultBaseOffset;
10995 
10996  // Apply VMA_DEBUG_MARGIN at the beginning.
10997  if(VMA_DEBUG_MARGIN > 0)
10998  {
10999  resultOffset += VMA_DEBUG_MARGIN;
11000  }
11001 
11002  // Apply alignment.
11003  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11004 
11005  // Check previous suballocations for BufferImageGranularity conflicts.
11006  // Make bigger alignment if necessary.
11007  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
11008  {
11009  bool bufferImageGranularityConflict = false;
11010  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11011  {
11012  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11013  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11014  {
11015  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11016  {
11017  bufferImageGranularityConflict = true;
11018  break;
11019  }
11020  }
11021  else
11022  // Already on previous page.
11023  break;
11024  }
11025  if(bufferImageGranularityConflict)
11026  {
11027  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11028  }
11029  }
11030 
11031  pAllocationRequest->itemsToMakeLostCount = 0;
11032  pAllocationRequest->sumItemSize = 0;
11033  size_t index1st = m_1stNullItemsBeginCount;
11034 
11035  if(canMakeOtherLost)
11036  {
11037  while(index1st < suballocations1st.size() &&
11038  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11039  {
11040  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11041  const VmaSuballocation& suballoc = suballocations1st[index1st];
11042  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11043  {
11044  // No problem.
11045  }
11046  else
11047  {
11048  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11049  if(suballoc.hAllocation->CanBecomeLost() &&
11050  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11051  {
11052  ++pAllocationRequest->itemsToMakeLostCount;
11053  pAllocationRequest->sumItemSize += suballoc.size;
11054  }
11055  else
11056  {
11057  return false;
11058  }
11059  }
11060  ++index1st;
11061  }
11062 
11063  // Check next suballocations for BufferImageGranularity conflicts.
11064  // If conflict exists, we must mark more allocations lost or fail.
11065  if(bufferImageGranularity > 1)
11066  {
11067  while(index1st < suballocations1st.size())
11068  {
11069  const VmaSuballocation& suballoc = suballocations1st[index1st];
11070  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11071  {
11072  if(suballoc.hAllocation != VK_NULL_HANDLE)
11073  {
11074  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11075  if(suballoc.hAllocation->CanBecomeLost() &&
11076  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11077  {
11078  ++pAllocationRequest->itemsToMakeLostCount;
11079  pAllocationRequest->sumItemSize += suballoc.size;
11080  }
11081  else
11082  {
11083  return false;
11084  }
11085  }
11086  }
11087  else
11088  {
11089  // Already on next page.
11090  break;
11091  }
11092  ++index1st;
11093  }
11094  }
11095 
11096  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
11097  if(index1st == suballocations1st.size() &&
11098  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11099  {
11100  // TODO: Known bug: this special case is not implemented yet, so the allocation fails.
11101  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11102  }
11103  }
11104 
11105  // There is enough free space at the end after alignment.
11106  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11107  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11108  {
11109  // Check next suballocations for BufferImageGranularity conflicts.
11110  // If conflict exists, allocation cannot be made here.
11111  if(bufferImageGranularity > 1)
11112  {
11113  for(size_t nextSuballocIndex = index1st;
11114  nextSuballocIndex < suballocations1st.size();
11115  nextSuballocIndex++)
11116  {
11117  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11118  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11119  {
11120  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11121  {
11122  return false;
11123  }
11124  }
11125  else
11126  {
11127  // Already on next page.
11128  break;
11129  }
11130  }
11131  }
11132 
11133  // All tests passed: Success.
11134  pAllocationRequest->offset = resultOffset;
11135  pAllocationRequest->sumFreeSize =
11136  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11137  - resultBaseOffset
11138  - pAllocationRequest->sumItemSize;
11139  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11140  // pAllocationRequest->item, customData unused.
11141  return true;
11142  }
11143  }
11144 
11145  return false;
11146 }
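// Bottom-up placement uses the mirror operation: VmaAlignUp() rounds an offset
// up to a multiple of the alignment. Equivalent sketch under the same
// power-of-two assumption:
//
//     static VkDeviceSize AlignUpSketch(VkDeviceSize x, VkDeviceSize a)
//     {
//         return (x + a - 1) & ~(a - 1); // AlignUpSketch(1000, 256) == 1024
//     }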
11147 
11148 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11149  uint32_t currentFrameIndex,
11150  uint32_t frameInUseCount,
11151  VmaAllocationRequest* pAllocationRequest)
11152 {
11153  if(pAllocationRequest->itemsToMakeLostCount == 0)
11154  {
11155  return true;
11156  }
11157 
11158  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11159 
11160  // We always start from 1st.
11161  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11162  size_t index = m_1stNullItemsBeginCount;
11163  size_t madeLostCount = 0;
11164  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11165  {
11166  if(index == suballocations->size())
11167  {
11168  index = 0;
11169  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
11170  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11171  {
11172  suballocations = &AccessSuballocations2nd();
11173  }
11174  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11175  // suballocations continues pointing at AccessSuballocations1st().
11176  VMA_ASSERT(!suballocations->empty());
11177  }
11178  VmaSuballocation& suballoc = (*suballocations)[index];
11179  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11180  {
11181  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11182  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11183  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11184  {
11185  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11186  suballoc.hAllocation = VK_NULL_HANDLE;
11187  m_SumFreeSize += suballoc.size;
11188  if(suballocations == &AccessSuballocations1st())
11189  {
11190  ++m_1stNullItemsMiddleCount;
11191  }
11192  else
11193  {
11194  ++m_2ndNullItemsCount;
11195  }
11196  ++madeLostCount;
11197  }
11198  else
11199  {
11200  return false;
11201  }
11202  }
11203  ++index;
11204  }
11205 
11206  CleanupAfterFree();
11207  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11208 
11209  return true;
11210 }
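// The "can become lost" test used above reduces to a frame-age predicate; a
// sketch with hypothetical names:
//
//     bool IsOldEnoughToMakeLost(uint32_t lastUseFrameIndex,
//         uint32_t frameInUseCount, uint32_t currentFrameIndex)
//     {
//         // Last used more than frameInUseCount frames before the current one.
//         return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
//     }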
11211 
11212 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11213 {
11214  uint32_t lostAllocationCount = 0;
11215 
11216  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11217  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11218  {
11219  VmaSuballocation& suballoc = suballocations1st[i];
11220  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11221  suballoc.hAllocation->CanBecomeLost() &&
11222  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11223  {
11224  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11225  suballoc.hAllocation = VK_NULL_HANDLE;
11226  ++m_1stNullItemsMiddleCount;
11227  m_SumFreeSize += suballoc.size;
11228  ++lostAllocationCount;
11229  }
11230  }
11231 
11232  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11233  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11234  {
11235  VmaSuballocation& suballoc = suballocations2nd[i];
11236  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11237  suballoc.hAllocation->CanBecomeLost() &&
11238  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11239  {
11240  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11241  suballoc.hAllocation = VK_NULL_HANDLE;
11242  ++m_2ndNullItemsCount;
11243  m_SumFreeSize += suballoc.size;
11244  ++lostAllocationCount;
11245  }
11246  }
11247 
11248  if(lostAllocationCount)
11249  {
11250  CleanupAfterFree();
11251  }
11252 
11253  return lostAllocationCount;
11254 }
11255 
11256 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11257 {
11258  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11259  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11260  {
11261  const VmaSuballocation& suballoc = suballocations1st[i];
11262  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11263  {
11264  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11265  {
11266  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11267  return VK_ERROR_VALIDATION_FAILED_EXT;
11268  }
11269  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11270  {
11271  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11272  return VK_ERROR_VALIDATION_FAILED_EXT;
11273  }
11274  }
11275  }
11276 
11277  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11278  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11279  {
11280  const VmaSuballocation& suballoc = suballocations2nd[i];
11281  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11282  {
11283  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11284  {
11285  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11286  return VK_ERROR_VALIDATION_FAILED_EXT;
11287  }
11288  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11289  {
11290  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11291  return VK_ERROR_VALIDATION_FAILED_EXT;
11292  }
11293  }
11294  }
11295 
11296  return VK_SUCCESS;
11297 }
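// Layout assumed by CheckCorruption() (sketch): with VMA_DEBUG_MARGIN > 0,
// each allocation is bracketed by margins filled with a magic value at
// allocation time, which VmaValidateMagicValue() later re-checks:
//
//     | ...magic... | allocation payload | ...magic... |
//     ^ offset - VMA_DEBUG_MARGIN        ^ offset + size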
11298 
11299 void VmaBlockMetadata_Linear::Alloc(
11300  const VmaAllocationRequest& request,
11301  VmaSuballocationType type,
11302  VkDeviceSize allocSize,
11303  VmaAllocation hAllocation)
11304 {
11305  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11306 
11307  switch(request.type)
11308  {
11309  case VmaAllocationRequestType::UpperAddress:
11310  {
11311  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11312  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11313  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11314  suballocations2nd.push_back(newSuballoc);
11315  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11316  }
11317  break;
11318  case VmaAllocationRequestType::EndOf1st:
11319  {
11320  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11321 
11322  VMA_ASSERT(suballocations1st.empty() ||
11323  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11324  // Check if it fits before the end of the block.
11325  VMA_ASSERT(request.offset + allocSize <= GetSize());
11326 
11327  suballocations1st.push_back(newSuballoc);
11328  }
11329  break;
11330  case VmaAllocationRequestType::EndOf2nd:
11331  {
11332  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11333  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11334  VMA_ASSERT(!suballocations1st.empty() &&
11335  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11336  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11337 
11338  switch(m_2ndVectorMode)
11339  {
11340  case SECOND_VECTOR_EMPTY:
11341  // First allocation from second part ring buffer.
11342  VMA_ASSERT(suballocations2nd.empty());
11343  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11344  break;
11345  case SECOND_VECTOR_RING_BUFFER:
11346  // 2-part ring buffer is already started.
11347  VMA_ASSERT(!suballocations2nd.empty());
11348  break;
11349  case SECOND_VECTOR_DOUBLE_STACK:
11350  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11351  break;
11352  default:
11353  VMA_ASSERT(0);
11354  }
11355 
11356  suballocations2nd.push_back(newSuballoc);
11357  }
11358  break;
11359  default:
11360  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11361  }
11362 
11363  m_SumFreeSize -= newSuballoc.size;
11364 }
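// State transitions of m_2ndVectorMode, as driven by Alloc() above and
// CleanupAfterFree() below (sketch):
//
//     SECOND_VECTOR_EMPTY --UpperAddress alloc--> SECOND_VECTOR_DOUBLE_STACK
//     SECOND_VECTOR_EMPTY --EndOf2nd alloc------> SECOND_VECTOR_RING_BUFFER
//     DOUBLE_STACK / RING_BUFFER --2nd vector drained--> SECOND_VECTOR_EMPTY
//
// A block used as a double stack therefore cannot serve as a ring buffer (and
// vice versa) until its 2nd vector empties again.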
11365 
11366 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11367 {
11368  FreeAtOffset(allocation->GetOffset());
11369 }
11370 
11371 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11372 {
11373  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11374  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11375 
11376  if(!suballocations1st.empty())
11377  {
11378  // Freeing the first allocation in 1st vector: mark it as empty at the beginning.
11379  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11380  if(firstSuballoc.offset == offset)
11381  {
11382  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11383  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11384  m_SumFreeSize += firstSuballoc.size;
11385  ++m_1stNullItemsBeginCount;
11386  CleanupAfterFree();
11387  return;
11388  }
11389  }
11390 
11391  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11392  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11393  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11394  {
11395  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11396  if(lastSuballoc.offset == offset)
11397  {
11398  m_SumFreeSize += lastSuballoc.size;
11399  suballocations2nd.pop_back();
11400  CleanupAfterFree();
11401  return;
11402  }
11403  }
11404  // Last allocation in 1st vector.
11405  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11406  {
11407  VmaSuballocation& lastSuballoc = suballocations1st.back();
11408  if(lastSuballoc.offset == offset)
11409  {
11410  m_SumFreeSize += lastSuballoc.size;
11411  suballocations1st.pop_back();
11412  CleanupAfterFree();
11413  return;
11414  }
11415  }
11416 
11417  // Item from the middle of 1st vector.
11418  {
11419  VmaSuballocation refSuballoc;
11420  refSuballoc.offset = offset;
11421  // Rest of the members stay uninitialized intentionally for better performance.
11422  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11423  suballocations1st.begin() + m_1stNullItemsBeginCount,
11424  suballocations1st.end(),
11425  refSuballoc,
11426  VmaSuballocationOffsetLess());
11427  if(it != suballocations1st.end())
11428  {
11429  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11430  it->hAllocation = VK_NULL_HANDLE;
11431  ++m_1stNullItemsMiddleCount;
11432  m_SumFreeSize += it->size;
11433  CleanupAfterFree();
11434  return;
11435  }
11436  }
11437 
11438  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11439  {
11440  // Item from the middle of 2nd vector.
11441  VmaSuballocation refSuballoc;
11442  refSuballoc.offset = offset;
11443  // Rest of the members stay uninitialized intentionally for better performance.
11444  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11445  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11446  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11447  if(it != suballocations2nd.end())
11448  {
11449  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11450  it->hAllocation = VK_NULL_HANDLE;
11451  ++m_2ndNullItemsCount;
11452  m_SumFreeSize += it->size;
11453  CleanupAfterFree();
11454  return;
11455  }
11456  }
11457 
11458  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11459 }
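// VmaBinaryFindSorted() above performs a lower_bound-style search over a
// vector kept sorted by offset. An equivalent sketch using the standard
// library (illustrative only):
//
//     #include <algorithm>
//     // suballocs is sorted by offset, ascending:
//     auto it = std::lower_bound(suballocs.begin(), suballocs.end(), offset,
//         [](const VmaSuballocation& s, VkDeviceSize off) { return s.offset < off; });
//     const bool found = it != suballocs.end() && it->offset == offset;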
11460 
11461 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11462 {
11463  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11464  const size_t suballocCount = AccessSuballocations1st().size();
11465  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11466 }
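// Arithmetic behind the heuristic above: nullItemCount * 2 >=
// (suballocCount - nullItemCount) * 3 rearranges to
// 5 * nullItemCount >= 3 * suballocCount, i.e. compaction kicks in once at
// least 60% of a sufficiently large (> 32 items) 1st vector is null items.
// Example: with 100 items, 60 or more null items trigger compaction.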
11467 
11468 void VmaBlockMetadata_Linear::CleanupAfterFree()
11469 {
11470  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11471  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11472 
11473  if(IsEmpty())
11474  {
11475  suballocations1st.clear();
11476  suballocations2nd.clear();
11477  m_1stNullItemsBeginCount = 0;
11478  m_1stNullItemsMiddleCount = 0;
11479  m_2ndNullItemsCount = 0;
11480  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11481  }
11482  else
11483  {
11484  const size_t suballoc1stCount = suballocations1st.size();
11485  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11486  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11487 
11488  // Find more null items at the beginning of 1st vector.
11489  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11490  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11491  {
11492  ++m_1stNullItemsBeginCount;
11493  --m_1stNullItemsMiddleCount;
11494  }
11495 
11496  // Find more null items at the end of 1st vector.
11497  while(m_1stNullItemsMiddleCount > 0 &&
11498  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11499  {
11500  --m_1stNullItemsMiddleCount;
11501  suballocations1st.pop_back();
11502  }
11503 
11504  // Find more null items at the end of 2nd vector.
11505  while(m_2ndNullItemsCount > 0 &&
11506  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11507  {
11508  --m_2ndNullItemsCount;
11509  suballocations2nd.pop_back();
11510  }
11511 
11512  // Find more null items at the beginning of 2nd vector.
11513  while(m_2ndNullItemsCount > 0 &&
11514  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11515  {
11516  --m_2ndNullItemsCount;
11517  VmaVectorRemove(suballocations2nd, 0);
11518  }
11519 
11520  if(ShouldCompact1st())
11521  {
11522  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11523  size_t srcIndex = m_1stNullItemsBeginCount;
11524  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11525  {
11526  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11527  {
11528  ++srcIndex;
11529  }
11530  if(dstIndex != srcIndex)
11531  {
11532  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11533  }
11534  ++srcIndex;
11535  }
11536  suballocations1st.resize(nonNullItemCount);
11537  m_1stNullItemsBeginCount = 0;
11538  m_1stNullItemsMiddleCount = 0;
11539  }
11540 
11541  // 2nd vector became empty.
11542  if(suballocations2nd.empty())
11543  {
11544  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11545  }
11546 
11547  // 1st vector became empty.
11548  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11549  {
11550  suballocations1st.clear();
11551  m_1stNullItemsBeginCount = 0;
11552 
11553  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11554  {
11555  // Swap 1st with 2nd. Now 2nd is empty.
11556  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11557  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11558  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11559  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11560  {
11561  ++m_1stNullItemsBeginCount;
11562  --m_1stNullItemsMiddleCount;
11563  }
11564  m_2ndNullItemsCount = 0;
11565  m_1stVectorIndex ^= 1;
11566  }
11567  }
11568  }
11569 
11570  VMA_HEAVY_ASSERT(Validate());
11571 }
11572 
11573 
11574 ////////////////////////////////////////////////////////////////////////////////
11575 // class VmaBlockMetadata_Buddy
11576 
11577 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11578  VmaBlockMetadata(hAllocator),
11579  m_Root(VMA_NULL),
11580  m_AllocationCount(0),
11581  m_FreeCount(1),
11582  m_SumFreeSize(0)
11583 {
11584  memset(m_FreeList, 0, sizeof(m_FreeList));
11585 }
11586 
11587 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11588 {
11589  DeleteNode(m_Root);
11590 }
11591 
11592 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11593 {
11594  VmaBlockMetadata::Init(size);
11595 
11596  m_UsableSize = VmaPrevPow2(size);
11597  m_SumFreeSize = m_UsableSize;
11598 
11599  // Calculate m_LevelCount.
11600  m_LevelCount = 1;
11601  while(m_LevelCount < MAX_LEVELS &&
11602  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11603  {
11604  ++m_LevelCount;
11605  }
11606 
11607  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11608  rootNode->offset = 0;
11609  rootNode->type = Node::TYPE_FREE;
11610  rootNode->parent = VMA_NULL;
11611  rootNode->buddy = VMA_NULL;
11612 
11613  m_Root = rootNode;
11614  AddToFreeListFront(0, rootNode);
11615 }
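// Size math used above (sketch): VmaPrevPow2() rounds the block size down to a
// power of two, so a 10 MiB block yields m_UsableSize == 8 MiB, and the
// remaining 2 MiB is reported separately as unusable space. Node sizes then
// halve per level, e.g. with m_UsableSize == 8 MiB:
//
//     level 0: 8 MiB,  level 1: 4 MiB,  level 2: 2 MiB,  level 3: 1 MiB, ...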
11616 
11617 bool VmaBlockMetadata_Buddy::Validate() const
11618 {
11619  // Validate tree.
11620  ValidationContext ctx;
11621  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11622  {
11623  VMA_VALIDATE(false && "ValidateNode failed.");
11624  }
11625  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11626  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11627 
11628  // Validate free node lists.
11629  for(uint32_t level = 0; level < m_LevelCount; ++level)
11630  {
11631  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11632  m_FreeList[level].front->free.prev == VMA_NULL);
11633 
11634  for(Node* node = m_FreeList[level].front;
11635  node != VMA_NULL;
11636  node = node->free.next)
11637  {
11638  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11639 
11640  if(node->free.next == VMA_NULL)
11641  {
11642  VMA_VALIDATE(m_FreeList[level].back == node);
11643  }
11644  else
11645  {
11646  VMA_VALIDATE(node->free.next->free.prev == node);
11647  }
11648  }
11649  }
11650 
11651  // Validate that free lists at higher levels are empty.
11652  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11653  {
11654  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11655  }
11656 
11657  return true;
11658 }
11659 
11660 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11661 {
11662  for(uint32_t level = 0; level < m_LevelCount; ++level)
11663  {
11664  if(m_FreeList[level].front != VMA_NULL)
11665  {
11666  return LevelToNodeSize(level);
11667  }
11668  }
11669  return 0;
11670 }
11671 
11672 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11673 {
11674  const VkDeviceSize unusableSize = GetUnusableSize();
11675 
11676  outInfo.blockCount = 1;
11677 
11678  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11679  outInfo.usedBytes = outInfo.unusedBytes = 0;
11680 
11681  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11682  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11683  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11684 
11685  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11686 
11687  if(unusableSize > 0)
11688  {
11689  ++outInfo.unusedRangeCount;
11690  outInfo.unusedBytes += unusableSize;
11691  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11692  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11693  }
11694 }
11695 
11696 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11697 {
11698  const VkDeviceSize unusableSize = GetUnusableSize();
11699 
11700  inoutStats.size += GetSize();
11701  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11702  inoutStats.allocationCount += m_AllocationCount;
11703  inoutStats.unusedRangeCount += m_FreeCount;
11704  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11705 
11706  if(unusableSize > 0)
11707  {
11708  ++inoutStats.unusedRangeCount;
11709  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11710  }
11711 }
11712 
11713 #if VMA_STATS_STRING_ENABLED
11714 
11715 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11716 {
11717  // TODO optimize
11718  VmaStatInfo stat;
11719  CalcAllocationStatInfo(stat);
11720 
11721  PrintDetailedMap_Begin(
11722  json,
11723  stat.unusedBytes,
11724  stat.allocationCount,
11725  stat.unusedRangeCount);
11726 
11727  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11728 
11729  const VkDeviceSize unusableSize = GetUnusableSize();
11730  if(unusableSize > 0)
11731  {
11732  PrintDetailedMap_UnusedRange(json,
11733  m_UsableSize, // offset
11734  unusableSize); // size
11735  }
11736 
11737  PrintDetailedMap_End(json);
11738 }
11739 
11740 #endif // #if VMA_STATS_STRING_ENABLED
11741 
11742 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11743  uint32_t currentFrameIndex,
11744  uint32_t frameInUseCount,
11745  VkDeviceSize bufferImageGranularity,
11746  VkDeviceSize allocSize,
11747  VkDeviceSize allocAlignment,
11748  bool upperAddress,
11749  VmaSuballocationType allocType,
11750  bool canMakeOtherLost,
11751  uint32_t strategy,
11752  VmaAllocationRequest* pAllocationRequest)
11753 {
11754  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11755 
11756  // Simple way to respect bufferImageGranularity. May be optimized some day.
11757  // Whenever it might be an OPTIMAL image...
11758  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11759  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11760  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11761  {
11762  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11763  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11764  }
11765 
11766  if(allocSize > m_UsableSize)
11767  {
11768  return false;
11769  }
11770 
11771  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11772  for(uint32_t level = targetLevel + 1; level--; )
11773  {
11774  for(Node* freeNode = m_FreeList[level].front;
11775  freeNode != VMA_NULL;
11776  freeNode = freeNode->free.next)
11777  {
11778  if(freeNode->offset % allocAlignment == 0)
11779  {
11780  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11781  pAllocationRequest->offset = freeNode->offset;
11782  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11783  pAllocationRequest->sumItemSize = 0;
11784  pAllocationRequest->itemsToMakeLostCount = 0;
11785  pAllocationRequest->customData = (void*)(uintptr_t)level;
11786  return true;
11787  }
11788  }
11789  }
11790 
11791  return false;
11792 }
11793 
11794 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11795  uint32_t currentFrameIndex,
11796  uint32_t frameInUseCount,
11797  VmaAllocationRequest* pAllocationRequest)
11798 {
11799  /*
11800  Lost allocations are not supported in buddy allocator at the moment.
11801  Support might be added in the future.
11802  */
11803  return pAllocationRequest->itemsToMakeLostCount == 0;
11804 }
11805 
11806 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11807 {
11808  /*
11809  Lost allocations are not supported in buddy allocator at the moment.
11810  Support might be added in the future.
11811  */
11812  return 0;
11813 }
11814 
11815 void VmaBlockMetadata_Buddy::Alloc(
11816  const VmaAllocationRequest& request,
11817  VmaSuballocationType type,
11818  VkDeviceSize allocSize,
11819  VmaAllocation hAllocation)
11820 {
11821  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11822 
11823  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11824  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11825 
11826  Node* currNode = m_FreeList[currLevel].front;
11827  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11828  while(currNode->offset != request.offset)
11829  {
11830  currNode = currNode->free.next;
11831  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11832  }
11833 
11834  // Go down, splitting free nodes.
11835  while(currLevel < targetLevel)
11836  {
11837  // currNode is already first free node at currLevel.
11838  // Remove it from list of free nodes at this currLevel.
11839  RemoveFromFreeList(currLevel, currNode);
11840 
11841  const uint32_t childrenLevel = currLevel + 1;
11842 
11843  // Create two free sub-nodes.
11844  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11845  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11846 
11847  leftChild->offset = currNode->offset;
11848  leftChild->type = Node::TYPE_FREE;
11849  leftChild->parent = currNode;
11850  leftChild->buddy = rightChild;
11851 
11852  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11853  rightChild->type = Node::TYPE_FREE;
11854  rightChild->parent = currNode;
11855  rightChild->buddy = leftChild;
11856 
11857  // Convert current currNode to split type.
11858  currNode->type = Node::TYPE_SPLIT;
11859  currNode->split.leftChild = leftChild;
11860 
11861  // Add child nodes to free list. Order is important!
11862  AddToFreeListFront(childrenLevel, rightChild);
11863  AddToFreeListFront(childrenLevel, leftChild);
11864 
11865  ++m_FreeCount;
11866  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11867  ++currLevel;
11868  currNode = m_FreeList[currLevel].front;
11869 
11870  /*
11871  We can be sure that currNode, as left child of node previously split,
11872  also fulfills the alignment requirement.
11873  */
11874  }
11875 
11876  // Remove from free list.
11877  VMA_ASSERT(currLevel == targetLevel &&
11878  currNode != VMA_NULL &&
11879  currNode->type == Node::TYPE_FREE);
11880  RemoveFromFreeList(currLevel, currNode);
11881 
11882  // Convert to allocation node.
11883  currNode->type = Node::TYPE_ALLOCATION;
11884  currNode->allocation.alloc = hAllocation;
11885 
11886  ++m_AllocationCount;
11887  --m_FreeCount;
11888  m_SumFreeSize -= allocSize;
11889 }
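// Worked example of the split walk above (usable size 1024, request 200, so
// targetLevel == 2 because 256 is the smallest node size >= 200):
//
//     [           1024           ]   level 0: split
//     [    512     ][    512     ]   level 1: split the left child again
//     [ 256 ][ 256 ][    512     ]   level 2: left 256 becomes TYPE_ALLOCATION
//
// The allocated node's buddy and the right 512 node remain on the free lists.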
11890 
11891 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11892 {
11893  if(node->type == Node::TYPE_SPLIT)
11894  {
11895  DeleteNode(node->split.leftChild->buddy);
11896  DeleteNode(node->split.leftChild);
11897  }
11898 
11899  vma_delete(GetAllocationCallbacks(), node);
11900 }
11901 
11902 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11903 {
11904  VMA_VALIDATE(level < m_LevelCount);
11905  VMA_VALIDATE(curr->parent == parent);
11906  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11907  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11908  switch(curr->type)
11909  {
11910  case Node::TYPE_FREE:
11911  // curr->free.prev, next are validated separately.
11912  ctx.calculatedSumFreeSize += levelNodeSize;
11913  ++ctx.calculatedFreeCount;
11914  break;
11915  case Node::TYPE_ALLOCATION:
11916  ++ctx.calculatedAllocationCount;
 11917  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
 11918  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11919  break;
11920  case Node::TYPE_SPLIT:
11921  {
11922  const uint32_t childrenLevel = level + 1;
11923  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11924  const Node* const leftChild = curr->split.leftChild;
11925  VMA_VALIDATE(leftChild != VMA_NULL);
11926  VMA_VALIDATE(leftChild->offset == curr->offset);
11927  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11928  {
11929  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11930  }
11931  const Node* const rightChild = leftChild->buddy;
11932  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11933  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11934  {
11935  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11936  }
11937  }
11938  break;
11939  default:
11940  return false;
11941  }
11942 
11943  return true;
11944 }
11945 
11946 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11947 {
11948  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11949  uint32_t level = 0;
11950  VkDeviceSize currLevelNodeSize = m_UsableSize;
11951  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11952  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11953  {
11954  ++level;
11955  currLevelNodeSize = nextLevelNodeSize;
11956  nextLevelNodeSize = currLevelNodeSize >> 1;
11957  }
11958  return level;
11959 }
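/*
Worked example for the loop above (the 256-byte usable size is hypothetical):
with m_UsableSize = 256 and m_LevelCount = 4, node sizes are 256, 128, 64, 32
for levels 0..3, and:

    AllocSizeToLevel(200) == 0   // 200 > 128, needs a whole level-0 node
    AllocSizeToLevel(128) == 1   // fits a level-1 node exactly
    AllocSizeToLevel(20)  == 3   // clamped to the deepest level

The result is the deepest level whose node size still holds allocSize.
*/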
11960 
11961 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11962 {
11963  // Find node and level.
11964  Node* node = m_Root;
11965  VkDeviceSize nodeOffset = 0;
11966  uint32_t level = 0;
11967  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11968  while(node->type == Node::TYPE_SPLIT)
11969  {
11970  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11971  if(offset < nodeOffset + nextLevelSize)
11972  {
11973  node = node->split.leftChild;
11974  }
11975  else
11976  {
11977  node = node->split.leftChild->buddy;
11978  nodeOffset += nextLevelSize;
11979  }
11980  ++level;
11981  levelNodeSize = nextLevelSize;
11982  }
11983 
11984  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11985  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11986 
11987  ++m_FreeCount;
11988  --m_AllocationCount;
11989  m_SumFreeSize += alloc->GetSize();
11990 
11991  node->type = Node::TYPE_FREE;
11992 
11993  // Join free nodes if possible.
11994  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11995  {
11996  RemoveFromFreeList(level, node->buddy);
11997  Node* const parent = node->parent;
11998 
11999  vma_delete(GetAllocationCallbacks(), node->buddy);
12000  vma_delete(GetAllocationCallbacks(), node);
12001  parent->type = Node::TYPE_FREE;
12002 
12003  node = parent;
12004  --level;
12005  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12006  --m_FreeCount;
12007  }
12008 
12009  AddToFreeListFront(level, node);
12010 }
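/*
Freeing mirrors the split cascade in Alloc(): continuing the hypothetical
layout from the example above, freeing the level-2 allocation at offset 0
while its buddy [64..128) is free merges both children back into a free
level-1 node; if [128..256) is also free, the loop repeats and the whole
level-0 node becomes free again. Every merge deletes two child nodes and
decrements m_FreeCount by one, exactly undoing one split.
*/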
12011 
12012 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12013 {
12014  switch(node->type)
12015  {
12016  case Node::TYPE_FREE:
12017  ++outInfo.unusedRangeCount;
12018  outInfo.unusedBytes += levelNodeSize;
12019  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
 12020  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12021  break;
12022  case Node::TYPE_ALLOCATION:
12023  {
12024  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12025  ++outInfo.allocationCount;
12026  outInfo.usedBytes += allocSize;
12027  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
 12028  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12029 
12030  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12031  if(unusedRangeSize > 0)
12032  {
12033  ++outInfo.unusedRangeCount;
12034  outInfo.unusedBytes += unusedRangeSize;
12035  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
 12036  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12037  }
12038  }
12039  break;
12040  case Node::TYPE_SPLIT:
12041  {
12042  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12043  const Node* const leftChild = node->split.leftChild;
12044  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12045  const Node* const rightChild = leftChild->buddy;
12046  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12047  }
12048  break;
12049  default:
12050  VMA_ASSERT(0);
12051  }
12052 }
12053 
12054 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12055 {
12056  VMA_ASSERT(node->type == Node::TYPE_FREE);
12057 
12058  // List is empty.
12059  Node* const frontNode = m_FreeList[level].front;
12060  if(frontNode == VMA_NULL)
12061  {
12062  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12063  node->free.prev = node->free.next = VMA_NULL;
12064  m_FreeList[level].front = m_FreeList[level].back = node;
12065  }
12066  else
12067  {
12068  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12069  node->free.prev = VMA_NULL;
12070  node->free.next = frontNode;
12071  frontNode->free.prev = node;
12072  m_FreeList[level].front = node;
12073  }
12074 }
12075 
12076 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12077 {
12078  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12079 
12080  // It is at the front.
12081  if(node->free.prev == VMA_NULL)
12082  {
12083  VMA_ASSERT(m_FreeList[level].front == node);
12084  m_FreeList[level].front = node->free.next;
12085  }
12086  else
12087  {
12088  Node* const prevFreeNode = node->free.prev;
12089  VMA_ASSERT(prevFreeNode->free.next == node);
12090  prevFreeNode->free.next = node->free.next;
12091  }
12092 
12093  // It is at the back.
12094  if(node->free.next == VMA_NULL)
12095  {
12096  VMA_ASSERT(m_FreeList[level].back == node);
12097  m_FreeList[level].back = node->free.prev;
12098  }
12099  else
12100  {
12101  Node* const nextFreeNode = node->free.next;
12102  VMA_ASSERT(nextFreeNode->free.prev == node);
12103  nextFreeNode->free.prev = node->free.prev;
12104  }
12105 }
12106 
12107 #if VMA_STATS_STRING_ENABLED
12108 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12109 {
12110  switch(node->type)
12111  {
12112  case Node::TYPE_FREE:
12113  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12114  break;
12115  case Node::TYPE_ALLOCATION:
12116  {
12117  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12118  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12119  if(allocSize < levelNodeSize)
12120  {
12121  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12122  }
12123  }
12124  break;
12125  case Node::TYPE_SPLIT:
12126  {
12127  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12128  const Node* const leftChild = node->split.leftChild;
12129  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12130  const Node* const rightChild = leftChild->buddy;
12131  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12132  }
12133  break;
12134  default:
12135  VMA_ASSERT(0);
12136  }
12137 }
12138 #endif // #if VMA_STATS_STRING_ENABLED
12139 
12140 
 12141 ////////////////////////////////////////////////////////////////////////////////
 12142 // class VmaDeviceMemoryBlock
12143 
12144 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12145  m_pMetadata(VMA_NULL),
12146  m_MemoryTypeIndex(UINT32_MAX),
12147  m_Id(0),
12148  m_hMemory(VK_NULL_HANDLE),
12149  m_MapCount(0),
12150  m_pMappedData(VMA_NULL)
12151 {
12152 }
12153 
12154 void VmaDeviceMemoryBlock::Init(
12155  VmaAllocator hAllocator,
12156  VmaPool hParentPool,
12157  uint32_t newMemoryTypeIndex,
12158  VkDeviceMemory newMemory,
12159  VkDeviceSize newSize,
12160  uint32_t id,
12161  uint32_t algorithm)
12162 {
12163  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12164 
12165  m_hParentPool = hParentPool;
12166  m_MemoryTypeIndex = newMemoryTypeIndex;
12167  m_Id = id;
12168  m_hMemory = newMemory;
12169 
12170  switch(algorithm)
 12171  {
 12172  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
 12173  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
 12174  break;
 12175  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
 12176  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
 12177  break;
12178  default:
12179  VMA_ASSERT(0);
12180  // Fall-through.
12181  case 0:
12182  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12183  }
12184  m_pMetadata->Init(newSize);
12185 }
12186 
12187 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12188 {
12189  // This is the most important assert in the entire library.
12190  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12191  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12192 
12193  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12194  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12195  m_hMemory = VK_NULL_HANDLE;
12196 
12197  vma_delete(allocator, m_pMetadata);
12198  m_pMetadata = VMA_NULL;
12199 }
12200 
12201 bool VmaDeviceMemoryBlock::Validate() const
12202 {
12203  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12204  (m_pMetadata->GetSize() != 0));
12205 
12206  return m_pMetadata->Validate();
12207 }
12208 
12209 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12210 {
12211  void* pData = nullptr;
12212  VkResult res = Map(hAllocator, 1, &pData);
12213  if(res != VK_SUCCESS)
12214  {
12215  return res;
12216  }
12217 
12218  res = m_pMetadata->CheckCorruption(pData);
12219 
12220  Unmap(hAllocator, 1);
12221 
12222  return res;
12223 }
12224 
12225 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12226 {
12227  if(count == 0)
12228  {
12229  return VK_SUCCESS;
12230  }
12231 
12232  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12233  if(m_MapCount != 0)
12234  {
12235  m_MapCount += count;
12236  VMA_ASSERT(m_pMappedData != VMA_NULL);
12237  if(ppData != VMA_NULL)
12238  {
12239  *ppData = m_pMappedData;
12240  }
12241  return VK_SUCCESS;
12242  }
12243  else
12244  {
12245  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12246  hAllocator->m_hDevice,
12247  m_hMemory,
12248  0, // offset
12249  VK_WHOLE_SIZE,
12250  0, // flags
12251  &m_pMappedData);
12252  if(result == VK_SUCCESS)
12253  {
12254  if(ppData != VMA_NULL)
12255  {
12256  *ppData = m_pMappedData;
12257  }
12258  m_MapCount = count;
12259  }
12260  return result;
12261  }
12262 }
12263 
12264 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12265 {
12266  if(count == 0)
12267  {
12268  return;
12269  }
12270 
12271  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12272  if(m_MapCount >= count)
12273  {
12274  m_MapCount -= count;
12275  if(m_MapCount == 0)
12276  {
12277  m_pMappedData = VMA_NULL;
12278  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12279  }
12280  }
12281  else
12282  {
12283  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12284  }
12285 }
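/*
m_MapCount makes Map()/Unmap() reference-counted per block: only the first
Map() calls vkMapMemory and only the matching last Unmap() calls
vkUnmapMemory. A hypothetical usage sketch:

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    block.Map(hAllocator, 1, &p1);  // maps the VkDeviceMemory, m_MapCount == 1
    block.Map(hAllocator, 1, &p2);  // reuses mapping, p2 == p1, m_MapCount == 2
    block.Unmap(hAllocator, 1);     // m_MapCount == 1, memory stays mapped
    block.Unmap(hAllocator, 1);     // m_MapCount == 0, vkUnmapMemory is called
*/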
12286 
12287 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12288 {
12289  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12290  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12291 
12292  void* pData;
12293  VkResult res = Map(hAllocator, 1, &pData);
12294  if(res != VK_SUCCESS)
12295  {
12296  return res;
12297  }
12298 
12299  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12300  VmaWriteMagicValue(pData, allocOffset + allocSize);
12301 
12302  Unmap(hAllocator, 1);
12303 
12304  return VK_SUCCESS;
12305 }
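/*
Layout assumed by the two writes above, with M = VMA_DEBUG_MARGIN:

    [allocOffset - M, allocOffset)              margin filled with magic values
    [allocOffset, allocOffset + allocSize)      the allocation itself
    [allocOffset + allocSize, ... + M)          margin filled with magic values

ValidateMagicValueAroundAllocation() below re-reads both margins on free; any
other value found there means some code wrote outside the allocation bounds.
*/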
12306 
12307 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12308 {
12309  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12310  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12311 
12312  void* pData;
12313  VkResult res = Map(hAllocator, 1, &pData);
12314  if(res != VK_SUCCESS)
12315  {
12316  return res;
12317  }
12318 
12319  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12320  {
12321  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12322  }
12323  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12324  {
12325  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12326  }
12327 
12328  Unmap(hAllocator, 1);
12329 
12330  return VK_SUCCESS;
12331 }
12332 
12333 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12334  const VmaAllocator hAllocator,
12335  const VmaAllocation hAllocation,
12336  VkDeviceSize allocationLocalOffset,
12337  VkBuffer hBuffer,
12338  const void* pNext)
12339 {
12340  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12341  hAllocation->GetBlock() == this);
12342  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12343  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12344  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12345  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12346  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12347  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12348 }
12349 
12350 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12351  const VmaAllocator hAllocator,
12352  const VmaAllocation hAllocation,
12353  VkDeviceSize allocationLocalOffset,
12354  VkImage hImage,
12355  const void* pNext)
12356 {
12357  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12358  hAllocation->GetBlock() == this);
12359  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12360  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12361  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12362  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12363  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12364  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12365 }
12366 
12367 static void InitStatInfo(VmaStatInfo& outInfo)
12368 {
12369  memset(&outInfo, 0, sizeof(outInfo));
12370  outInfo.allocationSizeMin = UINT64_MAX;
12371  outInfo.unusedRangeSizeMin = UINT64_MAX;
12372 }
12373 
12374 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12375 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12376 {
12377  inoutInfo.blockCount += srcInfo.blockCount;
12378  inoutInfo.allocationCount += srcInfo.allocationCount;
12379  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12380  inoutInfo.usedBytes += srcInfo.usedBytes;
12381  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12382  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12383  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12384  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12385  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12386 }
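/*
Merging partial VmaStatInfo records this way is order-independent because
InitStatInfo() seeds the min fields with UINT64_MAX and the max fields with
0 - the identity elements of VMA_MIN and VMA_MAX - so an empty record
contributes nothing and per-block records can be folded in any order.
*/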
12387 
12388 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12389 {
12390  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12391  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12392  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12393  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12394 }
12395 
12396 VmaPool_T::VmaPool_T(
12397  VmaAllocator hAllocator,
12398  const VmaPoolCreateInfo& createInfo,
12399  VkDeviceSize preferredBlockSize) :
12400  m_BlockVector(
12401  hAllocator,
12402  this, // hParentPool
12403  createInfo.memoryTypeIndex,
12404  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12405  createInfo.minBlockCount,
12406  createInfo.maxBlockCount,
12407  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12408  createInfo.frameInUseCount,
12409  createInfo.blockSize != 0, // explicitBlockSize
12410  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
12411  m_Id(0),
12412  m_Name(VMA_NULL)
12413 {
12414 }
12415 
12416 VmaPool_T::~VmaPool_T()
12417 {
12418 }
12419 
12420 void VmaPool_T::SetName(const char* pName)
12421 {
12422  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12423  VmaFreeString(allocs, m_Name);
12424 
12425  if(pName != VMA_NULL)
12426  {
12427  m_Name = VmaCreateStringCopy(allocs, pName);
12428  }
12429  else
12430  {
12431  m_Name = VMA_NULL;
12432  }
12433 }
12434 
12435 #if VMA_STATS_STRING_ENABLED
12436 
12437 #endif // #if VMA_STATS_STRING_ENABLED
12438 
12439 VmaBlockVector::VmaBlockVector(
12440  VmaAllocator hAllocator,
12441  VmaPool hParentPool,
12442  uint32_t memoryTypeIndex,
12443  VkDeviceSize preferredBlockSize,
12444  size_t minBlockCount,
12445  size_t maxBlockCount,
12446  VkDeviceSize bufferImageGranularity,
12447  uint32_t frameInUseCount,
12448  bool explicitBlockSize,
12449  uint32_t algorithm) :
12450  m_hAllocator(hAllocator),
12451  m_hParentPool(hParentPool),
12452  m_MemoryTypeIndex(memoryTypeIndex),
12453  m_PreferredBlockSize(preferredBlockSize),
12454  m_MinBlockCount(minBlockCount),
12455  m_MaxBlockCount(maxBlockCount),
12456  m_BufferImageGranularity(bufferImageGranularity),
12457  m_FrameInUseCount(frameInUseCount),
12458  m_ExplicitBlockSize(explicitBlockSize),
12459  m_Algorithm(algorithm),
12460  m_HasEmptyBlock(false),
12461  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12462  m_NextBlockId(0)
12463 {
12464 }
12465 
12466 VmaBlockVector::~VmaBlockVector()
12467 {
12468  for(size_t i = m_Blocks.size(); i--; )
12469  {
12470  m_Blocks[i]->Destroy(m_hAllocator);
12471  vma_delete(m_hAllocator, m_Blocks[i]);
12472  }
12473 }
12474 
12475 VkResult VmaBlockVector::CreateMinBlocks()
12476 {
12477  for(size_t i = 0; i < m_MinBlockCount; ++i)
12478  {
12479  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12480  if(res != VK_SUCCESS)
12481  {
12482  return res;
12483  }
12484  }
12485  return VK_SUCCESS;
12486 }
12487 
12488 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12489 {
12490  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12491 
12492  const size_t blockCount = m_Blocks.size();
12493 
12494  pStats->size = 0;
12495  pStats->unusedSize = 0;
12496  pStats->allocationCount = 0;
12497  pStats->unusedRangeCount = 0;
12498  pStats->unusedRangeSizeMax = 0;
12499  pStats->blockCount = blockCount;
12500 
12501  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12502  {
12503  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12504  VMA_ASSERT(pBlock);
12505  VMA_HEAVY_ASSERT(pBlock->Validate());
12506  pBlock->m_pMetadata->AddPoolStats(*pStats);
12507  }
12508 }
12509 
12510 bool VmaBlockVector::IsEmpty()
12511 {
12512  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12513  return m_Blocks.empty();
12514 }
12515 
12516 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12517 {
12518  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12519  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12520  (VMA_DEBUG_MARGIN > 0) &&
12521  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12522  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12523 }
12524 
12525 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12526 
12527 VkResult VmaBlockVector::Allocate(
12528  uint32_t currentFrameIndex,
12529  VkDeviceSize size,
12530  VkDeviceSize alignment,
12531  const VmaAllocationCreateInfo& createInfo,
12532  VmaSuballocationType suballocType,
12533  size_t allocationCount,
12534  VmaAllocation* pAllocations)
12535 {
12536  size_t allocIndex;
12537  VkResult res = VK_SUCCESS;
12538 
12539  if(IsCorruptionDetectionEnabled())
12540  {
12541  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12542  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12543  }
12544 
12545  {
12546  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12547  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12548  {
12549  res = AllocatePage(
12550  currentFrameIndex,
12551  size,
12552  alignment,
12553  createInfo,
12554  suballocType,
12555  pAllocations + allocIndex);
12556  if(res != VK_SUCCESS)
12557  {
12558  break;
12559  }
12560  }
12561  }
12562 
12563  if(res != VK_SUCCESS)
12564  {
12565  // Free all already created allocations.
12566  while(allocIndex--)
12567  {
12568  Free(pAllocations[allocIndex]);
12569  }
12570  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12571  }
12572 
12573  return res;
12574 }
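/*
Note that this function is all-or-nothing with respect to allocationCount: if
any page in the batch fails, every page allocated so far is freed and the
whole pAllocations array is zeroed, so callers never observe partial success.
*/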
12575 
12576 VkResult VmaBlockVector::AllocatePage(
12577  uint32_t currentFrameIndex,
12578  VkDeviceSize size,
12579  VkDeviceSize alignment,
12580  const VmaAllocationCreateInfo& createInfo,
12581  VmaSuballocationType suballocType,
12582  VmaAllocation* pAllocation)
12583 {
12584  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12585  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12586  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12587  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12588 
12589  VkDeviceSize freeMemory;
12590  {
12591  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12592  VmaBudget heapBudget = {};
12593  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12594  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12595  }
12596 
12597  const bool canFallbackToDedicated = !IsCustomPool();
12598  const bool canCreateNewBlock =
12599  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12600  (m_Blocks.size() < m_MaxBlockCount) &&
12601  (freeMemory >= size || !canFallbackToDedicated);
12602  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12603 
 12604  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
 12605  // which in turn is available only when maxBlockCount = 1.
12606  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12607  {
12608  canMakeOtherLost = false;
12609  }
12610 
12611  // Upper address can only be used with linear allocator and within single memory block.
12612  if(isUpperAddress &&
12613  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12614  {
12615  return VK_ERROR_FEATURE_NOT_PRESENT;
12616  }
12617 
12618  // Validate strategy.
12619  switch(strategy)
12620  {
 12621  case 0:
 12622  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
 12623  break;
 12624  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
 12625  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
 12626  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
 12627  break;
12628  default:
12629  return VK_ERROR_FEATURE_NOT_PRESENT;
12630  }
12631 
 12632  // Early reject: requested allocation size is larger than maximum block size for this block vector.
12633  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12634  {
12635  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12636  }
12637 
12638  /*
 12639  Under certain conditions, this whole section can be skipped for optimization, so
12640  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12641  e.g. for custom pools with linear algorithm.
12642  */
12643  if(!canMakeOtherLost || canCreateNewBlock)
12644  {
12645  // 1. Search existing allocations. Try to allocate without making other allocations lost.
 12646  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
 12647  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
 12648 
12649  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12650  {
12651  // Use only last block.
12652  if(!m_Blocks.empty())
12653  {
12654  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12655  VMA_ASSERT(pCurrBlock);
12656  VkResult res = AllocateFromBlock(
12657  pCurrBlock,
12658  currentFrameIndex,
12659  size,
12660  alignment,
12661  allocFlagsCopy,
12662  createInfo.pUserData,
12663  suballocType,
12664  strategy,
12665  pAllocation);
12666  if(res == VK_SUCCESS)
12667  {
12668  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12669  return VK_SUCCESS;
12670  }
12671  }
12672  }
12673  else
 12674  {
 12675  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
 12676  {
12677  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12678  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12679  {
12680  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12681  VMA_ASSERT(pCurrBlock);
12682  VkResult res = AllocateFromBlock(
12683  pCurrBlock,
12684  currentFrameIndex,
12685  size,
12686  alignment,
12687  allocFlagsCopy,
12688  createInfo.pUserData,
12689  suballocType,
12690  strategy,
12691  pAllocation);
12692  if(res == VK_SUCCESS)
12693  {
12694  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12695  return VK_SUCCESS;
12696  }
12697  }
12698  }
12699  else // WORST_FIT, FIRST_FIT
12700  {
12701  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12702  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12703  {
12704  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12705  VMA_ASSERT(pCurrBlock);
12706  VkResult res = AllocateFromBlock(
12707  pCurrBlock,
12708  currentFrameIndex,
12709  size,
12710  alignment,
12711  allocFlagsCopy,
12712  createInfo.pUserData,
12713  suballocType,
12714  strategy,
12715  pAllocation);
12716  if(res == VK_SUCCESS)
12717  {
12718  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12719  return VK_SUCCESS;
12720  }
12721  }
12722  }
12723  }
12724 
12725  // 2. Try to create new block.
12726  if(canCreateNewBlock)
12727  {
12728  // Calculate optimal size for new block.
12729  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12730  uint32_t newBlockSizeShift = 0;
12731  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12732 
12733  if(!m_ExplicitBlockSize)
12734  {
12735  // Allocate 1/8, 1/4, 1/2 as first blocks.
12736  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12737  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12738  {
12739  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12740  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12741  {
12742  newBlockSize = smallerNewBlockSize;
12743  ++newBlockSizeShift;
12744  }
12745  else
12746  {
12747  break;
12748  }
12749  }
12750  }
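/*
Worked example of the heuristic above (numbers hypothetical): with
m_PreferredBlockSize = 256 MiB, no existing blocks, and a 1 MiB request, the
loop halves three times and the first block is created at 32 MiB. A later
40 MiB request would stop at 128 MiB, because 64 MiB fails the
smallerNewBlockSize >= size * 2 test. Early blocks thus stay small, while
block size converges to the preferred size as usage grows.
*/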
12751 
12752  size_t newBlockIndex = 0;
12753  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12754  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12755  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12756  if(!m_ExplicitBlockSize)
12757  {
12758  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12759  {
12760  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12761  if(smallerNewBlockSize >= size)
12762  {
12763  newBlockSize = smallerNewBlockSize;
12764  ++newBlockSizeShift;
12765  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12766  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12767  }
12768  else
12769  {
12770  break;
12771  }
12772  }
12773  }
12774 
12775  if(res == VK_SUCCESS)
12776  {
12777  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12778  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12779 
12780  res = AllocateFromBlock(
12781  pBlock,
12782  currentFrameIndex,
12783  size,
12784  alignment,
12785  allocFlagsCopy,
12786  createInfo.pUserData,
12787  suballocType,
12788  strategy,
12789  pAllocation);
12790  if(res == VK_SUCCESS)
12791  {
12792  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12793  return VK_SUCCESS;
12794  }
12795  else
12796  {
12797  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12799  }
12800  }
12801  }
12802  }
12803 
12804  // 3. Try to allocate from existing blocks with making other allocations lost.
12805  if(canMakeOtherLost)
12806  {
12807  uint32_t tryIndex = 0;
12808  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12809  {
12810  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12811  VmaAllocationRequest bestRequest = {};
12812  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12813 
 12814  // 1. Search existing allocations.
 12815  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
 12816  {
12817  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12818  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12819  {
12820  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12821  VMA_ASSERT(pCurrBlock);
12822  VmaAllocationRequest currRequest = {};
12823  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12824  currentFrameIndex,
12825  m_FrameInUseCount,
12826  m_BufferImageGranularity,
12827  size,
12828  alignment,
12829  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12830  suballocType,
12831  canMakeOtherLost,
12832  strategy,
12833  &currRequest))
12834  {
12835  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12836  if(pBestRequestBlock == VMA_NULL ||
12837  currRequestCost < bestRequestCost)
12838  {
12839  pBestRequestBlock = pCurrBlock;
12840  bestRequest = currRequest;
12841  bestRequestCost = currRequestCost;
12842 
12843  if(bestRequestCost == 0)
12844  {
12845  break;
12846  }
12847  }
12848  }
12849  }
12850  }
12851  else // WORST_FIT, FIRST_FIT
12852  {
12853  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12854  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12855  {
12856  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12857  VMA_ASSERT(pCurrBlock);
12858  VmaAllocationRequest currRequest = {};
12859  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12860  currentFrameIndex,
12861  m_FrameInUseCount,
12862  m_BufferImageGranularity,
12863  size,
12864  alignment,
12865  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12866  suballocType,
12867  canMakeOtherLost,
12868  strategy,
12869  &currRequest))
12870  {
12871  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12872  if(pBestRequestBlock == VMA_NULL ||
 12873  currRequestCost < bestRequestCost ||
 12874  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
 12875  {
12876  pBestRequestBlock = pCurrBlock;
12877  bestRequest = currRequest;
12878  bestRequestCost = currRequestCost;
12879 
 12880  if(bestRequestCost == 0 ||
 12881  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
 12882  {
12883  break;
12884  }
12885  }
12886  }
12887  }
12888  }
12889 
12890  if(pBestRequestBlock != VMA_NULL)
12891  {
12892  if(mapped)
12893  {
12894  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12895  if(res != VK_SUCCESS)
12896  {
12897  return res;
12898  }
12899  }
12900 
12901  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12902  currentFrameIndex,
12903  m_FrameInUseCount,
12904  &bestRequest))
12905  {
12906  // Allocate from this pBlock.
12907  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12908  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12909  UpdateHasEmptyBlock();
12910  (*pAllocation)->InitBlockAllocation(
12911  pBestRequestBlock,
12912  bestRequest.offset,
12913  alignment,
12914  size,
12915  m_MemoryTypeIndex,
12916  suballocType,
12917  mapped,
12918  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12919  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12920  VMA_DEBUG_LOG(" Returned from existing block");
12921  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12922  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12923  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12924  {
12925  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12926  }
12927  if(IsCorruptionDetectionEnabled())
12928  {
12929  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12930  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12931  }
12932  return VK_SUCCESS;
12933  }
12934  // else: Some allocations must have been touched while we are here. Next try.
12935  }
12936  else
12937  {
12938  // Could not find place in any of the blocks - break outer loop.
12939  break;
12940  }
12941  }
 12942  /* Maximum number of tries exceeded - a very unlikely event: it means many other
 12943  threads are simultaneously touching allocations, making it impossible to make them
 12944  lost at the same time as we try to allocate. */
12945  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12946  {
12947  return VK_ERROR_TOO_MANY_OBJECTS;
12948  }
12949  }
12950 
12951  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12952 }
12953 
12954 void VmaBlockVector::Free(
12955  const VmaAllocation hAllocation)
12956 {
12957  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12958 
12959  bool budgetExceeded = false;
12960  {
12961  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12962  VmaBudget heapBudget = {};
12963  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12964  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12965  }
12966 
12967  // Scope for lock.
12968  {
12969  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12970 
12971  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12972 
12973  if(IsCorruptionDetectionEnabled())
12974  {
12975  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12976  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12977  }
12978 
12979  if(hAllocation->IsPersistentMap())
12980  {
12981  pBlock->Unmap(m_hAllocator, 1);
12982  }
12983 
12984  pBlock->m_pMetadata->Free(hAllocation);
12985  VMA_HEAVY_ASSERT(pBlock->Validate());
12986 
12987  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12988 
12989  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12990  // pBlock became empty after this deallocation.
12991  if(pBlock->m_pMetadata->IsEmpty())
12992  {
 12993  // We already have an empty block or the budget is exceeded - don't keep this one too, delete it.
12994  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12995  {
12996  pBlockToDelete = pBlock;
12997  Remove(pBlock);
12998  }
12999  // else: We now have an empty block - leave it.
13000  }
13001  // pBlock didn't become empty, but we have another empty block - find and free that one.
13002  // (This is optional, heuristics.)
13003  else if(m_HasEmptyBlock && canDeleteBlock)
13004  {
13005  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13006  if(pLastBlock->m_pMetadata->IsEmpty())
13007  {
13008  pBlockToDelete = pLastBlock;
13009  m_Blocks.pop_back();
13010  }
13011  }
13012 
13013  UpdateHasEmptyBlock();
13014  IncrementallySortBlocks();
13015  }
13016 
13017  // Destruction of a free block. Deferred until this point, outside of mutex
 13018  // lock, for performance reasons.
13019  if(pBlockToDelete != VMA_NULL)
13020  {
13021  VMA_DEBUG_LOG(" Deleted empty block");
13022  pBlockToDelete->Destroy(m_hAllocator);
13023  vma_delete(m_hAllocator, pBlockToDelete);
13024  }
13025 }
13026 
13027 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13028 {
13029  VkDeviceSize result = 0;
13030  for(size_t i = m_Blocks.size(); i--; )
13031  {
13032  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13033  if(result >= m_PreferredBlockSize)
13034  {
13035  break;
13036  }
13037  }
13038  return result;
13039 }
13040 
13041 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13042 {
13043  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13044  {
13045  if(m_Blocks[blockIndex] == pBlock)
13046  {
13047  VmaVectorRemove(m_Blocks, blockIndex);
13048  return;
13049  }
13050  }
13051  VMA_ASSERT(0);
13052 }
13053 
13054 void VmaBlockVector::IncrementallySortBlocks()
13055 {
13056  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13057  {
13058  // Bubble sort only until first swap.
13059  for(size_t i = 1; i < m_Blocks.size(); ++i)
13060  {
13061  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13062  {
13063  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13064  return;
13065  }
13066  }
13067  }
13068 }
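/*
This is deliberately a single bubble-sort pass that stops at the first swap:
called once per Free(), it amortizes sorting across deallocations and keeps
m_Blocks approximately ordered by ascending free space - the order the
best-fit forward scan in AllocatePage() relies on.
*/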
13069 
13070 VkResult VmaBlockVector::AllocateFromBlock(
13071  VmaDeviceMemoryBlock* pBlock,
13072  uint32_t currentFrameIndex,
13073  VkDeviceSize size,
13074  VkDeviceSize alignment,
13075  VmaAllocationCreateFlags allocFlags,
13076  void* pUserData,
13077  VmaSuballocationType suballocType,
13078  uint32_t strategy,
13079  VmaAllocation* pAllocation)
13080 {
13081  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13082  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13083  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13084  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13085 
13086  VmaAllocationRequest currRequest = {};
13087  if(pBlock->m_pMetadata->CreateAllocationRequest(
13088  currentFrameIndex,
13089  m_FrameInUseCount,
13090  m_BufferImageGranularity,
13091  size,
13092  alignment,
13093  isUpperAddress,
13094  suballocType,
13095  false, // canMakeOtherLost
13096  strategy,
13097  &currRequest))
13098  {
13099  // Allocate from pCurrBlock.
13100  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13101 
13102  if(mapped)
13103  {
13104  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13105  if(res != VK_SUCCESS)
13106  {
13107  return res;
13108  }
13109  }
13110 
13111  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13112  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13113  UpdateHasEmptyBlock();
13114  (*pAllocation)->InitBlockAllocation(
13115  pBlock,
13116  currRequest.offset,
13117  alignment,
13118  size,
13119  m_MemoryTypeIndex,
13120  suballocType,
13121  mapped,
13122  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13123  VMA_HEAVY_ASSERT(pBlock->Validate());
13124  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13125  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13126  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13127  {
13128  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13129  }
13130  if(IsCorruptionDetectionEnabled())
13131  {
13132  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13133  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13134  }
13135  return VK_SUCCESS;
13136  }
13137  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13138 }
13139 
13140 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13141 {
13142  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13143  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13144  allocInfo.allocationSize = blockSize;
13145 
13146 #if VMA_BUFFER_DEVICE_ADDRESS
13147  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13148  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13149  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13150  {
13151  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13152  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13153  }
13154 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13155 
13156  VkDeviceMemory mem = VK_NULL_HANDLE;
13157  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13158  if(res < 0)
13159  {
13160  return res;
13161  }
13162 
13163  // New VkDeviceMemory successfully created.
13164 
13165  // Create new Allocation for it.
13166  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13167  pBlock->Init(
13168  m_hAllocator,
13169  m_hParentPool,
13170  m_MemoryTypeIndex,
13171  mem,
13172  allocInfo.allocationSize,
13173  m_NextBlockId++,
13174  m_Algorithm);
13175 
13176  m_Blocks.push_back(pBlock);
13177  if(pNewBlockIndex != VMA_NULL)
13178  {
13179  *pNewBlockIndex = m_Blocks.size() - 1;
13180  }
13181 
13182  return VK_SUCCESS;
13183 }
13184 
13185 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13186  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13187  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13188 {
13189  const size_t blockCount = m_Blocks.size();
13190  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13191 
13192  enum BLOCK_FLAG
13193  {
13194  BLOCK_FLAG_USED = 0x00000001,
13195  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13196  };
13197 
13198  struct BlockInfo
13199  {
13200  uint32_t flags;
13201  void* pMappedData;
13202  };
13203  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13204  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13205  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13206 
13207  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13208  const size_t moveCount = moves.size();
13209  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13210  {
13211  const VmaDefragmentationMove& move = moves[moveIndex];
13212  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13213  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13214  }
13215 
13216  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13217 
13218  // Go over all blocks. Get mapped pointer or map if necessary.
13219  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13220  {
13221  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13222  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13223  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13224  {
13225  currBlockInfo.pMappedData = pBlock->GetMappedData();
13226  // It is not originally mapped - map it.
13227  if(currBlockInfo.pMappedData == VMA_NULL)
13228  {
13229  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13230  if(pDefragCtx->res == VK_SUCCESS)
13231  {
13232  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13233  }
13234  }
13235  }
13236  }
13237 
13238  // Go over all moves. Do actual data transfer.
13239  if(pDefragCtx->res == VK_SUCCESS)
13240  {
13241  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13242  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13243 
13244  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13245  {
13246  const VmaDefragmentationMove& move = moves[moveIndex];
13247 
13248  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13249  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13250 
13251  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13252 
13253  // Invalidate source.
13254  if(isNonCoherent)
13255  {
13256  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13257  memRange.memory = pSrcBlock->GetDeviceMemory();
13258  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13259  memRange.size = VMA_MIN(
13260  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13261  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13262  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13263  }
13264 
13265  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13266  memmove(
13267  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13268  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13269  static_cast<size_t>(move.size));
13270 
13271  if(IsCorruptionDetectionEnabled())
13272  {
13273  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13274  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13275  }
13276 
13277  // Flush destination.
13278  if(isNonCoherent)
13279  {
13280  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13281  memRange.memory = pDstBlock->GetDeviceMemory();
13282  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13283  memRange.size = VMA_MIN(
13284  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13285  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13286  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13287  }
13288  }
13289  }
13290 
13291  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
 13292  // This is done regardless of whether pCtx->res == VK_SUCCESS.
13293  for(size_t blockIndex = blockCount; blockIndex--; )
13294  {
13295  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13296  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13297  {
13298  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13299  pBlock->Unmap(m_hAllocator, 1);
13300  }
13301  }
13302 }
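/*
Example of the nonCoherentAtomSize rounding used above (values hypothetical):
with nonCoherentAtomSize = 64, srcOffset = 200 and size = 100, the range
becomes offset = VmaAlignDown(200, 64) = 192 and size =
VmaAlignUp(100 + (200 - 192), 64) = 128, clamped to the end of the block.
Both the invalidate and the flush therefore cover whole atoms, as Vulkan
requires for non-coherent memory.
*/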
13303 
13304 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13305  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13306  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13307  VkCommandBuffer commandBuffer)
13308 {
13309  const size_t blockCount = m_Blocks.size();
13310 
13311  pDefragCtx->blockContexts.resize(blockCount);
13312  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13313 
13314  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13315  const size_t moveCount = moves.size();
13316  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13317  {
13318  const VmaDefragmentationMove& move = moves[moveIndex];
13319 
13320  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13321  {
 13322  // Old-school moves still require us to map the whole block.
13323  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13324  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13325  }
13326  }
13327 
13328  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13329 
13330  // Go over all blocks. Create and bind buffer for whole block if necessary.
13331  {
13332  VkBufferCreateInfo bufCreateInfo;
13333  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13334 
13335  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13336  {
13337  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13338  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13339  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13340  {
13341  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13342  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13343  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13344  if(pDefragCtx->res == VK_SUCCESS)
13345  {
13346  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13347  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13348  }
13349  }
13350  }
13351  }
13352 
13353  // Go over all moves. Post data transfer commands to command buffer.
13354  if(pDefragCtx->res == VK_SUCCESS)
13355  {
13356  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13357  {
13358  const VmaDefragmentationMove& move = moves[moveIndex];
13359 
13360  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13361  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13362 
13363  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13364 
13365  VkBufferCopy region = {
13366  move.srcOffset,
13367  move.dstOffset,
13368  move.size };
13369  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13370  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13371  }
13372  }
13373 
13374  // Save buffers to defrag context for later destruction.
13375  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13376  {
13377  pDefragCtx->res = VK_NOT_READY;
13378  }
13379 }
13380 
13381 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13382 {
13383  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13384  {
13385  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13386  if(pBlock->m_pMetadata->IsEmpty())
13387  {
13388  if(m_Blocks.size() > m_MinBlockCount)
13389  {
13390  if(pDefragmentationStats != VMA_NULL)
13391  {
13392  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13393  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13394  }
13395 
13396  VmaVectorRemove(m_Blocks, blockIndex);
13397  pBlock->Destroy(m_hAllocator);
13398  vma_delete(m_hAllocator, pBlock);
13399  }
13400  else
13401  {
13402  break;
13403  }
13404  }
13405  }
13406  UpdateHasEmptyBlock();
13407 }
13408 
13409 void VmaBlockVector::UpdateHasEmptyBlock()
13410 {
13411  m_HasEmptyBlock = false;
13412  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13413  {
13414  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13415  if(pBlock->m_pMetadata->IsEmpty())
13416  {
13417  m_HasEmptyBlock = true;
13418  break;
13419  }
13420  }
13421 }
13422 
13423 #if VMA_STATS_STRING_ENABLED
13424 
13425 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13426 {
13427  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13428 
13429  json.BeginObject();
13430 
13431  if(IsCustomPool())
13432  {
13433  const char* poolName = m_hParentPool->GetName();
13434  if(poolName != VMA_NULL && poolName[0] != '\0')
13435  {
13436  json.WriteString("Name");
13437  json.WriteString(poolName);
13438  }
13439 
13440  json.WriteString("MemoryTypeIndex");
13441  json.WriteNumber(m_MemoryTypeIndex);
13442 
13443  json.WriteString("BlockSize");
13444  json.WriteNumber(m_PreferredBlockSize);
13445 
13446  json.WriteString("BlockCount");
13447  json.BeginObject(true);
13448  if(m_MinBlockCount > 0)
13449  {
13450  json.WriteString("Min");
13451  json.WriteNumber((uint64_t)m_MinBlockCount);
13452  }
13453  if(m_MaxBlockCount < SIZE_MAX)
13454  {
13455  json.WriteString("Max");
13456  json.WriteNumber((uint64_t)m_MaxBlockCount);
13457  }
13458  json.WriteString("Cur");
13459  json.WriteNumber((uint64_t)m_Blocks.size());
13460  json.EndObject();
13461 
13462  if(m_FrameInUseCount > 0)
13463  {
13464  json.WriteString("FrameInUseCount");
13465  json.WriteNumber(m_FrameInUseCount);
13466  }
13467 
13468  if(m_Algorithm != 0)
13469  {
13470  json.WriteString("Algorithm");
13471  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13472  }
13473  }
13474  else
13475  {
13476  json.WriteString("PreferredBlockSize");
13477  json.WriteNumber(m_PreferredBlockSize);
13478  }
13479 
13480  json.WriteString("Blocks");
13481  json.BeginObject();
13482  for(size_t i = 0; i < m_Blocks.size(); ++i)
13483  {
13484  json.BeginString();
13485  json.ContinueString(m_Blocks[i]->GetId());
13486  json.EndString();
13487 
13488  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13489  }
13490  json.EndObject();
13491 
13492  json.EndObject();
13493 }
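/*
For a custom pool the resulting JSON has roughly this shape (abbreviated,
all field values hypothetical):

    {
      "MemoryTypeIndex": 2,
      "BlockSize": 33554432,
      "BlockCount": { "Min": 1, "Cur": 2 },
      "Blocks": {
        "0": { ...detailed map of block 0... },
        "1": { ...detailed map of block 1... }
      }
    }
*/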
13494 
13495 #endif // #if VMA_STATS_STRING_ENABLED
13496 
13497 void VmaBlockVector::Defragment(
 13498  class VmaBlockVectorDefragmentationContext* pCtx,
 13499  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
 13500  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13501  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13502  VkCommandBuffer commandBuffer)
13503 {
13504  pCtx->res = VK_SUCCESS;
13505 
13506  const VkMemoryPropertyFlags memPropFlags =
13507  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13508  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13509 
13510  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13511  isHostVisible;
13512  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13513  !IsCorruptionDetectionEnabled() &&
13514  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13515 
13516  // There are options to defragment this memory type.
13517  if(canDefragmentOnCpu || canDefragmentOnGpu)
13518  {
13519  bool defragmentOnGpu;
13520  // There is only one option to defragment this memory type.
13521  if(canDefragmentOnGpu != canDefragmentOnCpu)
13522  {
13523  defragmentOnGpu = canDefragmentOnGpu;
13524  }
13525  // Both options are available: Heuristics to choose the best one.
13526  else
13527  {
13528  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13529  m_hAllocator->IsIntegratedGpu();
13530  }
13531 
13532  bool overlappingMoveSupported = !defragmentOnGpu;
13533 
13534  if(m_hAllocator->m_UseMutex)
 13535  {
 13536  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
 13537  {
13538  if(!m_Mutex.TryLockWrite())
13539  {
13540  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13541  return;
13542  }
13543  }
13544  else
13545  {
13546  m_Mutex.LockWrite();
13547  pCtx->mutexLocked = true;
13548  }
13549  }
13550 
13551  pCtx->Begin(overlappingMoveSupported, flags);
13552 
13553  // Defragment.
13554 
13555  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13556  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13557  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13558 
13559  // Accumulate statistics.
13560  if(pStats != VMA_NULL)
13561  {
13562  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13563  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13564  pStats->bytesMoved += bytesMoved;
13565  pStats->allocationsMoved += allocationsMoved;
13566  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13567  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13568  if(defragmentOnGpu)
13569  {
13570  maxGpuBytesToMove -= bytesMoved;
13571  maxGpuAllocationsToMove -= allocationsMoved;
13572  }
13573  else
13574  {
13575  maxCpuBytesToMove -= bytesMoved;
13576  maxCpuAllocationsToMove -= allocationsMoved;
13577  }
13578  }
 13579 
 13580  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
 13581  {
13582  if(m_hAllocator->m_UseMutex)
13583  m_Mutex.UnlockWrite();
13584 
13585  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13586  pCtx->res = VK_NOT_READY;
13587 
13588  return;
13589  }
13590 
13591  if(pCtx->res >= VK_SUCCESS)
13592  {
13593  if(defragmentOnGpu)
13594  {
13595  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13596  }
13597  else
13598  {
13599  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13600  }
13601  }
13602  }
13603 }
13604 
13605 void VmaBlockVector::DefragmentationEnd(
13606  class VmaBlockVectorDefragmentationContext* pCtx,
13607  uint32_t flags,
13608  VmaDefragmentationStats* pStats)
13609 {
13610  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
13611  {
13612  VMA_ASSERT(pCtx->mutexLocked == false);
13613 
13614  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
13615  // lock protecting us. Since we mutate state here, we have to take the lock now.
13616  m_Mutex.LockWrite();
13617  pCtx->mutexLocked = true;
13618  }
13619 
13620  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
13621  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
13622  {
13623  // Destroy buffers.
13624  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
13625  {
13626  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
13627  if(blockCtx.hBuffer)
13628  {
13629  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13630  }
13631  }
13632 
13633  if(pCtx->res >= VK_SUCCESS)
13634  {
13635  FreeEmptyBlocks(pStats);
13636  }
13637  }
13638 
13639  if(pCtx->mutexLocked)
13640  {
13641  VMA_ASSERT(m_hAllocator->m_UseMutex);
13642  m_Mutex.UnlockWrite();
13643  }
13644 }
13645 
13646 uint32_t VmaBlockVector::ProcessDefragmentations(
13647  class VmaBlockVectorDefragmentationContext *pCtx,
13648  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13649 {
13650  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13651 
13652  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13653 
13654  for(uint32_t i = 0; i < moveCount; ++ i)
13655  {
13656  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13657 
13658  pMove->allocation = move.hAllocation;
13659  pMove->memory = move.pDstBlock->GetDeviceMemory();
13660  pMove->offset = move.dstOffset;
13661 
13662  ++ pMove;
13663  }
13664 
13665  pCtx->defragmentationMovesProcessed += moveCount;
13666 
13667  return moveCount;
13668 }
13669 
13670 void VmaBlockVector::CommitDefragmentations(
13671  class VmaBlockVectorDefragmentationContext *pCtx,
13672  VmaDefragmentationStats* pStats)
13673 {
13674  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13675 
13676  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13677  {
13678  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13679 
13680  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13681  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13682  }
13683 
13684  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13685  FreeEmptyBlocks(pStats);
13686 }
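/*
ProcessDefragmentations() and CommitDefragmentations() above split an
incremental pass in two: the first publishes up to `maxMoves` pending moves
to the caller (who performs the actual data copies), the second frees the
source regions and re-points each allocation at its destination block. As a
hypothetical example: with 10 planned moves and maxMoves == 4, three
consecutive passes would return 4, 4, and 2 moves before DefragmentPassEnd()
stops reporting VK_NOT_READY.
*/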
13687 
13688 size_t VmaBlockVector::CalcAllocationCount() const
13689 {
13690  size_t result = 0;
13691  for(size_t i = 0; i < m_Blocks.size(); ++i)
13692  {
13693  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13694  }
13695  return result;
13696 }
13697 
13698 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13699 {
13700  if(m_BufferImageGranularity == 1)
13701  {
13702  return false;
13703  }
13704  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13705  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13706  {
13707  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13708  VMA_ASSERT(m_Algorithm == 0);
13709  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13710  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13711  {
13712  return true;
13713  }
13714  }
13715  return false;
13716 }
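/*
A hypothetical illustration of the conflict checked above: with
bufferImageGranularity == 1024, a linear buffer occupying bytes [0, 1000)
and an optimally tiled image placed at offset 1008 would share the 1024-byte
page [0, 1024), which Vulkan forbids - the image would have to start at
offset 1024 or later. With bufferImageGranularity == 1 no such page sharing
can occur, hence the early return.
*/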
13717 
13718 void VmaBlockVector::MakePoolAllocationsLost(
13719  uint32_t currentFrameIndex,
13720  size_t* pLostAllocationCount)
13721 {
13722  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13723  size_t lostAllocationCount = 0;
13724  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13725  {
13726  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13727  VMA_ASSERT(pBlock);
13728  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13729  }
13730  if(pLostAllocationCount != VMA_NULL)
13731  {
13732  *pLostAllocationCount = lostAllocationCount;
13733  }
13734 }
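/*
A minimal usage sketch of the public counterpart of the function above,
assuming an initialized `allocator`, a `pool` whose allocations were created
with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, and a per-frame `frameIndex`
counter:

\code
vmaSetCurrentFrameIndex(allocator, frameIndex);
size_t lostAllocationCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostAllocationCount);
// lostAllocationCount now reports how many allocations were marked as lost.
\endcode
*/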
13735 
13736 VkResult VmaBlockVector::CheckCorruption()
13737 {
13738  if(!IsCorruptionDetectionEnabled())
13739  {
13740  return VK_ERROR_FEATURE_NOT_PRESENT;
13741  }
13742 
13743  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13744  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13745  {
13746  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13747  VMA_ASSERT(pBlock);
13748  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13749  if(res != VK_SUCCESS)
13750  {
13751  return res;
13752  }
13753  }
13754  return VK_SUCCESS;
13755 }
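/*
The public vmaCheckCorruption() funnels into the function above for every
block vector whose memory type is included in the given mask. A sketch,
assuming the header was compiled with VMA_DEBUG_DETECT_CORRUPTION enabled
(which also requires a non-zero VMA_DEBUG_MARGIN):

\code
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
if(res == VK_ERROR_VALIDATION_FAILED_EXT)
{
    // A corrupted margin was detected - likely a buffer overrun somewhere.
}
\endcode
*/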
13756 
13757 void VmaBlockVector::AddStats(VmaStats* pStats)
13758 {
13759  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13760  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13761 
13762  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13763 
13764  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13765  {
13766  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13767  VMA_ASSERT(pBlock);
13768  VMA_HEAVY_ASSERT(pBlock->Validate());
13769  VmaStatInfo allocationStatInfo;
13770  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13771  VmaAddStatInfo(pStats->total, allocationStatInfo);
13772  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13773  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13774  }
13775 }
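/*
AddStats() is called per block vector by the public vmaCalculateStats(). A
sketch of reading the aggregated totals, assuming an initialized `allocator`:

\code
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used: %llu B, unused: %llu B\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes);
\endcode
*/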
13776 
13777 ////////////////////////////////////////////////////////////////////////////////
13778 // VmaDefragmentationAlgorithm_Generic members definition
13779 
13780 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13781  VmaAllocator hAllocator,
13782  VmaBlockVector* pBlockVector,
13783  uint32_t currentFrameIndex,
13784  bool overlappingMoveSupported) :
13785  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13786  m_AllocationCount(0),
13787  m_AllAllocations(false),
13788  m_BytesMoved(0),
13789  m_AllocationsMoved(0),
13790  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13791 {
13792  // Create block info for each block.
13793  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13794  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13795  {
13796  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13797  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13798  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13799  m_Blocks.push_back(pBlockInfo);
13800  }
13801 
13802  // Sort them by m_pBlock pointer value.
13803  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13804 }
13805 
13806 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13807 {
13808  for(size_t i = m_Blocks.size(); i--; )
13809  {
13810  vma_delete(m_hAllocator, m_Blocks[i]);
13811  }
13812 }
13813 
13814 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13815 {
13816  // Now that we are inside VmaBlockVector::m_Mutex, we can do a final check that this allocation was not lost.
13817  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13818  {
13819  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13820  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13821  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13822  {
13823  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13824  (*it)->m_Allocations.push_back(allocInfo);
13825  }
13826  else
13827  {
13828  VMA_ASSERT(0);
13829  }
13830 
13831  ++m_AllocationCount;
13832  }
13833 }
13834 
13835 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13836  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13837  VkDeviceSize maxBytesToMove,
13838  uint32_t maxAllocationsToMove,
13839  bool freeOldAllocations)
13840 {
13841  if(m_Blocks.empty())
13842  {
13843  return VK_SUCCESS;
13844  }
13845 
13846  // This is a choice based on research.
13847  // Option 1:
13848  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13849  // Option 2:
13850  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13851  // Option 3:
13852  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13853 
13854  size_t srcBlockMinIndex = 0;
13855  // When FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
13856  /*
13857  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13858  {
13859  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13860  if(blocksWithNonMovableCount > 0)
13861  {
13862  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13863  }
13864  }
13865  */
13866 
13867  size_t srcBlockIndex = m_Blocks.size() - 1;
13868  size_t srcAllocIndex = SIZE_MAX;
13869  for(;;)
13870  {
13871  // 1. Find next allocation to move.
13872  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13873  // 1.2. Then start from last to first m_Allocations.
13874  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13875  {
13876  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13877  {
13878  // Finished: no more allocations to process.
13879  if(srcBlockIndex == srcBlockMinIndex)
13880  {
13881  return VK_SUCCESS;
13882  }
13883  else
13884  {
13885  --srcBlockIndex;
13886  srcAllocIndex = SIZE_MAX;
13887  }
13888  }
13889  else
13890  {
13891  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13892  }
13893  }
13894 
13895  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13896  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13897 
13898  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13899  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13900  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13901  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13902 
13903  // 2. Try to find new place for this allocation in preceding or current block.
13904  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13905  {
13906  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13907  VmaAllocationRequest dstAllocRequest;
13908  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13909  m_CurrentFrameIndex,
13910  m_pBlockVector->GetFrameInUseCount(),
13911  m_pBlockVector->GetBufferImageGranularity(),
13912  size,
13913  alignment,
13914  false, // upperAddress
13915  suballocType,
13916  false, // canMakeOtherLost
13917  strategy,
13918  &dstAllocRequest) &&
13919  MoveMakesSense(
13920  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13921  {
13922  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13923 
13924  // Reached limit on number of allocations or bytes to move.
13925  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13926  (m_BytesMoved + size > maxBytesToMove))
13927  {
13928  return VK_SUCCESS;
13929  }
13930 
13931  VmaDefragmentationMove move = {};
13932  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13933  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13934  move.srcOffset = srcOffset;
13935  move.dstOffset = dstAllocRequest.offset;
13936  move.size = size;
13937  move.hAllocation = allocInfo.m_hAllocation;
13938  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
13939  move.pDstBlock = pDstBlockInfo->m_pBlock;
13940 
13941  moves.push_back(move);
13942 
13943  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13944  dstAllocRequest,
13945  suballocType,
13946  size,
13947  allocInfo.m_hAllocation);
13948 
13949  if(freeOldAllocations)
13950  {
13951  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13952  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13953  }
13954 
13955  if(allocInfo.m_pChanged != VMA_NULL)
13956  {
13957  *allocInfo.m_pChanged = VK_TRUE;
13958  }
13959 
13960  ++m_AllocationsMoved;
13961  m_BytesMoved += size;
13962 
13963  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13964 
13965  break;
13966  }
13967  }
13968 
13969  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13970 
13971  if(srcAllocIndex > 0)
13972  {
13973  --srcAllocIndex;
13974  }
13975  else
13976  {
13977  if(srcBlockIndex > 0)
13978  {
13979  --srcBlockIndex;
13980  srcAllocIndex = SIZE_MAX;
13981  }
13982  else
13983  {
13984  return VK_SUCCESS;
13985  }
13986  }
13987  }
13988 }
13989 
13990 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13991 {
13992  size_t result = 0;
13993  for(size_t i = 0; i < m_Blocks.size(); ++i)
13994  {
13995  if(m_Blocks[i]->m_HasNonMovableAllocations)
13996  {
13997  ++result;
13998  }
13999  }
14000  return result;
14001 }
14002 
14003 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14004  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14005  VkDeviceSize maxBytesToMove,
14006  uint32_t maxAllocationsToMove,
14007  VmaDefragmentationFlags flags)
14008 {
14009  if(!m_AllAllocations && m_AllocationCount == 0)
14010  {
14011  return VK_SUCCESS;
14012  }
14013 
14014  const size_t blockCount = m_Blocks.size();
14015  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14016  {
14017  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14018 
14019  if(m_AllAllocations)
14020  {
14021  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14022  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14023  it != pMetadata->m_Suballocations.end();
14024  ++it)
14025  {
14026  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14027  {
14028  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14029  pBlockInfo->m_Allocations.push_back(allocInfo);
14030  }
14031  }
14032  }
14033 
14034  pBlockInfo->CalcHasNonMovableAllocations();
14035 
14036  // This is a choice based on research.
14037  // Option 1:
14038  pBlockInfo->SortAllocationsByOffsetDescending();
14039  // Option 2:
14040  //pBlockInfo->SortAllocationsBySizeDescending();
14041  }
14042 
14043  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
14044  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14045 
14046  // This is a choice based on research.
14047  const uint32_t roundCount = 2;
14048 
14049  // Execute defragmentation rounds (the main part).
14050  VkResult result = VK_SUCCESS;
14051  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14052  {
14053  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14054  }
14055 
14056  return result;
14057 }
14058 
14059 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14060  size_t dstBlockIndex, VkDeviceSize dstOffset,
14061  size_t srcBlockIndex, VkDeviceSize srcOffset)
14062 {
14063  if(dstBlockIndex < srcBlockIndex)
14064  {
14065  return true;
14066  }
14067  if(dstBlockIndex > srcBlockIndex)
14068  {
14069  return false;
14070  }
14071  if(dstOffset < srcOffset)
14072  {
14073  return true;
14074  }
14075  return false;
14076 }
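/*
MoveMakesSense() imposes a strict lexicographic order on (blockIndex,
offset): a move is accepted only if it transports the allocation to a lower
block index, or to a lower offset within the same block. With hypothetical
values:

  src = (block 2, offset 4096), dst = (block 1, offset 65536) -> true
  src = (block 2, offset 4096), dst = (block 2, offset 1024)  -> true
  src = (block 2, offset 4096), dst = (block 2, offset 8192)  -> false

This guarantees monotonic progress, so defragmentation rounds cannot cycle.
*/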
14077 
14078 ////////////////////////////////////////////////////////////////////////////////
14079 // VmaDefragmentationAlgorithm_Fast
14080 
14081 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14082  VmaAllocator hAllocator,
14083  VmaBlockVector* pBlockVector,
14084  uint32_t currentFrameIndex,
14085  bool overlappingMoveSupported) :
14086  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14087  m_OverlappingMoveSupported(overlappingMoveSupported),
14088  m_AllocationCount(0),
14089  m_AllAllocations(false),
14090  m_BytesMoved(0),
14091  m_AllocationsMoved(0),
14092  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14093 {
14094  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14095 
14096 }
14097 
14098 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14099 {
14100 }
14101 
14102 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14103  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14104  VkDeviceSize maxBytesToMove,
14105  uint32_t maxAllocationsToMove,
14106  VmaDefragmentationFlags flags)
14107 {
14108  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14109 
14110  const size_t blockCount = m_pBlockVector->GetBlockCount();
14111  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14112  {
14113  return VK_SUCCESS;
14114  }
14115 
14116  PreprocessMetadata();
14117 
14118  // Sort blocks in order from most "destination" to most "source".
14119 
14120  m_BlockInfos.resize(blockCount);
14121  for(size_t i = 0; i < blockCount; ++i)
14122  {
14123  m_BlockInfos[i].origBlockIndex = i;
14124  }
14125 
14126  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14127  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14128  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14129  });
14130 
14131  // THE MAIN ALGORITHM
14132 
14133  FreeSpaceDatabase freeSpaceDb;
14134 
14135  size_t dstBlockInfoIndex = 0;
14136  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14137  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14138  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14139  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14140  VkDeviceSize dstOffset = 0;
14141 
14142  bool end = false;
14143  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14144  {
14145  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14146  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14147  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14148  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14149  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14150  {
14151  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14152  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14153  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14154  if(m_AllocationsMoved == maxAllocationsToMove ||
14155  m_BytesMoved + srcAllocSize > maxBytesToMove)
14156  {
14157  end = true;
14158  break;
14159  }
14160  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14161 
14162  VmaDefragmentationMove move = {};
14163  // Try to place it in one of free spaces from the database.
14164  size_t freeSpaceInfoIndex;
14165  VkDeviceSize dstAllocOffset;
14166  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14167  freeSpaceInfoIndex, dstAllocOffset))
14168  {
14169  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14170  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14171  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14172 
14173  // Same block
14174  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14175  {
14176  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14177 
14178  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14179 
14180  VmaSuballocation suballoc = *srcSuballocIt;
14181  suballoc.offset = dstAllocOffset;
14182  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14183  m_BytesMoved += srcAllocSize;
14184  ++m_AllocationsMoved;
14185 
14186  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14187  ++nextSuballocIt;
14188  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14189  srcSuballocIt = nextSuballocIt;
14190 
14191  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14192 
14193  move.srcBlockIndex = srcOrigBlockIndex;
14194  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14195  move.srcOffset = srcAllocOffset;
14196  move.dstOffset = dstAllocOffset;
14197  move.size = srcAllocSize;
14198 
14199  moves.push_back(move);
14200  }
14201  // Different block
14202  else
14203  {
14204  // MOVE OPTION 2: Move the allocation to a different block.
14205 
14206  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14207 
14208  VmaSuballocation suballoc = *srcSuballocIt;
14209  suballoc.offset = dstAllocOffset;
14210  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14211  m_BytesMoved += srcAllocSize;
14212  ++m_AllocationsMoved;
14213 
14214  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14215  ++nextSuballocIt;
14216  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14217  srcSuballocIt = nextSuballocIt;
14218 
14219  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14220 
14221  move.srcBlockIndex = srcOrigBlockIndex;
14222  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14223  move.srcOffset = srcAllocOffset;
14224  move.dstOffset = dstAllocOffset;
14225  move.size = srcAllocSize;
14226 
14227  moves.push_back(move);
14228  }
14229  }
14230  else
14231  {
14232  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14233 
14234  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
14235  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14236  dstAllocOffset + srcAllocSize > dstBlockSize)
14237  {
14238  // But before that, register remaining free space at the end of dst block.
14239  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14240 
14241  ++dstBlockInfoIndex;
14242  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14243  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14244  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14245  dstBlockSize = pDstMetadata->GetSize();
14246  dstOffset = 0;
14247  dstAllocOffset = 0;
14248  }
14249 
14250  // Same block
14251  if(dstBlockInfoIndex == srcBlockInfoIndex)
14252  {
14253  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14254 
14255  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14256 
14257  bool skipOver = overlap;
14258  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14259  {
14260  // If destination and source regions overlap, skip the move if it would
14261  // shift the allocation by less than 1/64 of its size.
14262  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14263  }
14264 
14265  if(skipOver)
14266  {
14267  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14268 
14269  dstOffset = srcAllocOffset + srcAllocSize;
14270  ++srcSuballocIt;
14271  }
14272  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14273  else
14274  {
14275  srcSuballocIt->offset = dstAllocOffset;
14276  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14277  dstOffset = dstAllocOffset + srcAllocSize;
14278  m_BytesMoved += srcAllocSize;
14279  ++m_AllocationsMoved;
14280  ++srcSuballocIt;
14281 
14282  move.srcBlockIndex = srcOrigBlockIndex;
14283  move.dstBlockIndex = dstOrigBlockIndex;
14284  move.srcOffset = srcAllocOffset;
14285  move.dstOffset = dstAllocOffset;
14286  move.size = srcAllocSize;
14287 
14288  moves.push_back(move);
14289  }
14290  }
14291  // Different block
14292  else
14293  {
14294  // MOVE OPTION 2: Move the allocation to a different block.
14295 
14296  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14297  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14298 
14299  VmaSuballocation suballoc = *srcSuballocIt;
14300  suballoc.offset = dstAllocOffset;
14301  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14302  dstOffset = dstAllocOffset + srcAllocSize;
14303  m_BytesMoved += srcAllocSize;
14304  ++m_AllocationsMoved;
14305 
14306  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14307  ++nextSuballocIt;
14308  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14309  srcSuballocIt = nextSuballocIt;
14310 
14311  pDstMetadata->m_Suballocations.push_back(suballoc);
14312 
14313  move.srcBlockIndex = srcOrigBlockIndex;
14314  move.dstBlockIndex = dstOrigBlockIndex;
14315  move.srcOffset = srcAllocOffset;
14316  move.dstOffset = dstAllocOffset;
14317  move.size = srcAllocSize;
14318 
14319  moves.push_back(move);
14320  }
14321  }
14322  }
14323  }
14324 
14325  m_BlockInfos.clear();
14326 
14327  PostprocessMetadata();
14328 
14329  return VK_SUCCESS;
14330 }
14331 
14332 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14333 {
14334  const size_t blockCount = m_pBlockVector->GetBlockCount();
14335  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14336  {
14337  VmaBlockMetadata_Generic* const pMetadata =
14338  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14339  pMetadata->m_FreeCount = 0;
14340  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14341  pMetadata->m_FreeSuballocationsBySize.clear();
14342  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14343  it != pMetadata->m_Suballocations.end(); )
14344  {
14345  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14346  {
14347  VmaSuballocationList::iterator nextIt = it;
14348  ++nextIt;
14349  pMetadata->m_Suballocations.erase(it);
14350  it = nextIt;
14351  }
14352  else
14353  {
14354  ++it;
14355  }
14356  }
14357  }
14358 }
14359 
14360 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14361 {
14362  const size_t blockCount = m_pBlockVector->GetBlockCount();
14363  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14364  {
14365  VmaBlockMetadata_Generic* const pMetadata =
14366  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14367  const VkDeviceSize blockSize = pMetadata->GetSize();
14368 
14369  // No allocations in this block - entire area is free.
14370  if(pMetadata->m_Suballocations.empty())
14371  {
14372  pMetadata->m_FreeCount = 1;
14373  //pMetadata->m_SumFreeSize is already set to blockSize.
14374  VmaSuballocation suballoc = {
14375  0, // offset
14376  blockSize, // size
14377  VMA_NULL, // hAllocation
14378  VMA_SUBALLOCATION_TYPE_FREE };
14379  pMetadata->m_Suballocations.push_back(suballoc);
14380  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14381  }
14382  // There are some allocations in this block.
14383  else
14384  {
14385  VkDeviceSize offset = 0;
14386  VmaSuballocationList::iterator it;
14387  for(it = pMetadata->m_Suballocations.begin();
14388  it != pMetadata->m_Suballocations.end();
14389  ++it)
14390  {
14391  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14392  VMA_ASSERT(it->offset >= offset);
14393 
14394  // Need to insert preceding free space.
14395  if(it->offset > offset)
14396  {
14397  ++pMetadata->m_FreeCount;
14398  const VkDeviceSize freeSize = it->offset - offset;
14399  VmaSuballocation suballoc = {
14400  offset, // offset
14401  freeSize, // size
14402  VMA_NULL, // hAllocation
14403  VMA_SUBALLOCATION_TYPE_FREE };
14404  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14405  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14406  {
14407  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14408  }
14409  }
14410 
14411  pMetadata->m_SumFreeSize -= it->size;
14412  offset = it->offset + it->size;
14413  }
14414 
14415  // Need to insert trailing free space.
14416  if(offset < blockSize)
14417  {
14418  ++pMetadata->m_FreeCount;
14419  const VkDeviceSize freeSize = blockSize - offset;
14420  VmaSuballocation suballoc = {
14421  offset, // offset
14422  freeSize, // size
14423  VMA_NULL, // hAllocation
14424  VMA_SUBALLOCATION_TYPE_FREE };
14425  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14426  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14427  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14428  {
14429  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14430  }
14431  }
14432 
14433  VMA_SORT(
14434  pMetadata->m_FreeSuballocationsBySize.begin(),
14435  pMetadata->m_FreeSuballocationsBySize.end(),
14436  VmaSuballocationItemSizeLess());
14437  }
14438 
14439  VMA_HEAVY_ASSERT(pMetadata->Validate());
14440  }
14441 }
14442 
14443 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14444 {
14445  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14446  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14447  while(it != pMetadata->m_Suballocations.end() &&
14448  it->offset < suballoc.offset)
14449  {
14450  ++it;
14451  }
14454  pMetadata->m_Suballocations.insert(it, suballoc);
14455 }
14456 
14457 ////////////////////////////////////////////////////////////////////////////////
14458 // VmaBlockVectorDefragmentationContext
14459 
14460 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14461  VmaAllocator hAllocator,
14462  VmaPool hCustomPool,
14463  VmaBlockVector* pBlockVector,
14464  uint32_t currFrameIndex) :
14465  res(VK_SUCCESS),
14466  mutexLocked(false),
14467  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14468  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14469  defragmentationMovesProcessed(0),
14470  defragmentationMovesCommitted(0),
14471  hasDefragmentationPlan(0),
14472  m_hAllocator(hAllocator),
14473  m_hCustomPool(hCustomPool),
14474  m_pBlockVector(pBlockVector),
14475  m_CurrFrameIndex(currFrameIndex),
14476  m_pAlgorithm(VMA_NULL),
14477  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14478  m_AllAllocations(false)
14479 {
14480 }
14481 
14482 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14483 {
14484  vma_delete(m_hAllocator, m_pAlgorithm);
14485 }
14486 
14487 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14488 {
14489  AllocInfo info = { hAlloc, pChanged };
14490  m_Allocations.push_back(info);
14491 }
14492 
14493 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14494 {
14495  const bool allAllocations = m_AllAllocations ||
14496  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14497 
14498  /********************************
14499  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14500  ********************************/
14501 
14502  /*
14503  Fast algorithm is supported only when certain criteria are met:
14504  - VMA_DEBUG_MARGIN is 0.
14505  - All allocations in this block vector are moveable.
14506  - There is no possibility of image/buffer granularity conflict.
14507  - The defragmentation is not incremental.
14508  */
14509  if(VMA_DEBUG_MARGIN == 0 &&
14510  allAllocations &&
14511  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14512  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
14513  {
14514  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14515  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14516  }
14517  else
14518  {
14519  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14520  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14521  }
14522 
14523  if(allAllocations)
14524  {
14525  m_pAlgorithm->AddAll();
14526  }
14527  else
14528  {
14529  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14530  {
14531  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14532  }
14533  }
14534 }
14535 
14536 ////////////////////////////////////////////////////////////////////////////////
14537 // VmaDefragmentationContext
14538 
14539 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14540  VmaAllocator hAllocator,
14541  uint32_t currFrameIndex,
14542  uint32_t flags,
14543  VmaDefragmentationStats* pStats) :
14544  m_hAllocator(hAllocator),
14545  m_CurrFrameIndex(currFrameIndex),
14546  m_Flags(flags),
14547  m_pStats(pStats),
14548  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14549 {
14550  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14551 }
14552 
14553 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14554 {
14555  for(size_t i = m_CustomPoolContexts.size(); i--; )
14556  {
14557  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14558  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14559  vma_delete(m_hAllocator, pBlockVectorCtx);
14560  }
14561  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14562  {
14563  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14564  if(pBlockVectorCtx)
14565  {
14566  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14567  vma_delete(m_hAllocator, pBlockVectorCtx);
14568  }
14569  }
14570 }
14571 
14572 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
14573 {
14574  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14575  {
14576  VmaPool pool = pPools[poolIndex];
14577  VMA_ASSERT(pool);
14578  // Pools with algorithm other than default are not defragmented.
14579  if(pool->m_BlockVector.GetAlgorithm() == 0)
14580  {
14581  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14582 
14583  for(size_t i = m_CustomPoolContexts.size(); i--; )
14584  {
14585  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14586  {
14587  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14588  break;
14589  }
14590  }
14591 
14592  if(!pBlockVectorDefragCtx)
14593  {
14594  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14595  m_hAllocator,
14596  pool,
14597  &pool->m_BlockVector,
14598  m_CurrFrameIndex);
14599  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14600  }
14601 
14602  pBlockVectorDefragCtx->AddAll();
14603  }
14604  }
14605 }
14606 
14607 void VmaDefragmentationContext_T::AddAllocations(
14608  uint32_t allocationCount,
14609  const VmaAllocation* pAllocations,
14610  VkBool32* pAllocationsChanged)
14611 {
14612  // Dispatch pAllocations among per-pool defragmentation contexts. Create them when necessary.
14613  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14614  {
14615  const VmaAllocation hAlloc = pAllocations[allocIndex];
14616  VMA_ASSERT(hAlloc);
14617  // DedicatedAlloc cannot be defragmented.
14618  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14619  // Lost allocation cannot be defragmented.
14620  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14621  {
14622  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14623 
14624  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14625  // This allocation belongs to custom pool.
14626  if(hAllocPool != VK_NULL_HANDLE)
14627  {
14628  // Pools with algorithm other than default are not defragmented.
14629  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14630  {
14631  for(size_t i = m_CustomPoolContexts.size(); i--; )
14632  {
14633  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14634  {
14635  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14636  break;
14637  }
14638  }
14639  if(!pBlockVectorDefragCtx)
14640  {
14641  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14642  m_hAllocator,
14643  hAllocPool,
14644  &hAllocPool->m_BlockVector,
14645  m_CurrFrameIndex);
14646  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14647  }
14648  }
14649  }
14650  // This allocation belongs to default pool.
14651  else
14652  {
14653  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14654  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14655  if(!pBlockVectorDefragCtx)
14656  {
14657  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14658  m_hAllocator,
14659  VMA_NULL, // hCustomPool
14660  m_hAllocator->m_pBlockVectors[memTypeIndex],
14661  m_CurrFrameIndex);
14662  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14663  }
14664  }
14665 
14666  if(pBlockVectorDefragCtx)
14667  {
14668  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14669  &pAllocationsChanged[allocIndex] : VMA_NULL;
14670  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14671  }
14672  }
14673  }
14674 }
14675 
14676 VkResult VmaDefragmentationContext_T::Defragment(
14677  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14678  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14679  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14680 {
14681  if(pStats)
14682  {
14683  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14684  }
14685 
14686  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14687  {
14688  // For incremental defragmentations, we just earmark how much we can move
14689  // The real meat is in the defragmentation steps
14690  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14691  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14692 
14693  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14694  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14695 
14696  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14697  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14698  return VK_SUCCESS;
14699 
14700  return VK_NOT_READY;
14701  }
14702 
14703  if(commandBuffer == VK_NULL_HANDLE)
14704  {
14705  maxGpuBytesToMove = 0;
14706  maxGpuAllocationsToMove = 0;
14707  }
14708 
14709  VkResult res = VK_SUCCESS;
14710 
14711  // Process default pools.
14712  for(uint32_t memTypeIndex = 0;
14713  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14714  ++memTypeIndex)
14715  {
14716  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14717  if(pBlockVectorCtx)
14718  {
14719  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14720  pBlockVectorCtx->GetBlockVector()->Defragment(
14721  pBlockVectorCtx,
14722  pStats, flags,
14723  maxCpuBytesToMove, maxCpuAllocationsToMove,
14724  maxGpuBytesToMove, maxGpuAllocationsToMove,
14725  commandBuffer);
14726  if(pBlockVectorCtx->res != VK_SUCCESS)
14727  {
14728  res = pBlockVectorCtx->res;
14729  }
14730  }
14731  }
14732 
14733  // Process custom pools.
14734  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14735  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14736  ++customCtxIndex)
14737  {
14738  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14739  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14740  pBlockVectorCtx->GetBlockVector()->Defragment(
14741  pBlockVectorCtx,
14742  pStats, flags,
14743  maxCpuBytesToMove, maxCpuAllocationsToMove,
14744  maxGpuBytesToMove, maxGpuAllocationsToMove,
14745  commandBuffer);
14746  if(pBlockVectorCtx->res != VK_SUCCESS)
14747  {
14748  res = pBlockVectorCtx->res;
14749  }
14750  }
14751 
14752  return res;
14753 }
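/*
This member does the heavy lifting behind the public vmaDefragmentationBegin().
A minimal non-incremental, CPU-only sketch, assuming an initialized
`allocator` and an array `allocations` of `allocCount` movable allocations
(destruction and re-creation of the buffers/images bound to them is omitted):

\code
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VmaDefragmentationStats stats = {};
vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
// With no command buffer and no INCREMENTAL flag, moves are applied here.
vmaDefragmentationEnd(allocator, defragCtx);
\endcode
*/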
14754 
14755 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14756 {
14757  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14758  uint32_t movesLeft = pInfo->moveCount;
14759 
14760  // Process default pools.
14761  for(uint32_t memTypeIndex = 0;
14762  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14763  ++memTypeIndex)
14764  {
14765  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14766  if(pBlockVectorCtx)
14767  {
14768  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14769 
14770  if(!pBlockVectorCtx->hasDefragmentationPlan)
14771  {
14772  pBlockVectorCtx->GetBlockVector()->Defragment(
14773  pBlockVectorCtx,
14774  m_pStats, m_Flags,
14775  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14776  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14777  VK_NULL_HANDLE);
14778 
14779  if(pBlockVectorCtx->res < VK_SUCCESS)
14780  continue;
14781 
14782  pBlockVectorCtx->hasDefragmentationPlan = true;
14783  }
14784 
14785  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14786  pBlockVectorCtx,
14787  pCurrentMove, movesLeft);
14788 
14789  movesLeft -= processed;
14790  pCurrentMove += processed;
14791  }
14792  }
14793 
14794  // Process custom pools.
14795  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14796  customCtxIndex < customCtxCount;
14797  ++customCtxIndex)
14798  {
14799  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14800  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14801 
14802  if(!pBlockVectorCtx->hasDefragmentationPlan)
14803  {
14804  pBlockVectorCtx->GetBlockVector()->Defragment(
14805  pBlockVectorCtx,
14806  m_pStats, m_Flags,
14807  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14808  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14809  VK_NULL_HANDLE);
14810 
14811  if(pBlockVectorCtx->res < VK_SUCCESS)
14812  continue;
14813 
14814  pBlockVectorCtx->hasDefragmentationPlan = true;
14815  }
14816 
14817  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14818  pBlockVectorCtx,
14819  pCurrentMove, movesLeft);
14820 
14821  movesLeft -= processed;
14822  pCurrentMove += processed;
14823  }
14824 
14825  pInfo->moveCount = pInfo->moveCount - movesLeft;
14826 
14827  return VK_SUCCESS;
14828 }
14829 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14830 {
14831  VkResult res = VK_SUCCESS;
14832 
14833  // Process default pools.
14834  for(uint32_t memTypeIndex = 0;
14835  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14836  ++memTypeIndex)
14837  {
14838  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14839  if(pBlockVectorCtx)
14840  {
14841  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14842 
14843  if(!pBlockVectorCtx->hasDefragmentationPlan)
14844  {
14845  res = VK_NOT_READY;
14846  continue;
14847  }
14848 
14849  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14850  pBlockVectorCtx, m_pStats);
14851 
14852  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14853  res = VK_NOT_READY;
14854  }
14855  }
14856 
14857  // Process custom pools.
14858  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14859  customCtxIndex < customCtxCount;
14860  ++customCtxIndex)
14861  {
14862  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14863  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14864 
14865  if(!pBlockVectorCtx->hasDefragmentationPlan)
14866  {
14867  res = VK_NOT_READY;
14868  continue;
14869  }
14870 
14871  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14872  pBlockVectorCtx, m_pStats);
14873 
14874  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14875  res = VK_NOT_READY;
14876  }
14877 
14878  return res;
14879 }
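/*
DefragmentPassBegin()/DefragmentPassEnd() above back the incremental API. A
sketch of the driving loop, assuming `defragInfo` is prepared as in the
previous sketch but with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set in
defragInfo.flags, and `moves` is a caller-owned array of `maxMoves`
VmaDefragmentationPassMoveInfo elements:

\code
VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
while(res == VK_NOT_READY)
{
    VmaDefragmentationPassInfo passInfo = {};
    passInfo.moveCount = maxMoves;
    passInfo.pMoves = moves;
    vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
    // ... recreate and copy the passInfo.moveCount resources listed in `moves` ...
    res = vmaEndDefragmentationPass(allocator, defragCtx); // VK_NOT_READY while moves remain
}
vmaDefragmentationEnd(allocator, defragCtx);
\endcode
*/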
14880 
14881 ////////////////////////////////////////////////////////////////////////////////
14882 // VmaRecorder
14883 
14884 #if VMA_RECORDING_ENABLED
14885 
14886 VmaRecorder::VmaRecorder() :
14887  m_UseMutex(true),
14888  m_Flags(0),
14889  m_File(VMA_NULL),
14890  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
14891 {
14892 }
14893 
14894 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
14895 {
14896  m_UseMutex = useMutex;
14897  m_Flags = settings.flags;
14898 
14899 #if defined(_WIN32)
14900  // Open file for writing.
14901  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
14902 
14903  if(err != 0)
14904  {
14905  return VK_ERROR_INITIALIZATION_FAILED;
14906  }
14907 #else
14908  // Open file for writing.
14909  m_File = fopen(settings.pFilePath, "wb");
14910 
14911  if(m_File == 0)
14912  {
14913  return VK_ERROR_INITIALIZATION_FAILED;
14914  }
14915 #endif
14916 
14917  // Write header.
14918  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
14919  fprintf(m_File, "%s\n", "1,8");
14920 
14921  return VK_SUCCESS;
14922 }
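/*
Recording is switched on by the application through
VmaAllocatorCreateInfo::pRecordSettings, which ends up in Init() above. A
sketch, assuming the header was compiled with VMA_RECORDING_ENABLED == 1:

\code
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
recordSettings.pFilePath = "vma_recording.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill physicalDevice, device, instance, etc. ...
allocatorInfo.pRecordSettings = &recordSettings;
\endcode
*/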
14923 
14924 VmaRecorder::~VmaRecorder()
14925 {
14926  if(m_File != VMA_NULL)
14927  {
14928  fclose(m_File);
14929  }
14930 }
14931 
14932 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14933 {
14934  CallParams callParams;
14935  GetBasicParams(callParams);
14936 
14937  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14938  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14939  Flush();
14940 }
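/*
Each Record*() method writes one CSV line in the fixed format
"threadId,time,frameIndex,functionName[,args...]", with time in seconds
since recording started. A hypothetical line produced by the call above:

  12345,0.002,0,vmaCreateAllocator
*/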
14941 
14942 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14943 {
14944  CallParams callParams;
14945  GetBasicParams(callParams);
14946 
14947  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14948  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14949  Flush();
14950 }
14951 
14952 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14953 {
14954  CallParams callParams;
14955  GetBasicParams(callParams);
14956 
14957  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14958  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14959  createInfo.memoryTypeIndex,
14960  createInfo.flags,
14961  createInfo.blockSize,
14962  (uint64_t)createInfo.minBlockCount,
14963  (uint64_t)createInfo.maxBlockCount,
14964  createInfo.frameInUseCount,
14965  pool);
14966  Flush();
14967 }
14968 
14969 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14970 {
14971  CallParams callParams;
14972  GetBasicParams(callParams);
14973 
14974  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14975  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14976  pool);
14977  Flush();
14978 }
14979 
14980 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14981  const VkMemoryRequirements& vkMemReq,
14982  const VmaAllocationCreateInfo& createInfo,
14983  VmaAllocation allocation)
14984 {
14985  CallParams callParams;
14986  GetBasicParams(callParams);
14987 
14988  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14989  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14990  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14991  vkMemReq.size,
14992  vkMemReq.alignment,
14993  vkMemReq.memoryTypeBits,
14994  createInfo.flags,
14995  createInfo.usage,
14996  createInfo.requiredFlags,
14997  createInfo.preferredFlags,
14998  createInfo.memoryTypeBits,
14999  createInfo.pool,
15000  allocation,
15001  userDataStr.GetString());
15002  Flush();
15003 }
15004 
15005 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15006  const VkMemoryRequirements& vkMemReq,
15007  const VmaAllocationCreateInfo& createInfo,
15008  uint64_t allocationCount,
15009  const VmaAllocation* pAllocations)
15010 {
15011  CallParams callParams;
15012  GetBasicParams(callParams);
15013 
15014  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15015  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15016  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15017  vkMemReq.size,
15018  vkMemReq.alignment,
15019  vkMemReq.memoryTypeBits,
15020  createInfo.flags,
15021  createInfo.usage,
15022  createInfo.requiredFlags,
15023  createInfo.preferredFlags,
15024  createInfo.memoryTypeBits,
15025  createInfo.pool);
15026  PrintPointerList(allocationCount, pAllocations);
15027  fprintf(m_File, ",%s\n", userDataStr.GetString());
15028  Flush();
15029 }
15030 
15031 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15032  const VkMemoryRequirements& vkMemReq,
15033  bool requiresDedicatedAllocation,
15034  bool prefersDedicatedAllocation,
15035  const VmaAllocationCreateInfo& createInfo,
15036  VmaAllocation allocation)
15037 {
15038  CallParams callParams;
15039  GetBasicParams(callParams);
15040 
15041  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15042  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15043  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15044  vkMemReq.size,
15045  vkMemReq.alignment,
15046  vkMemReq.memoryTypeBits,
15047  requiresDedicatedAllocation ? 1 : 0,
15048  prefersDedicatedAllocation ? 1 : 0,
15049  createInfo.flags,
15050  createInfo.usage,
15051  createInfo.requiredFlags,
15052  createInfo.preferredFlags,
15053  createInfo.memoryTypeBits,
15054  createInfo.pool,
15055  allocation,
15056  userDataStr.GetString());
15057  Flush();
15058 }
15059 
15060 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15061  const VkMemoryRequirements& vkMemReq,
15062  bool requiresDedicatedAllocation,
15063  bool prefersDedicatedAllocation,
15064  const VmaAllocationCreateInfo& createInfo,
15065  VmaAllocation allocation)
15066 {
15067  CallParams callParams;
15068  GetBasicParams(callParams);
15069 
15070  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15071  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15072  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15073  vkMemReq.size,
15074  vkMemReq.alignment,
15075  vkMemReq.memoryTypeBits,
15076  requiresDedicatedAllocation ? 1 : 0,
15077  prefersDedicatedAllocation ? 1 : 0,
15078  createInfo.flags,
15079  createInfo.usage,
15080  createInfo.requiredFlags,
15081  createInfo.preferredFlags,
15082  createInfo.memoryTypeBits,
15083  createInfo.pool,
15084  allocation,
15085  userDataStr.GetString());
15086  Flush();
15087 }
15088 
15089 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15090  VmaAllocation allocation)
15091 {
15092  CallParams callParams;
15093  GetBasicParams(callParams);
15094 
15095  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15096  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15097  allocation);
15098  Flush();
15099 }
15100 
15101 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15102  uint64_t allocationCount,
15103  const VmaAllocation* pAllocations)
15104 {
15105  CallParams callParams;
15106  GetBasicParams(callParams);
15107 
15108  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15109  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15110  PrintPointerList(allocationCount, pAllocations);
15111  fprintf(m_File, "\n");
15112  Flush();
15113 }
15114 
15115 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15116  VmaAllocation allocation,
15117  const void* pUserData)
15118 {
15119  CallParams callParams;
15120  GetBasicParams(callParams);
15121 
15122  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15123  UserDataString userDataStr(
15124  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15125  pUserData);
15126  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15127  allocation,
15128  userDataStr.GetString());
15129  Flush();
15130 }
15131 
15132 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15133  VmaAllocation allocation)
15134 {
15135  CallParams callParams;
15136  GetBasicParams(callParams);
15137 
15138  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15139  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15140  allocation);
15141  Flush();
15142 }
15143 
15144 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15145  VmaAllocation allocation)
15146 {
15147  CallParams callParams;
15148  GetBasicParams(callParams);
15149 
15150  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15151  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15152  allocation);
15153  Flush();
15154 }
15155 
15156 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15157  VmaAllocation allocation)
15158 {
15159  CallParams callParams;
15160  GetBasicParams(callParams);
15161 
15162  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15163  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15164  allocation);
15165  Flush();
15166 }
15167 
15168 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15169  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15170 {
15171  CallParams callParams;
15172  GetBasicParams(callParams);
15173 
15174  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15175  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15176  allocation,
15177  offset,
15178  size);
15179  Flush();
15180 }
15181 
15182 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15183  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15184 {
15185  CallParams callParams;
15186  GetBasicParams(callParams);
15187 
15188  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15189  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15190  allocation,
15191  offset,
15192  size);
15193  Flush();
15194 }
15195 
15196 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15197  const VkBufferCreateInfo& bufCreateInfo,
15198  const VmaAllocationCreateInfo& allocCreateInfo,
15199  VmaAllocation allocation)
15200 {
15201  CallParams callParams;
15202  GetBasicParams(callParams);
15203 
15204  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15205  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15206  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15207  bufCreateInfo.flags,
15208  bufCreateInfo.size,
15209  bufCreateInfo.usage,
15210  bufCreateInfo.sharingMode,
15211  allocCreateInfo.flags,
15212  allocCreateInfo.usage,
15213  allocCreateInfo.requiredFlags,
15214  allocCreateInfo.preferredFlags,
15215  allocCreateInfo.memoryTypeBits,
15216  allocCreateInfo.pool,
15217  allocation,
15218  userDataStr.GetString());
15219  Flush();
15220 }
15221 
15222 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15223  const VkImageCreateInfo& imageCreateInfo,
15224  const VmaAllocationCreateInfo& allocCreateInfo,
15225  VmaAllocation allocation)
15226 {
15227  CallParams callParams;
15228  GetBasicParams(callParams);
15229 
15230  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15231  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15232  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15233  imageCreateInfo.flags,
15234  imageCreateInfo.imageType,
15235  imageCreateInfo.format,
15236  imageCreateInfo.extent.width,
15237  imageCreateInfo.extent.height,
15238  imageCreateInfo.extent.depth,
15239  imageCreateInfo.mipLevels,
15240  imageCreateInfo.arrayLayers,
15241  imageCreateInfo.samples,
15242  imageCreateInfo.tiling,
15243  imageCreateInfo.usage,
15244  imageCreateInfo.sharingMode,
15245  imageCreateInfo.initialLayout,
15246  allocCreateInfo.flags,
15247  allocCreateInfo.usage,
15248  allocCreateInfo.requiredFlags,
15249  allocCreateInfo.preferredFlags,
15250  allocCreateInfo.memoryTypeBits,
15251  allocCreateInfo.pool,
15252  allocation,
15253  userDataStr.GetString());
15254  Flush();
15255 }
15256 
15257 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15258  VmaAllocation allocation)
15259 {
15260  CallParams callParams;
15261  GetBasicParams(callParams);
15262 
15263  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15264  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15265  allocation);
15266  Flush();
15267 }
15268 
15269 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15270  VmaAllocation allocation)
15271 {
15272  CallParams callParams;
15273  GetBasicParams(callParams);
15274 
15275  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15276  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15277  allocation);
15278  Flush();
15279 }
15280 
15281 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15282  VmaAllocation allocation)
15283 {
15284  CallParams callParams;
15285  GetBasicParams(callParams);
15286 
15287  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15288  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15289  allocation);
15290  Flush();
15291 }
15292 
15293 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15294  VmaAllocation allocation)
15295 {
15296  CallParams callParams;
15297  GetBasicParams(callParams);
15298 
15299  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15300  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15301  allocation);
15302  Flush();
15303 }
15304 
15305 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15306  VmaPool pool)
15307 {
15308  CallParams callParams;
15309  GetBasicParams(callParams);
15310 
15311  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15312  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15313  pool);
15314  Flush();
15315 }
15316 
15317 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15318  const VmaDefragmentationInfo2& info,
15319  VmaDefragmentationContext ctx)
15320 {
15321  CallParams callParams;
15322  GetBasicParams(callParams);
15323 
15324  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15325  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15326  info.flags);
15327  PrintPointerList(info.allocationCount, info.pAllocations);
15328  fprintf(m_File, ",");
15329  PrintPointerList(info.poolCount, info.pPools);
15330  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15331  info.maxCpuBytesToMove,
15332  info.maxCpuAllocationsToMove,
15333  info.maxGpuBytesToMove,
15334  info.maxGpuAllocationsToMove,
15335  info.commandBuffer,
15336  ctx);
15337  Flush();
15338 }
15339 
15340 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15341  VmaDefragmentationContext ctx)
15342 {
15343  CallParams callParams;
15344  GetBasicParams(callParams);
15345 
15346  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15347  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15348  ctx);
15349  Flush();
15350 }
15351 
15352 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15353  VmaPool pool,
15354  const char* name)
15355 {
15356  CallParams callParams;
15357  GetBasicParams(callParams);
15358 
15359  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15360  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15361  pool, name != VMA_NULL ? name : "");
15362  Flush();
15363 }
15364 
15365 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15366 {
15367  if(pUserData != VMA_NULL)
15368  {
15369  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15370  {
15371  m_Str = (const char*)pUserData;
15372  }
15373  else
15374  {
15375  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, convert the string's memory address to a string and store it.
15376  snprintf(m_PtrStr, 17, "%p", pUserData);
15377  m_Str = m_PtrStr;
15378  }
15379  }
15380  else
15381  {
15382  m_Str = "";
15383  }
15384 }
15385 
15386 void VmaRecorder::WriteConfiguration(
15387  const VkPhysicalDeviceProperties& devProps,
15388  const VkPhysicalDeviceMemoryProperties& memProps,
15389  uint32_t vulkanApiVersion,
15390  bool dedicatedAllocationExtensionEnabled,
15391  bool bindMemory2ExtensionEnabled,
15392  bool memoryBudgetExtensionEnabled,
15393  bool deviceCoherentMemoryExtensionEnabled)
15394 {
15395  fprintf(m_File, "Config,Begin\n");
15396 
15397  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15398 
15399  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15400  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15401  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15402  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15403  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15404  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15405 
15406  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15407  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15408  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15409 
15410  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15411  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15412  {
15413  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15414  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15415  }
15416  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15417  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15418  {
15419  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15420  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15421  }
15422 
15423  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15424  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15425  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15426  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15427 
15428  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15429  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15430  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15431  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15432  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15433  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15434  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15435  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15436  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15437 
15438  fprintf(m_File, "Config,End\n");
15439 }
15440 
15441 void VmaRecorder::GetBasicParams(CallParams& outParams)
15442 {
15443  #if defined(_WIN32)
15444  outParams.threadId = GetCurrentThreadId();
15445  #else
15446  // Use C++11 features to get thread id and convert it to uint32_t.
15447  // There is room for optimization since sstream is quite slow.
15448  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
15449  std::thread::id thread_id = std::this_thread::get_id();
15450  std::stringstream thread_id_to_string_converter;
15451  thread_id_to_string_converter << thread_id;
15452  std::string thread_id_as_string = thread_id_to_string_converter.str();
15453  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
15454  #endif
15455 
15456  auto current_time = std::chrono::high_resolution_clock::now();
15457 
15458  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
15459 }
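// A possible simplification of the non-Windows path above, assuming the std::hash
// specialization for std::thread::id from <thread> (C++11): it avoids the stringstream
// round-trip, at the cost of logging a hashed id rather than the platform's numeric one.
//   outParams.threadId = static_cast<uint32_t>(std::hash<std::thread::id>{}(std::this_thread::get_id()));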
15460 
15461 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15462 {
15463  if(count)
15464  {
15465  fprintf(m_File, "%p", pItems[0]);
15466  for(uint64_t i = 1; i < count; ++i)
15467  {
15468  fprintf(m_File, " %p", pItems[i]);
15469  }
15470  }
15471 }
15472 
15473 void VmaRecorder::Flush()
15474 {
15475  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15476  {
15477  fflush(m_File);
15478  }
15479 }
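// Flushing after every call (VMA_RECORD_FLUSH_AFTER_CALL_BIT) keeps the recording file
// usable even if the application crashes mid-run, at the cost of extra I/O per call.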
15480 
15481 #endif // #if VMA_RECORDING_ENABLED
15482 
15483 ////////////////////////////////////////////////////////////////////////////////
15484 // VmaAllocationObjectAllocator
15485 
15486 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15487  m_Allocator(pAllocationCallbacks, 1024)
15488 {
15489 }
15490 
15491 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15492 {
15493  VmaMutexLock mutexLock(m_Mutex);
15494  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15495 }
15496 
15497 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15498 {
15499  VmaMutexLock mutexLock(m_Mutex);
15500  m_Allocator.Free(hAlloc);
15501 }
15502 
15503 ////////////////////////////////////////////////////////////////////////////////
15504 // VmaAllocator_T
15505 
15506 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15507  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15508  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15509  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15510  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15511  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15512  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15513  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15514  m_hDevice(pCreateInfo->device),
15515  m_hInstance(pCreateInfo->instance),
15516  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15517  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15518  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15519  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15520  m_HeapSizeLimitMask(0),
15521  m_PreferredLargeHeapBlockSize(0),
15522  m_PhysicalDevice(pCreateInfo->physicalDevice),
15523  m_CurrentFrameIndex(0),
15524  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15525  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15526  m_NextPoolId(0),
15527  m_GlobalMemoryTypeBits(UINT32_MAX)
15528 #if VMA_RECORDING_ENABLED
15529  ,m_pRecorder(VMA_NULL)
15530 #endif
15531 {
15532  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15533  {
15534  m_UseKhrDedicatedAllocation = false;
15535  m_UseKhrBindMemory2 = false;
15536  }
15537 
15538  if(VMA_DEBUG_DETECT_CORRUPTION)
15539  {
15540  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15541  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15542  }
15543 
15544  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15545 
15546  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15547  {
15548 #if !(VMA_DEDICATED_ALLOCATION)
15549  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15550  {
15551  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15552  }
15553 #endif
15554 #if !(VMA_BIND_MEMORY2)
15555  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15556  {
15557  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15558  }
15559 #endif
15560  }
15561 #if !(VMA_MEMORY_BUDGET)
15562  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15563  {
15564  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15565  }
15566 #endif
15567 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15568  if(m_UseKhrBufferDeviceAddress)
15569  {
15570  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15571  }
15572 #endif
15573 #if VMA_VULKAN_VERSION < 1002000
15574  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15575  {
15576  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15577  }
15578 #endif
15579 #if VMA_VULKAN_VERSION < 1001000
15580  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15581  {
15582  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15583  }
15584 #endif
15585 
15586  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
15587  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15588  memset(&m_MemProps, 0, sizeof(m_MemProps));
15589 
15590  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15591  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15592  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15593 
15594  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15595  {
15596  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15597  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15598  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15599  }
15600 
15601  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15602 
15603  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15604  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15605 
15606  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15607  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15608  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15609  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15610 
15611  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15612  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15613 
15614  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15615 
15616  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15617  {
15618  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15619  {
15620  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15621  if(limit != VK_WHOLE_SIZE)
15622  {
15623  m_HeapSizeLimitMask |= 1u << heapIndex;
15624  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15625  {
15626  m_MemProps.memoryHeaps[heapIndex].size = limit;
15627  }
15628  }
15629  }
15630  }
15631 
15632  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15633  {
15634  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15635 
15636  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15637  this,
15638  VK_NULL_HANDLE, // hParentPool
15639  memTypeIndex,
15640  preferredBlockSize,
15641  0,
15642  SIZE_MAX,
15643  GetBufferImageGranularity(),
15644  pCreateInfo->frameInUseCount,
15645  false, // explicitBlockSize
15646  false); // linearAlgorithm
15647  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15648  // because minBlockCount is 0.
15649  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15650 
15651  }
15652 }
15653 
15654 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15655 {
15656  VkResult res = VK_SUCCESS;
15657 
15658  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15659  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15660  {
15661 #if VMA_RECORDING_ENABLED
15662  m_pRecorder = vma_new(this, VmaRecorder)();
15663  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15664  if(res != VK_SUCCESS)
15665  {
15666  return res;
15667  }
15668  m_pRecorder->WriteConfiguration(
15669  m_PhysicalDeviceProperties,
15670  m_MemProps,
15671  m_VulkanApiVersion,
15672  m_UseKhrDedicatedAllocation,
15673  m_UseKhrBindMemory2,
15674  m_UseExtMemoryBudget,
15675  m_UseAmdDeviceCoherentMemory);
15676  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15677 #else
15678  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15679  return VK_ERROR_FEATURE_NOT_PRESENT;
15680 #endif
15681  }
15682 
15683 #if VMA_MEMORY_BUDGET
15684  if(m_UseExtMemoryBudget)
15685  {
15686  UpdateVulkanBudget();
15687  }
15688 #endif // #if VMA_MEMORY_BUDGET
15689 
15690  return res;
15691 }
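// Usage sketch for the recording path above (application side, assuming the library was
// compiled with VMA_RECORDING_ENABLED defined to 1; the file path is hypothetical):
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // optional
//   recordSettings.pFilePath = "allocator_calls.csv";
//   allocatorCreateInfo.pRecordSettings = &recordSettings;
// Init() then creates the recorder, writes the "Config" block, and records vmaCreateAllocator.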
15692 
15693 VmaAllocator_T::~VmaAllocator_T()
15694 {
15695 #if VMA_RECORDING_ENABLED
15696  if(m_pRecorder != VMA_NULL)
15697  {
15698  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15699  vma_delete(this, m_pRecorder);
15700  }
15701 #endif
15702 
15703  VMA_ASSERT(m_Pools.empty());
15704 
15705  for(size_t i = GetMemoryTypeCount(); i--; )
15706  {
15707  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15708  {
15709  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15710  }
15711 
15712  vma_delete(this, m_pDedicatedAllocations[i]);
15713  vma_delete(this, m_pBlockVectors[i]);
15714  }
15715 }
15716 
15717 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15718 {
15719 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15720  ImportVulkanFunctions_Static();
15721 #endif
15722 
15723  if(pVulkanFunctions != VMA_NULL)
15724  {
15725  ImportVulkanFunctions_Custom(pVulkanFunctions);
15726  }
15727 
15728 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15729  ImportVulkanFunctions_Dynamic();
15730 #endif
15731 
15732  ValidateVulkanFunctions();
15733 }
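// Import order above: statically linked entry points first (if enabled), then any
// user-provided pointers override them, and finally vkGetInstanceProcAddr /
// vkGetDeviceProcAddr fill whatever is still null. ValidateVulkanFunctions() asserts
// that every required pointer ended up non-null.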
15734 
15735 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15736 
15737 void VmaAllocator_T::ImportVulkanFunctions_Static()
15738 {
15739  // Vulkan 1.0
15740  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15741  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15742  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15743  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15744  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15745  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15746  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15747  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15748  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15749  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15750  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15751  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15752  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15753  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15754  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15755  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15756  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15757 
15758  // Vulkan 1.1
15759 #if VMA_VULKAN_VERSION >= 1001000
15760  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15761  {
15762  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
15763  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
15764  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
15765  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
15766  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
15767  }
15768 #endif
15769 }
15770 
15771 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15772 
15773 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
15774 {
15775  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
15776 
15777 #define VMA_COPY_IF_NOT_NULL(funcName) \
15778  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15779 
15780  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15781  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15782  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15783  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15784  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15785  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15786  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15787  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15788  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15789  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15790  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15791  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15792  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15793  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15794  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15795  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15796  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15797 
15798 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15799  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15800  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15801 #endif
15802 
15803 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15804  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15805  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15806 #endif
15807 
15808 #if VMA_MEMORY_BUDGET
15809  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15810 #endif
15811 
15812 #undef VMA_COPY_IF_NOT_NULL
15813 }
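// Usage sketch, assuming an application that loads Vulkan dynamically and builds with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0: fill only the pointers you already have;
// members left null are resolved by ImportVulkanFunctions_Dynamic() below.
//   VmaVulkanFunctions funcs = {};
//   funcs.vkAllocateMemory = myLoadedAllocateMemory; // hypothetical loader variable
//   allocatorCreateInfo.pVulkanFunctions = &funcs;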
15814 
15815 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15816 
15817 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
15818 {
15819 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
15820  if(m_VulkanFunctions.memberName == VMA_NULL) \
15821  m_VulkanFunctions.memberName = \
15822  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
15823 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
15824  if(m_VulkanFunctions.memberName == VMA_NULL) \
15825  m_VulkanFunctions.memberName = \
15826  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
15827 
15828  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
15829  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
15830  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
15831  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
15832  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
15833  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
15834  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
15835  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
15836  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
15837  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
15838  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
15839  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
15840  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
15841  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
15842  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
15843  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
15844  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
15845 
15846 #if VMA_VULKAN_VERSION >= 1001000
15847  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15848  {
15849  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
15850  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
15851  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
15852  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
15853  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
15854  }
15855 #endif
15856 
15857 #if VMA_DEDICATED_ALLOCATION
15858  if(m_UseKhrDedicatedAllocation)
15859  {
15860  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
15861  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
15862  }
15863 #endif
15864 
15865 #if VMA_BIND_MEMORY2
15866  if(m_UseKhrBindMemory2)
15867  {
15868  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
15869  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
15870  }
15871 #endif // #if VMA_BIND_MEMORY2
15872 
15873 #if VMA_MEMORY_BUDGET
15874  if(m_UseExtMemoryBudget)
15875  {
15876  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
15877  }
15878 #endif // #if VMA_MEMORY_BUDGET
15879 
15880 #undef VMA_FETCH_DEVICE_FUNC
15881 #undef VMA_FETCH_INSTANCE_FUNC
15882 }
15883 
15884 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15885 
15886 void VmaAllocator_T::ValidateVulkanFunctions()
15887 {
15888  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15889  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15890  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15891  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15892  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15893  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15894  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15895  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15896  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15897  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15898  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15899  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15900  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15901  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15902  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15903  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15904  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15905 
15906 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15907  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15908  {
15909  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15910  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15911  }
15912 #endif
15913 
15914 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15915  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15916  {
15917  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15918  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15919  }
15920 #endif
15921 
15922 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15923  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15924  {
15925  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15926  }
15927 #endif
15928 }
15929 
15930 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15931 {
15932  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15933  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15934  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15935  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15936 }
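// Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE of 1 GiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB: a 256 MiB heap counts as small, so its
// preferred block size is 256 MiB / 8 = 32 MiB; an 8 GiB heap gets the full 256 MiB.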
15937 
15938 VkResult VmaAllocator_T::AllocateMemoryOfType(
15939  VkDeviceSize size,
15940  VkDeviceSize alignment,
15941  bool dedicatedAllocation,
15942  VkBuffer dedicatedBuffer,
15943  VkBufferUsageFlags dedicatedBufferUsage,
15944  VkImage dedicatedImage,
15945  const VmaAllocationCreateInfo& createInfo,
15946  uint32_t memTypeIndex,
15947  VmaSuballocationType suballocType,
15948  size_t allocationCount,
15949  VmaAllocation* pAllocations)
15950 {
15951  VMA_ASSERT(pAllocations != VMA_NULL);
15952  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
15953 
15954  VmaAllocationCreateInfo finalCreateInfo = createInfo;
15955 
15956  // If memory type is not HOST_VISIBLE, disable MAPPED.
15957  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15958  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15959  {
15960  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15961  }
15962  // If memory is lazily allocated, it should be always dedicated.
15963  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15964  {
15965  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15966  }
15967 
15968  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
15969  VMA_ASSERT(blockVector);
15970 
15971  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
15972  bool preferDedicatedMemory =
15973  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
15974  dedicatedAllocation ||
15975  // Heuristic: Allocate dedicated memory if requested size is greater than half of preferred block size.
15976  size > preferredBlockSize / 2;
15977 
15978  if(preferDedicatedMemory &&
15979  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
15980  finalCreateInfo.pool == VK_NULL_HANDLE)
15981  {
15982  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15983  }
15984 
15985  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15986  {
15987  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15988  {
15989  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15990  }
15991  else
15992  {
15993  return AllocateDedicatedMemory(
15994  size,
15995  suballocType,
15996  memTypeIndex,
15997  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15998  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15999  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16000  finalCreateInfo.pUserData,
16001  dedicatedBuffer,
16002  dedicatedBufferUsage,
16003  dedicatedImage,
16004  allocationCount,
16005  pAllocations);
16006  }
16007  }
16008  else
16009  {
16010  VkResult res = blockVector->Allocate(
16011  m_CurrentFrameIndex.load(),
16012  size,
16013  alignment,
16014  finalCreateInfo,
16015  suballocType,
16016  allocationCount,
16017  pAllocations);
16018  if(res == VK_SUCCESS)
16019  {
16020  return res;
16021  }
16022 
16023  // Try dedicated memory as a fallback.
16024  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16025  {
16026  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16027  }
16028  else
16029  {
16030  res = AllocateDedicatedMemory(
16031  size,
16032  suballocType,
16033  memTypeIndex,
16034  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16035  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16036  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16037  finalCreateInfo.pUserData,
16038  dedicatedBuffer,
16039  dedicatedBufferUsage,
16040  dedicatedImage,
16041  allocationCount,
16042  pAllocations);
16043  if(res == VK_SUCCESS)
16044  {
16045  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
16046  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16047  return VK_SUCCESS;
16048  }
16049  else
16050  {
16051  // Everything failed: Return error code.
16052  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16053  return res;
16054  }
16055  }
16056  }
16057 }
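// Summary of the strategy above: dedicated memory is used when forced by
// VMA_DEBUG_ALWAYS_DEDICATED_MEMORY, requested by the caller, or when the request
// exceeds half of the preferred block size; otherwise the block vector is tried first
// and dedicated memory is the fallback, unless NEVER_ALLOCATE forbids new allocations.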
16058 
16059 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16060  VkDeviceSize size,
16061  VmaSuballocationType suballocType,
16062  uint32_t memTypeIndex,
16063  bool withinBudget,
16064  bool map,
16065  bool isUserDataString,
16066  void* pUserData,
16067  VkBuffer dedicatedBuffer,
16068  VkBufferUsageFlags dedicatedBufferUsage,
16069  VkImage dedicatedImage,
16070  size_t allocationCount,
16071  VmaAllocation* pAllocations)
16072 {
16073  VMA_ASSERT(allocationCount > 0 && pAllocations);
16074 
16075  if(withinBudget)
16076  {
16077  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16078  VmaBudget heapBudget = {};
16079  GetBudget(&heapBudget, heapIndex, 1);
16080  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16081  {
16082  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16083  }
16084  }
16085 
16086  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16087  allocInfo.memoryTypeIndex = memTypeIndex;
16088  allocInfo.allocationSize = size;
16089 
16090 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16091  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16092  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16093  {
16094  if(dedicatedBuffer != VK_NULL_HANDLE)
16095  {
16096  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16097  dedicatedAllocInfo.buffer = dedicatedBuffer;
16098  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16099  }
16100  else if(dedicatedImage != VK_NULL_HANDLE)
16101  {
16102  dedicatedAllocInfo.image = dedicatedImage;
16103  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16104  }
16105  }
16106 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16107 
16108 #if VMA_BUFFER_DEVICE_ADDRESS
16109  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16110  if(m_UseKhrBufferDeviceAddress)
16111  {
16112  bool canContainBufferWithDeviceAddress = true;
16113  if(dedicatedBuffer != VK_NULL_HANDLE)
16114  {
16115  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16116  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16117  }
16118  else if(dedicatedImage != VK_NULL_HANDLE)
16119  {
16120  canContainBufferWithDeviceAddress = false;
16121  }
16122  if(canContainBufferWithDeviceAddress)
16123  {
16124  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16125  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16126  }
16127  }
16128 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16129 
16130  size_t allocIndex;
16131  VkResult res = VK_SUCCESS;
16132  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16133  {
16134  res = AllocateDedicatedMemoryPage(
16135  size,
16136  suballocType,
16137  memTypeIndex,
16138  allocInfo,
16139  map,
16140  isUserDataString,
16141  pUserData,
16142  pAllocations + allocIndex);
16143  if(res != VK_SUCCESS)
16144  {
16145  break;
16146  }
16147  }
16148 
16149  if(res == VK_SUCCESS)
16150  {
16151  // Register them in m_pDedicatedAllocations.
16152  {
16153  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16154  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16155  VMA_ASSERT(pDedicatedAllocations);
16156  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16157  {
16158  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
16159  }
16160  }
16161 
16162  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16163  }
16164  else
16165  {
16166  // Free all already created allocations.
16167  while(allocIndex--)
16168  {
16169  VmaAllocation currAlloc = pAllocations[allocIndex];
16170  VkDeviceMemory hMemory = currAlloc->GetMemory();
16171 
16172  /*
16173  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16174  before vkFreeMemory.
16175 
16176  if(currAlloc->GetMappedData() != VMA_NULL)
16177  {
16178  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16179  }
16180  */
16181 
16182  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16183  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16184  currAlloc->SetUserData(this, VMA_NULL);
16185  m_AllocationObjectAllocator.Free(currAlloc);
16186  }
16187 
16188  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16189  }
16190 
16191  return res;
16192 }
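// Note: the loop above is all-or-nothing. If any page of a multi-allocation request
// fails, every page already created is freed (Vulkan permits freeing mapped memory
// without unmapping), the budget is rolled back, and the output array is zeroed, so
// the caller never observes a partially filled result.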
16193 
16194 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16195  VkDeviceSize size,
16196  VmaSuballocationType suballocType,
16197  uint32_t memTypeIndex,
16198  const VkMemoryAllocateInfo& allocInfo,
16199  bool map,
16200  bool isUserDataString,
16201  void* pUserData,
16202  VmaAllocation* pAllocation)
16203 {
16204  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16205  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16206  if(res < 0)
16207  {
16208  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16209  return res;
16210  }
16211 
16212  void* pMappedData = VMA_NULL;
16213  if(map)
16214  {
16215  res = (*m_VulkanFunctions.vkMapMemory)(
16216  m_hDevice,
16217  hMemory,
16218  0,
16219  VK_WHOLE_SIZE,
16220  0,
16221  &pMappedData);
16222  if(res < 0)
16223  {
16224  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16225  FreeVulkanMemory(memTypeIndex, size, hMemory);
16226  return res;
16227  }
16228  }
16229 
16230  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16231  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16232  (*pAllocation)->SetUserData(this, pUserData);
16233  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16234  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16235  {
16236  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16237  }
16238 
16239  return VK_SUCCESS;
16240 }
16241 
16242 void VmaAllocator_T::GetBufferMemoryRequirements(
16243  VkBuffer hBuffer,
16244  VkMemoryRequirements& memReq,
16245  bool& requiresDedicatedAllocation,
16246  bool& prefersDedicatedAllocation) const
16247 {
16248 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16249  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16250  {
16251  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16252  memReqInfo.buffer = hBuffer;
16253 
16254  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16255 
16256  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16257  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16258 
16259  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16260 
16261  memReq = memReq2.memoryRequirements;
16262  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16263  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16264  }
16265  else
16266 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16267  {
16268  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16269  requiresDedicatedAllocation = false;
16270  prefersDedicatedAllocation = false;
16271  }
16272 }
16273 
16274 void VmaAllocator_T::GetImageMemoryRequirements(
16275  VkImage hImage,
16276  VkMemoryRequirements& memReq,
16277  bool& requiresDedicatedAllocation,
16278  bool& prefersDedicatedAllocation) const
16279 {
16280 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16281  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16282  {
16283  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16284  memReqInfo.image = hImage;
16285 
16286  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16287 
16288  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16289  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16290 
16291  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16292 
16293  memReq = memReq2.memoryRequirements;
16294  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16295  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16296  }
16297  else
16298 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16299  {
16300  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16301  requiresDedicatedAllocation = false;
16302  prefersDedicatedAllocation = false;
16303  }
16304 }
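// Both getters above prefer the vkGet*MemoryRequirements2KHR query when available,
// because only it reports requiresDedicatedAllocation / prefersDedicatedAllocation;
// the core Vulkan 1.0 path conservatively reports false for both.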
16305 
16306 VkResult VmaAllocator_T::AllocateMemory(
16307  const VkMemoryRequirements& vkMemReq,
16308  bool requiresDedicatedAllocation,
16309  bool prefersDedicatedAllocation,
16310  VkBuffer dedicatedBuffer,
16311  VkBufferUsageFlags dedicatedBufferUsage,
16312  VkImage dedicatedImage,
16313  const VmaAllocationCreateInfo& createInfo,
16314  VmaSuballocationType suballocType,
16315  size_t allocationCount,
16316  VmaAllocation* pAllocations)
16317 {
16318  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16319 
16320  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16321 
16322  if(vkMemReq.size == 0)
16323  {
16324  return VK_ERROR_VALIDATION_FAILED_EXT;
16325  }
16326  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16327  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16328  {
16329  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16330  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16331  }
16332  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16333  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16334  {
16335  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16336  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16337  }
16338  if(requiresDedicatedAllocation)
16339  {
16340  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16341  {
16342  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16343  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16344  }
16345  if(createInfo.pool != VK_NULL_HANDLE)
16346  {
16347  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16348  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16349  }
16350  }
16351  if((createInfo.pool != VK_NULL_HANDLE) &&
16352  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16353  {
16354  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16355  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16356  }
16357 
16358  if(createInfo.pool != VK_NULL_HANDLE)
16359  {
16360  const VkDeviceSize alignmentForPool = VMA_MAX(
16361  vkMemReq.alignment,
16362  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
16363 
16364  VmaAllocationCreateInfo createInfoForPool = createInfo;
16365  // If memory type is not HOST_VISIBLE, disable MAPPED.
16366  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16367  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16368  {
16369  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16370  }
16371 
16372  return createInfo.pool->m_BlockVector.Allocate(
16373  m_CurrentFrameIndex.load(),
16374  vkMemReq.size,
16375  alignmentForPool,
16376  createInfoForPool,
16377  suballocType,
16378  allocationCount,
16379  pAllocations);
16380  }
16381  else
16382  {
16383  // Bit mask of Vulkan memory types acceptable for this allocation.
16384  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16385  uint32_t memTypeIndex = UINT32_MAX;
16386  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16387  if(res == VK_SUCCESS)
16388  {
16389  VkDeviceSize alignmentForMemType = VMA_MAX(
16390  vkMemReq.alignment,
16391  GetMemoryTypeMinAlignment(memTypeIndex));
16392 
16393  res = AllocateMemoryOfType(
16394  vkMemReq.size,
16395  alignmentForMemType,
16396  requiresDedicatedAllocation || prefersDedicatedAllocation,
16397  dedicatedBuffer,
16398  dedicatedBufferUsage,
16399  dedicatedImage,
16400  createInfo,
16401  memTypeIndex,
16402  suballocType,
16403  allocationCount,
16404  pAllocations);
16405  // Succeeded on first try.
16406  if(res == VK_SUCCESS)
16407  {
16408  return res;
16409  }
16410  // Allocation from this memory type failed. Try other compatible memory types.
16411  else
16412  {
16413  for(;;)
16414  {
16415  // Remove old memTypeIndex from list of possibilities.
16416  memoryTypeBits &= ~(1u << memTypeIndex);
16417  // Find alternative memTypeIndex.
16418  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16419  if(res == VK_SUCCESS)
16420  {
16421  alignmentForMemType = VMA_MAX(
16422  vkMemReq.alignment,
16423  GetMemoryTypeMinAlignment(memTypeIndex));
16424 
16425  res = AllocateMemoryOfType(
16426  vkMemReq.size,
16427  alignmentForMemType,
16428  requiresDedicatedAllocation || prefersDedicatedAllocation,
16429  dedicatedBuffer,
16430  dedicatedBufferUsage,
16431  dedicatedImage,
16432  createInfo,
16433  memTypeIndex,
16434  suballocType,
16435  allocationCount,
16436  pAllocations);
16437  // Allocation from this alternative memory type succeeded.
16438  if(res == VK_SUCCESS)
16439  {
16440  return res;
16441  }
16442  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16443  }
16444  // No other matching memory type index could be found.
16445  else
16446  {
16447  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16449  }
16450  }
16451  }
16452  }
16453  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16454  else
16455  return res;
16456  }
16457 }
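// The retry loop above walks memory types in vmaFindMemoryTypeIndex() preference order,
// masking out each type that failed, so an allocation can fall back to a less preferred
// but still compatible type (e.g. host-visible memory when a DEVICE_LOCAL heap is full).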
16458 
16459 void VmaAllocator_T::FreeMemory(
16460  size_t allocationCount,
16461  const VmaAllocation* pAllocations)
16462 {
16463  VMA_ASSERT(pAllocations);
16464 
16465  for(size_t allocIndex = allocationCount; allocIndex--; )
16466  {
16467  VmaAllocation allocation = pAllocations[allocIndex];
16468 
16469  if(allocation != VK_NULL_HANDLE)
16470  {
16471  if(TouchAllocation(allocation))
16472  {
16473  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16474  {
16475  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16476  }
16477 
16478  switch(allocation->GetType())
16479  {
16480  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16481  {
16482  VmaBlockVector* pBlockVector = VMA_NULL;
16483  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16484  if(hPool != VK_NULL_HANDLE)
16485  {
16486  pBlockVector = &hPool->m_BlockVector;
16487  }
16488  else
16489  {
16490  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16491  pBlockVector = m_pBlockVectors[memTypeIndex];
16492  }
16493  pBlockVector->Free(allocation);
16494  }
16495  break;
16496  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16497  FreeDedicatedMemory(allocation);
16498  break;
16499  default:
16500  VMA_ASSERT(0);
16501  }
16502  }
16503 
16504  // Do this regardless of whether the allocation is lost. Lost allocations still count toward the budget's allocationBytes.
16505  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16506  allocation->SetUserData(this, VMA_NULL);
16507  m_AllocationObjectAllocator.Free(allocation);
16508  }
16509  }
16510 }
16511 
16512 VkResult VmaAllocator_T::ResizeAllocation(
16513  const VmaAllocation alloc,
16514  VkDeviceSize newSize)
16515 {
16516  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16517  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16518  {
16519  return VK_ERROR_VALIDATION_FAILED_EXT;
16520  }
16521  if(newSize == alloc->GetSize())
16522  {
16523  return VK_SUCCESS;
16524  }
16525  return VK_ERROR_OUT_OF_POOL_MEMORY;
16526 }
16527 
16528 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16529 {
16530  // Initialize.
16531  InitStatInfo(pStats->total);
16532  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16533  InitStatInfo(pStats->memoryType[i]);
16534  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16535  InitStatInfo(pStats->memoryHeap[i]);
16536 
16537  // Process default pools.
16538  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16539  {
16540  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16541  VMA_ASSERT(pBlockVector);
16542  pBlockVector->AddStats(pStats);
16543  }
16544 
16545  // Process custom pools.
16546  {
16547  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16548  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16549  {
16550  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16551  }
16552  }
16553 
16554  // Process dedicated allocations.
16555  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16556  {
16557  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16558  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16559  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16560  VMA_ASSERT(pDedicatedAllocVector);
16561  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16562  {
16563  VmaStatInfo allocationStatInfo;
16564  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16565  VmaAddStatInfo(pStats->total, allocationStatInfo);
16566  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16567  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16568  }
16569  }
16570 
16571  // Postprocess.
16572  VmaPostprocessCalcStatInfo(pStats->total);
16573  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16574  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16575  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16576  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16577 }
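// Stats above are aggregated at three levels: per memory type, per heap, and total.
// Default pools, custom pools, and dedicated allocations all contribute.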
16578 
16579 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16580 {
16581 #if VMA_MEMORY_BUDGET
16582  if(m_UseExtMemoryBudget)
16583  {
16584  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16585  {
16586  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16587  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16588  {
16589  const uint32_t heapIndex = firstHeap + i;
16590 
16591  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16592  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16593 
16594  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16595  {
16596  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16597  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16598  }
16599  else
16600  {
16601  outBudget->usage = 0;
16602  }
16603 
16604  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
16605  outBudget->budget = VMA_MIN(
16606  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16607  }
16608  }
16609  else
16610  {
16611  UpdateVulkanBudget(); // Outside of mutex lock
16612  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16613  }
16614  }
16615  else
16616 #endif
16617  {
16618  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16619  {
16620  const uint32_t heapIndex = firstHeap + i;
16621 
16622  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16623  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16624 
16625  outBudget->usage = outBudget->blockBytes;
16626  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16627  }
16628  }
16629 }
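// Usage sketch via the public API: query all heaps and check headroom before a large
// allocation. Values are estimates, refreshed at most every 30 allocator operations
// when VK_EXT_memory_budget is in use.
//   VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
//   vmaGetBudget(allocator, budgets);
//   // hypothetical check against heap 0:
//   const bool fits = budgets[0].usage + requestedSize <= budgets[0].budget;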
16630 
16631 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16632 
16633 VkResult VmaAllocator_T::DefragmentationBegin(
16634  const VmaDefragmentationInfo2& info,
16635  VmaDefragmentationStats* pStats,
16636  VmaDefragmentationContext* pContext)
16637 {
16638  if(info.pAllocationsChanged != VMA_NULL)
16639  {
16640  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16641  }
16642 
16643  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16644  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16645 
16646  (*pContext)->AddPools(info.poolCount, info.pPools);
16647  (*pContext)->AddAllocations(
16648  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16649 
16650  VkResult res = (*pContext)->Defragment(
16651  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16652  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16653  info.commandBuffer, pStats, info.flags);
16654 
16655  if(res != VK_NOT_READY)
16656  {
16657  vma_delete(this, *pContext);
16658  *pContext = VMA_NULL;
16659  }
16660 
16661  return res;
16662 }
16663 
16664 VkResult VmaAllocator_T::DefragmentationEnd(
16665  VmaDefragmentationContext context)
16666 {
16667  vma_delete(this, context);
16668  return VK_SUCCESS;
16669 }
16670 
16671 VkResult VmaAllocator_T::DefragmentationPassBegin(
16672  VmaDefragmentationPassInfo* pInfo,
16673  VmaDefragmentationContext context)
16674 {
16675  return context->DefragmentPassBegin(pInfo);
16676 }
16677 VkResult VmaAllocator_T::DefragmentationPassEnd(
16678  VmaDefragmentationContext context)
16679 {
16680  return context->DefragmentPassEnd();
16681 
16682 }
16683 
16684 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16685 {
16686  if(hAllocation->CanBecomeLost())
16687  {
16688  /*
16689  Warning: This is a carefully designed algorithm.
16690  Do not modify unless you really know what you're doing :)
16691  */
16692  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16693  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16694  for(;;)
16695  {
16696  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16697  {
16698  pAllocationInfo->memoryType = UINT32_MAX;
16699  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16700  pAllocationInfo->offset = 0;
16701  pAllocationInfo->size = hAllocation->GetSize();
16702  pAllocationInfo->pMappedData = VMA_NULL;
16703  pAllocationInfo->pUserData = hAllocation->GetUserData();
16704  return;
16705  }
16706  else if(localLastUseFrameIndex == localCurrFrameIndex)
16707  {
16708  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16709  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16710  pAllocationInfo->offset = hAllocation->GetOffset();
16711  pAllocationInfo->size = hAllocation->GetSize();
16712  pAllocationInfo->pMappedData = VMA_NULL;
16713  pAllocationInfo->pUserData = hAllocation->GetUserData();
16714  return;
16715  }
16716  else // Last use time earlier than current time.
16717  {
16718  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16719  {
16720  localLastUseFrameIndex = localCurrFrameIndex;
16721  }
16722  }
16723  }
16724  }
16725  else
16726  {
16727 #if VMA_STATS_STRING_ENABLED
16728  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16729  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16730  for(;;)
16731  {
16732  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16733  if(localLastUseFrameIndex == localCurrFrameIndex)
16734  {
16735  break;
16736  }
16737  else // Last use time earlier than current time.
16738  {
16739  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16740  {
16741  localLastUseFrameIndex = localCurrFrameIndex;
16742  }
16743  }
16744  }
16745 #endif
16746 
16747  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16748  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16749  pAllocationInfo->offset = hAllocation->GetOffset();
16750  pAllocationInfo->size = hAllocation->GetSize();
16751  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16752  pAllocationInfo->pUserData = hAllocation->GetUserData();
16753  }
16754 }
16755 
16756 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16757 {
16758  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16759  if(hAllocation->CanBecomeLost())
16760  {
16761  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16762  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16763  for(;;)
16764  {
16765  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16766  {
16767  return false;
16768  }
16769  else if(localLastUseFrameIndex == localCurrFrameIndex)
16770  {
16771  return true;
16772  }
16773  else // Last use time earlier than current time.
16774  {
16775  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16776  {
16777  localLastUseFrameIndex = localCurrFrameIndex;
16778  }
16779  }
16780  }
16781  }
16782  else
16783  {
16784 #if VMA_STATS_STRING_ENABLED
16785  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16786  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16787  for(;;)
16788  {
16789  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16790  if(localLastUseFrameIndex == localCurrFrameIndex)
16791  {
16792  break;
16793  }
16794  else // Last use time earlier than current time.
16795  {
16796  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16797  {
16798  localLastUseFrameIndex = localCurrFrameIndex;
16799  }
16800  }
16801  }
16802 #endif
16803 
16804  return true;
16805  }
16806 }
16807 
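/*
A sketch of the per-frame pattern these functions support, assuming `alloc`
was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and `frameIndex`
is maintained by the caller:

\code
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation became lost: destroy the old resource and recreate it.
}
\endcode
*/
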
16808 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16809 {
16810  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16811 
16812  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16813 
16814  if(newCreateInfo.maxBlockCount == 0)
16815  {
16816  newCreateInfo.maxBlockCount = SIZE_MAX;
16817  }
16818  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16819  {
16820  return VK_ERROR_INITIALIZATION_FAILED;
16821  }
16822  // Memory type index out of range or forbidden.
16823  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16824  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16825  {
16826  return VK_ERROR_FEATURE_NOT_PRESENT;
16827  }
16828 
16829  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16830 
16831  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16832 
16833  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16834  if(res != VK_SUCCESS)
16835  {
16836  vma_delete(this, *pPool);
16837  *pPool = VMA_NULL;
16838  return res;
16839  }
16840 
16841  // Add to m_Pools.
16842  {
16843  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16844  (*pPool)->SetId(m_NextPoolId++);
16845  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16846  }
16847 
16848  return VK_SUCCESS;
16849 }
16850 
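/*
A hedged sketch of creating a custom pool through the public API; the
memoryTypeIndex would typically come from one of the vmaFindMemoryTypeIndex*
helpers defined further below.

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // assumed found beforehand
poolCreateInfo.blockSize = 0;     // 0 = use the preferred block size
poolCreateInfo.maxBlockCount = 0; // 0 is translated to SIZE_MAX above

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from it via VmaAllocationCreateInfo::pool ...
vmaDestroyPool(allocator, pool);
\endcode
*/
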
16851 void VmaAllocator_T::DestroyPool(VmaPool pool)
16852 {
16853  // Remove from m_Pools.
16854  {
16855  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16856  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16857  VMA_ASSERT(success && "Pool not found in Allocator.");
16858  }
16859 
16860  vma_delete(this, pool);
16861 }
16862 
16863 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16864 {
16865  pool->m_BlockVector.GetPoolStats(pPoolStats);
16866 }
16867 
16868 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16869 {
16870  m_CurrentFrameIndex.store(frameIndex);
16871 
16872 #if VMA_MEMORY_BUDGET
16873  if(m_UseExtMemoryBudget)
16874  {
16875  UpdateVulkanBudget();
16876  }
16877 #endif // #if VMA_MEMORY_BUDGET
16878 }
16879 
16880 void VmaAllocator_T::MakePoolAllocationsLost(
16881  VmaPool hPool,
16882  size_t* pLostAllocationCount)
16883 {
16884  hPool->m_BlockVector.MakePoolAllocationsLost(
16885  m_CurrentFrameIndex.load(),
16886  pLostAllocationCount);
16887 }
16888 
16889 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
16890 {
16891  return hPool->m_BlockVector.CheckCorruption();
16892 }
16893 
16894 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16895 {
16896  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16897 
16898  // Process default pools.
16899  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16900  {
16901  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16902  {
16903  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16904  VMA_ASSERT(pBlockVector);
16905  VkResult localRes = pBlockVector->CheckCorruption();
16906  switch(localRes)
16907  {
16908  case VK_ERROR_FEATURE_NOT_PRESENT:
16909  break;
16910  case VK_SUCCESS:
16911  finalRes = VK_SUCCESS;
16912  break;
16913  default:
16914  return localRes;
16915  }
16916  }
16917  }
16918 
16919  // Process custom pools.
16920  {
16921  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16922  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16923  {
16924  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16925  {
16926  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16927  switch(localRes)
16928  {
16929  case VK_ERROR_FEATURE_NOT_PRESENT:
16930  break;
16931  case VK_SUCCESS:
16932  finalRes = VK_SUCCESS;
16933  break;
16934  default:
16935  return localRes;
16936  }
16937  }
16938  }
16939  }
16940 
16941  return finalRes;
16942 }
16943 
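/*
A sketch of invoking the corruption check from user code. It only does real
work when the VMA_DEBUG_MARGIN/VMA_DEBUG_DETECT_CORRUPTION macros are
enabled, which the return codes below reflect:

\code
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
// VK_SUCCESS: at least one block was checked, no corruption found.
// VK_ERROR_FEATURE_NOT_PRESENT: nothing could be checked (margins disabled).
// Any other error code: a corrupted margin was detected.
\endcode
*/
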
16944 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
16945 {
16946  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
16947  (*pAllocation)->InitLost();
16948 }
16949 
16950 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
16951 {
16952  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
16953 
16954  // HeapSizeLimit is in effect for this heap.
16955  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16956  {
16957  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16958  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
16959  for(;;)
16960  {
16961  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
16962  if(blockBytesAfterAllocation > heapSize)
16963  {
16964  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16965  }
16966  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
16967  {
16968  break;
16969  }
16970  }
16971  }
16972  else
16973  {
16974  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
16975  }
16976 
16977  // VULKAN CALL vkAllocateMemory.
16978  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
16979 
16980  if(res == VK_SUCCESS)
16981  {
16982 #if VMA_MEMORY_BUDGET
16983  ++m_Budget.m_OperationsSinceBudgetFetch;
16984 #endif
16985 
16986  // Informative callback.
16987  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
16988  {
16989  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
16990  }
16991  }
16992  else
16993  {
16994  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
16995  }
16996 
16997  return res;
16998 }
16999 
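/*
The loop above is a classic compare-exchange reservation: the bytes are
charged to m_BlockBytes before vkAllocateMemory runs, retrying if another
thread raced on the counter. A stripped-down, self-contained sketch of the
same idea (not the library's code):

\code
#include <atomic>
#include <cstdint>

// Returns true if `bytes` could be reserved without exceeding `limit`.
bool TryReserve(std::atomic<uint64_t>& counter, uint64_t bytes, uint64_t limit)
{
    uint64_t current = counter.load();
    for(;;)
    {
        const uint64_t desired = current + bytes;
        if(desired > limit)
        {
            return false; // reservation would exceed the heap size limit
        }
        // On failure `current` is refreshed with the latest counter value.
        if(counter.compare_exchange_strong(current, desired))
        {
            return true;
        }
    }
}
\endcode
*/
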
17000 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17001 {
17002  // Informative callback.
17003  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17004  {
17005  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17006  }
17007 
17008  // VULKAN CALL vkFreeMemory.
17009  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17010 
17011  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17012 }
17013 
17014 VkResult VmaAllocator_T::BindVulkanBuffer(
17015  VkDeviceMemory memory,
17016  VkDeviceSize memoryOffset,
17017  VkBuffer buffer,
17018  const void* pNext)
17019 {
17020  if(pNext != VMA_NULL)
17021  {
17022 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17023  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17024  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17025  {
17026  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17027  bindBufferMemoryInfo.pNext = pNext;
17028  bindBufferMemoryInfo.buffer = buffer;
17029  bindBufferMemoryInfo.memory = memory;
17030  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17031  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17032  }
17033  else
17034 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17035  {
17036  return VK_ERROR_EXTENSION_NOT_PRESENT;
17037  }
17038  }
17039  else
17040  {
17041  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17042  }
17043 }
17044 
17045 VkResult VmaAllocator_T::BindVulkanImage(
17046  VkDeviceMemory memory,
17047  VkDeviceSize memoryOffset,
17048  VkImage image,
17049  const void* pNext)
17050 {
17051  if(pNext != VMA_NULL)
17052  {
17053 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17054  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17055  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17056  {
17057  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17058  bindImageMemoryInfo.pNext = pNext;
17059  bindImageMemoryInfo.image = image;
17060  bindImageMemoryInfo.memory = memory;
17061  bindImageMemoryInfo.memoryOffset = memoryOffset;
17062  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17063  }
17064  else
17065 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17066  {
17067  return VK_ERROR_EXTENSION_NOT_PRESENT;
17068  }
17069  }
17070  else
17071  {
17072  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17073  }
17074 }
17075 
17076 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17077 {
17078  if(hAllocation->CanBecomeLost())
17079  {
17080  return VK_ERROR_MEMORY_MAP_FAILED;
17081  }
17082 
17083  switch(hAllocation->GetType())
17084  {
17085  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17086  {
17087  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17088  char *pBytes = VMA_NULL;
17089  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17090  if(res == VK_SUCCESS)
17091  {
17092  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17093  hAllocation->BlockAllocMap();
17094  }
17095  return res;
17096  }
17097  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17098  return hAllocation->DedicatedAllocMap(this, ppData);
17099  default:
17100  VMA_ASSERT(0);
17101  return VK_ERROR_MEMORY_MAP_FAILED;
17102  }
17103 }
17104 
17105 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17106 {
17107  switch(hAllocation->GetType())
17108  {
17109  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17110  {
17111  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17112  hAllocation->BlockAllocUnmap();
17113  pBlock->Unmap(this, 1);
17114  }
17115  break;
17116  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17117  hAllocation->DedicatedAllocUnmap(this);
17118  break;
17119  default:
17120  VMA_ASSERT(0);
17121  }
17122 }
17123 
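/*
A hedged sketch of the public wrappers around Map/Unmap above; `alloc` is
assumed to be host-visible and `srcData`/`srcSize` are the caller's:

\code
void* mapped = VMA_NULL;
VkResult res = vmaMapMemory(allocator, alloc, &mapped);
if(res == VK_SUCCESS)
{
    memcpy(mapped, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, alloc);
    // For non-HOST_COHERENT memory types, flush explicitly:
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
}
\endcode
*/
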
17124 VkResult VmaAllocator_T::BindBufferMemory(
17125  VmaAllocation hAllocation,
17126  VkDeviceSize allocationLocalOffset,
17127  VkBuffer hBuffer,
17128  const void* pNext)
17129 {
17130  VkResult res = VK_SUCCESS;
17131  switch(hAllocation->GetType())
17132  {
17133  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17134  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17135  break;
17136  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17137  {
17138  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17139  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17140  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17141  break;
17142  }
17143  default:
17144  VMA_ASSERT(0);
17145  }
17146  return res;
17147 }
17148 
17149 VkResult VmaAllocator_T::BindImageMemory(
17150  VmaAllocation hAllocation,
17151  VkDeviceSize allocationLocalOffset,
17152  VkImage hImage,
17153  const void* pNext)
17154 {
17155  VkResult res = VK_SUCCESS;
17156  switch(hAllocation->GetType())
17157  {
17158  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17159  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17160  break;
17161  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17162  {
17163  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17164  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17165  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17166  break;
17167  }
17168  default:
17169  VMA_ASSERT(0);
17170  }
17171  return res;
17172 }
17173 
17174 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17175  VmaAllocation hAllocation,
17176  VkDeviceSize offset, VkDeviceSize size,
17177  VMA_CACHE_OPERATION op)
17178 {
17179  VkResult res = VK_SUCCESS;
17180 
17181  VkMappedMemoryRange memRange = {};
17182  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17183  {
17184  switch(op)
17185  {
17186  case VMA_CACHE_FLUSH:
17187  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17188  break;
17189  case VMA_CACHE_INVALIDATE:
17190  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17191  break;
17192  default:
17193  VMA_ASSERT(0);
17194  }
17195  }
17196  // else: Just ignore this call.
17197  return res;
17198 }
17199 
17200 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17201  uint32_t allocationCount,
17202  const VmaAllocation* allocations,
17203  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17204  VMA_CACHE_OPERATION op)
17205 {
17206  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17207  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17208  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17209 
17210  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17211  {
17212  const VmaAllocation alloc = allocations[allocIndex];
17213  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17214  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17215  VkMappedMemoryRange newRange;
17216  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17217  {
17218  ranges.push_back(newRange);
17219  }
17220  }
17221 
17222  VkResult res = VK_SUCCESS;
17223  if(!ranges.empty())
17224  {
17225  switch(op)
17226  {
17227  case VMA_CACHE_FLUSH:
17228  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17229  break;
17230  case VMA_CACHE_INVALIDATE:
17231  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17232  break;
17233  default:
17234  VMA_ASSERT(0);
17235  }
17236  }
17237  // else: Just ignore this call.
17238  return res;
17239 }
17240 
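/*
A sketch of the batched public entry point built on the function above - one
Vulkan call covers many allocations; null offset/size arrays mean "whole
allocation" for each element. `allocA`/`allocB` are assumed host-visible.

\code
VmaAllocation allocsToFlush[2] = { allocA, allocB };
VkResult res = vmaFlushAllocations(allocator, 2, allocsToFlush, VMA_NULL, VMA_NULL);
\endcode
*/
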
17241 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17242 {
17243  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17244 
17245  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17246  {
17247  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17248  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
17249  VMA_ASSERT(pDedicatedAllocations);
17250  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
17251  VMA_ASSERT(success);
17252  }
17253 
17254  VkDeviceMemory hMemory = allocation->GetMemory();
17255 
17256  /*
17257  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
17258  before vkFreeMemory.
17259 
17260  if(allocation->GetMappedData() != VMA_NULL)
17261  {
17262  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17263  }
17264  */
17265 
17266  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17267 
17268  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17269 }
17270 
17271 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17272 {
17273  VkBufferCreateInfo dummyBufCreateInfo;
17274  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17275 
17276  uint32_t memoryTypeBits = 0;
17277 
17278  // Create buffer.
17279  VkBuffer buf = VK_NULL_HANDLE;
17280  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17281  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17282  if(res == VK_SUCCESS)
17283  {
17284  // Query for supported memory types.
17285  VkMemoryRequirements memReq;
17286  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17287  memoryTypeBits = memReq.memoryTypeBits;
17288 
17289  // Destroy buffer.
17290  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17291  }
17292 
17293  return memoryTypeBits;
17294 }
17295 
17296 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17297 {
17298  // Make sure memory information is already fetched.
17299  VMA_ASSERT(GetMemoryTypeCount() > 0);
17300 
17301  uint32_t memoryTypeBits = UINT32_MAX;
17302 
17303  if(!m_UseAmdDeviceCoherentMemory)
17304  {
17305  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17306  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17307  {
17308  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17309  {
17310  memoryTypeBits &= ~(1u << memTypeIndex);
17311  }
17312  }
17313  }
17314 
17315  return memoryTypeBits;
17316 }
17317 
17318 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17319  VmaAllocation allocation,
17320  VkDeviceSize offset, VkDeviceSize size,
17321  VkMappedMemoryRange& outRange) const
17322 {
17323  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17324  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17325  {
17326  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17327  const VkDeviceSize allocationSize = allocation->GetSize();
17328  VMA_ASSERT(offset <= allocationSize);
17329 
17330  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17331  outRange.pNext = VMA_NULL;
17332  outRange.memory = allocation->GetMemory();
17333 
17334  switch(allocation->GetType())
17335  {
17336  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17337  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17338  if(size == VK_WHOLE_SIZE)
17339  {
17340  outRange.size = allocationSize - outRange.offset;
17341  }
17342  else
17343  {
17344  VMA_ASSERT(offset + size <= allocationSize);
17345  outRange.size = VMA_MIN(
17346  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17347  allocationSize - outRange.offset);
17348  }
17349  break;
17350  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17351  {
17352  // 1. Still within this allocation.
17353  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17354  if(size == VK_WHOLE_SIZE)
17355  {
17356  size = allocationSize - offset;
17357  }
17358  else
17359  {
17360  VMA_ASSERT(offset + size <= allocationSize);
17361  }
17362  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17363 
17364  // 2. Adjust to whole block.
17365  const VkDeviceSize allocationOffset = allocation->GetOffset();
17366  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17367  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17368  outRange.offset += allocationOffset;
17369  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17370 
17371  break;
17372  }
17373  default:
17374  VMA_ASSERT(0);
17375  }
17376  return true;
17377  }
17378  return false;
17379 }
17380 
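/*
A worked numeric example of the nonCoherentAtomSize alignment above,
assuming an illustrative atom size of 64 (the real value comes from
VkPhysicalDeviceLimits::nonCoherentAtomSize):

\code
// Helpers equivalent to VmaAlignDown/VmaAlignUp for any positive alignment:
constexpr VkDeviceSize AlignDown(VkDeviceSize x, VkDeviceSize a) { return (x / a) * a; }
constexpr VkDeviceSize AlignUp(VkDeviceSize x, VkDeviceSize a) { return ((x + a - 1) / a) * a; }

// Requested range: offset = 100, size = 200, atom = 64.
// rangeOffset = AlignDown(100, 64) = 64
// rangeSize   = AlignUp(200 + (100 - 64), 64) = AlignUp(236, 64) = 256
// The resulting range [64, 320) fully covers the requested [100, 300).
\endcode
*/
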
17381 #if VMA_MEMORY_BUDGET
17382 
17383 void VmaAllocator_T::UpdateVulkanBudget()
17384 {
17385  VMA_ASSERT(m_UseExtMemoryBudget);
17386 
17387  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
17388 
17389  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
17390  VmaPnextChainPushFront(&memProps, &budgetProps);
17391 
17392  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
17393 
17394  {
17395  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
17396 
17397  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
17398  {
17399  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
17400  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
17401  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
17402 
17403  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
17404  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
17405  {
17406  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17407  }
17408  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
17409  {
17410  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
17411  }
17412  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
17413  {
17414  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17415  }
17416  }
17417  m_Budget.m_OperationsSinceBudgetFetch = 0;
17418  }
17419 }
17420 
17421 #endif // #if VMA_MEMORY_BUDGET
17422 
17423 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
17424 {
17425  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
17426  !hAllocation->CanBecomeLost() &&
17427  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17428  {
17429  void* pData = VMA_NULL;
17430  VkResult res = Map(hAllocation, &pData);
17431  if(res == VK_SUCCESS)
17432  {
17433  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
17434  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
17435  Unmap(hAllocation);
17436  }
17437  else
17438  {
17439  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
17440  }
17441  }
17442 }
17443 
17444 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
17445 {
17446  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
17447  if(memoryTypeBits == UINT32_MAX)
17448  {
17449  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
17450  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
17451  }
17452  return memoryTypeBits;
17453 }
17454 
17455 #if VMA_STATS_STRING_ENABLED
17456 
17457 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
17458 {
17459  bool dedicatedAllocationsStarted = false;
17460  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17461  {
17462  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17463  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
17464  VMA_ASSERT(pDedicatedAllocVector);
17465  if(pDedicatedAllocVector->empty() == false)
17466  {
17467  if(dedicatedAllocationsStarted == false)
17468  {
17469  dedicatedAllocationsStarted = true;
17470  json.WriteString("DedicatedAllocations");
17471  json.BeginObject();
17472  }
17473 
17474  json.BeginString("Type ");
17475  json.ContinueString(memTypeIndex);
17476  json.EndString();
17477 
17478  json.BeginArray();
17479 
17480  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
17481  {
17482  json.BeginObject(true);
17483  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
17484  hAlloc->PrintParameters(json);
17485  json.EndObject();
17486  }
17487 
17488  json.EndArray();
17489  }
17490  }
17491  if(dedicatedAllocationsStarted)
17492  {
17493  json.EndObject();
17494  }
17495 
17496  {
17497  bool allocationsStarted = false;
17498  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17499  {
17500  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
17501  {
17502  if(allocationsStarted == false)
17503  {
17504  allocationsStarted = true;
17505  json.WriteString("DefaultPools");
17506  json.BeginObject();
17507  }
17508 
17509  json.BeginString("Type ");
17510  json.ContinueString(memTypeIndex);
17511  json.EndString();
17512 
17513  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
17514  }
17515  }
17516  if(allocationsStarted)
17517  {
17518  json.EndObject();
17519  }
17520  }
17521 
17522  // Custom pools
17523  {
17524  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17525  const size_t poolCount = m_Pools.size();
17526  if(poolCount > 0)
17527  {
17528  json.WriteString("Pools");
17529  json.BeginObject();
17530  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
17531  {
17532  json.BeginString();
17533  json.ContinueString(m_Pools[poolIndex]->GetId());
17534  json.EndString();
17535 
17536  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
17537  }
17538  json.EndObject();
17539  }
17540  }
17541 }
17542 
17543 #endif // #if VMA_STATS_STRING_ENABLED
17544 
17545 ////////////////////////////////////////////////////////////////////////////////
17546 // Public interface
17547 
17548 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17549  const VmaAllocatorCreateInfo* pCreateInfo,
17550  VmaAllocator* pAllocator)
17551 {
17552  VMA_ASSERT(pCreateInfo && pAllocator);
17553  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17554  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17555  VMA_DEBUG_LOG("vmaCreateAllocator");
17556  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17557  return (*pAllocator)->Init(pCreateInfo);
17558 }
17559 
17560 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17561  VmaAllocator allocator)
17562 {
17563  if(allocator != VK_NULL_HANDLE)
17564  {
17565  VMA_DEBUG_LOG("vmaDestroyAllocator");
17566  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17567  vma_delete(&allocationCallbacks, allocator);
17568  }
17569 }
17570 
17571 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17572 {
17573  VMA_ASSERT(allocator && pAllocatorInfo);
17574  pAllocatorInfo->instance = allocator->m_hInstance;
17575  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17576  pAllocatorInfo->device = allocator->m_hDevice;
17577 }
17578 
17579 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17580  VmaAllocator allocator,
17581  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17582 {
17583  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17584  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17585 }
17586 
17587 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17588  VmaAllocator allocator,
17589  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17590 {
17591  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17592  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17593 }
17594 
17595 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17596  VmaAllocator allocator,
17597  uint32_t memoryTypeIndex,
17598  VkMemoryPropertyFlags* pFlags)
17599 {
17600  VMA_ASSERT(allocator && pFlags);
17601  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17602  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17603 }
17604 
17605 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17606  VmaAllocator allocator,
17607  uint32_t frameIndex)
17608 {
17609  VMA_ASSERT(allocator);
17610  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17611 
17612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17613 
17614  allocator->SetCurrentFrameIndex(frameIndex);
17615 }
17616 
17617 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17618  VmaAllocator allocator,
17619  VmaStats* pStats)
17620 {
17621  VMA_ASSERT(allocator && pStats);
17622  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17623  allocator->CalculateStats(pStats);
17624 }
17625 
17626 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17627  VmaAllocator allocator,
17628  VmaBudget* pBudget)
17629 {
17630  VMA_ASSERT(allocator && pBudget);
17631  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17632  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17633 }
17634 
17635 #if VMA_STATS_STRING_ENABLED
17636 
17637 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17638  VmaAllocator allocator,
17639  char** ppStatsString,
17640  VkBool32 detailedMap)
17641 {
17642  VMA_ASSERT(allocator && ppStatsString);
17643  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17644 
17645  VmaStringBuilder sb(allocator);
17646  {
17647  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17648  json.BeginObject();
17649 
17650  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17651  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17652 
17653  VmaStats stats;
17654  allocator->CalculateStats(&stats);
17655 
17656  json.WriteString("Total");
17657  VmaPrintStatInfo(json, stats.total);
17658 
17659  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17660  {
17661  json.BeginString("Heap ");
17662  json.ContinueString(heapIndex);
17663  json.EndString();
17664  json.BeginObject();
17665 
17666  json.WriteString("Size");
17667  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17668 
17669  json.WriteString("Flags");
17670  json.BeginArray(true);
17671  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17672  {
17673  json.WriteString("DEVICE_LOCAL");
17674  }
17675  json.EndArray();
17676 
17677  json.WriteString("Budget");
17678  json.BeginObject();
17679  {
17680  json.WriteString("BlockBytes");
17681  json.WriteNumber(budget[heapIndex].blockBytes);
17682  json.WriteString("AllocationBytes");
17683  json.WriteNumber(budget[heapIndex].allocationBytes);
17684  json.WriteString("Usage");
17685  json.WriteNumber(budget[heapIndex].usage);
17686  json.WriteString("Budget");
17687  json.WriteNumber(budget[heapIndex].budget);
17688  }
17689  json.EndObject();
17690 
17691  if(stats.memoryHeap[heapIndex].blockCount > 0)
17692  {
17693  json.WriteString("Stats");
17694  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17695  }
17696 
17697  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17698  {
17699  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17700  {
17701  json.BeginString("Type ");
17702  json.ContinueString(typeIndex);
17703  json.EndString();
17704 
17705  json.BeginObject();
17706 
17707  json.WriteString("Flags");
17708  json.BeginArray(true);
17709  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17710  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17711  {
17712  json.WriteString("DEVICE_LOCAL");
17713  }
17714  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17715  {
17716  json.WriteString("HOST_VISIBLE");
17717  }
17718  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17719  {
17720  json.WriteString("HOST_COHERENT");
17721  }
17722  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17723  {
17724  json.WriteString("HOST_CACHED");
17725  }
17726  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17727  {
17728  json.WriteString("LAZILY_ALLOCATED");
17729  }
17730  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17731  {
17732  json.WriteString("PROTECTED");
17733  }
17734  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17735  {
17736  json.WriteString("DEVICE_COHERENT");
17737  }
17738  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17739  {
17740  json.WriteString("DEVICE_UNCACHED");
17741  }
17742  json.EndArray();
17743 
17744  if(stats.memoryType[typeIndex].blockCount > 0)
17745  {
17746  json.WriteString("Stats");
17747  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17748  }
17749 
17750  json.EndObject();
17751  }
17752  }
17753 
17754  json.EndObject();
17755  }
17756  if(detailedMap == VK_TRUE)
17757  {
17758  allocator->PrintDetailedMap(json);
17759  }
17760 
17761  json.EndObject();
17762  }
17763 
17764  const size_t len = sb.GetLength();
17765  char* const pChars = vma_new_array(allocator, char, len + 1);
17766  if(len > 0)
17767  {
17768  memcpy(pChars, sb.GetData(), len);
17769  }
17770  pChars[len] = '\0';
17771  *ppStatsString = pChars;
17772 }
17773 
17774 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17775  VmaAllocator allocator,
17776  char* pStatsString)
17777 {
17778  if(pStatsString != VMA_NULL)
17779  {
17780  VMA_ASSERT(allocator);
17781  size_t len = strlen(pStatsString);
17782  vma_delete_array(allocator, pStatsString, len + 1);
17783  }
17784 }
17785 
17786 #endif // #if VMA_STATS_STRING_ENABLED
17787 
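/*
A sketch of dumping the JSON built above (available when
VMA_STATS_STRING_ENABLED is nonzero); the string must be returned through
vmaFreeStatsString:

\code
char* statsJson = VMA_NULL;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE = detailed map
// Write statsJson to a file or a log; it is a single JSON object.
vmaFreeStatsString(allocator, statsJson);
\endcode
*/
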
17788 /*
17789 This function is not protected by any mutex because it just reads immutable data.
17790 */
17791 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17792  VmaAllocator allocator,
17793  uint32_t memoryTypeBits,
17794  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17795  uint32_t* pMemoryTypeIndex)
17796 {
17797  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17798  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17799  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17800 
17801  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17802 
17803  if(pAllocationCreateInfo->memoryTypeBits != 0)
17804  {
17805  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17806  }
17807 
17808  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17809  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17810  uint32_t notPreferredFlags = 0;
17811 
17812  // Convert usage to requiredFlags and preferredFlags.
17813  switch(pAllocationCreateInfo->usage)
17814  {
17815  case VMA_MEMORY_USAGE_UNKNOWN:
17816  break;
17817  case VMA_MEMORY_USAGE_GPU_ONLY:
17818  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17819  {
17820  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17821  }
17822  break;
17823  case VMA_MEMORY_USAGE_CPU_ONLY:
17824  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17825  break;
17826  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17827  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17828  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17829  {
17830  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17831  }
17832  break;
17833  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17834  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17835  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17836  break;
17837  case VMA_MEMORY_USAGE_CPU_COPY:
17838  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17839  break;
17840  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17841  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17842  break;
17843  default:
17844  VMA_ASSERT(0);
17845  break;
17846  }
17847 
17848  // Avoid DEVICE_COHERENT unless explicitly requested.
17849  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17850  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17851  {
17852  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17853  }
17854 
17855  *pMemoryTypeIndex = UINT32_MAX;
17856  uint32_t minCost = UINT32_MAX;
17857  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17858  memTypeIndex < allocator->GetMemoryTypeCount();
17859  ++memTypeIndex, memTypeBit <<= 1)
17860  {
17861  // This memory type is acceptable according to memoryTypeBits bitmask.
17862  if((memTypeBit & memoryTypeBits) != 0)
17863  {
17864  const VkMemoryPropertyFlags currFlags =
17865  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17866  // This memory type contains requiredFlags.
17867  if((requiredFlags & ~currFlags) == 0)
17868  {
17869  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17870  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17871  VmaCountBitsSet(currFlags & notPreferredFlags);
17872  // Remember memory type with lowest cost.
17873  if(currCost < minCost)
17874  {
17875  *pMemoryTypeIndex = memTypeIndex;
17876  if(currCost == 0)
17877  {
17878  return VK_SUCCESS;
17879  }
17880  minCost = currCost;
17881  }
17882  }
17883  }
17884  }
17885  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17886 }
17887 
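/*
A minimal sketch of calling the search above directly; passing UINT32_MAX as
memoryTypeBits lets every type allowed by the global bits be considered:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// On VK_SUCCESS, memTypeIndex holds the lowest-cost acceptable type.
\endcode
*/
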
17888 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17889  VmaAllocator allocator,
17890  const VkBufferCreateInfo* pBufferCreateInfo,
17891  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17892  uint32_t* pMemoryTypeIndex)
17893 {
17894  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17895  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17896  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17897  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17898 
17899  const VkDevice hDev = allocator->m_hDevice;
17900  VkBuffer hBuffer = VK_NULL_HANDLE;
17901  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17902  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17903  if(res == VK_SUCCESS)
17904  {
17905  VkMemoryRequirements memReq = {};
17906  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17907  hDev, hBuffer, &memReq);
17908 
17909  res = vmaFindMemoryTypeIndex(
17910  allocator,
17911  memReq.memoryTypeBits,
17912  pAllocationCreateInfo,
17913  pMemoryTypeIndex);
17914 
17915  allocator->GetVulkanFunctions().vkDestroyBuffer(
17916  hDev, hBuffer, allocator->GetAllocationCallbacks());
17917  }
17918  return res;
17919 }
17920 
17921 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17922  VmaAllocator allocator,
17923  const VkImageCreateInfo* pImageCreateInfo,
17924  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17925  uint32_t* pMemoryTypeIndex)
17926 {
17927  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17928  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17929  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17930  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17931 
17932  const VkDevice hDev = allocator->m_hDevice;
17933  VkImage hImage = VK_NULL_HANDLE;
17934  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17935  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17936  if(res == VK_SUCCESS)
17937  {
17938  VkMemoryRequirements memReq = {};
17939  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17940  hDev, hImage, &memReq);
17941 
17942  res = vmaFindMemoryTypeIndex(
17943  allocator,
17944  memReq.memoryTypeBits,
17945  pAllocationCreateInfo,
17946  pMemoryTypeIndex);
17947 
17948  allocator->GetVulkanFunctions().vkDestroyImage(
17949  hDev, hImage, allocator->GetAllocationCallbacks());
17950  }
17951  return res;
17952 }
17953 
17954 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
17955  VmaAllocator allocator,
17956  const VmaPoolCreateInfo* pCreateInfo,
17957  VmaPool* pPool)
17958 {
17959  VMA_ASSERT(allocator && pCreateInfo && pPool);
17960 
17961  VMA_DEBUG_LOG("vmaCreatePool");
17962 
17963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17964 
17965  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17966 
17967 #if VMA_RECORDING_ENABLED
17968  if(allocator->GetRecorder() != VMA_NULL)
17969  {
17970  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17971  }
17972 #endif
17973 
17974  return res;
17975 }
17976 
17977 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17978  VmaAllocator allocator,
17979  VmaPool pool)
17980 {
17981  VMA_ASSERT(allocator);
17982 
17983  if(pool == VK_NULL_HANDLE)
17984  {
17985  return;
17986  }
17987 
17988  VMA_DEBUG_LOG("vmaDestroyPool");
17989 
17990  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17991 
17992 #if VMA_RECORDING_ENABLED
17993  if(allocator->GetRecorder() != VMA_NULL)
17994  {
17995  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
17996  }
17997 #endif
17998 
17999  allocator->DestroyPool(pool);
18000 }
18001 
18002 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18003  VmaAllocator allocator,
18004  VmaPool pool,
18005  VmaPoolStats* pPoolStats)
18006 {
18007  VMA_ASSERT(allocator && pool && pPoolStats);
18008 
18009  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18010 
18011  allocator->GetPoolStats(pool, pPoolStats);
18012 }
18013 
18014 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18015  VmaAllocator allocator,
18016  VmaPool pool,
18017  size_t* pLostAllocationCount)
18018 {
18019  VMA_ASSERT(allocator && pool);
18020 
18021  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18022 
18023 #if VMA_RECORDING_ENABLED
18024  if(allocator->GetRecorder() != VMA_NULL)
18025  {
18026  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18027  }
18028 #endif
18029 
18030  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18031 }
18032 
18033 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18034 {
18035  VMA_ASSERT(allocator && pool);
18036 
18037  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18038 
18039  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18040 
18041  return allocator->CheckPoolCorruption(pool);
18042 }
18043 
18044 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18045  VmaAllocator allocator,
18046  VmaPool pool,
18047  const char** ppName)
18048 {
18049  VMA_ASSERT(allocator && pool && ppName);
18050 
18051  VMA_DEBUG_LOG("vmaGetPoolName");
18052 
18053  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18054 
18055  *ppName = pool->GetName();
18056 }
18057 
18058 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18059  VmaAllocator allocator,
18060  VmaPool pool,
18061  const char* pName)
18062 {
18063  VMA_ASSERT(allocator && pool);
18064 
18065  VMA_DEBUG_LOG("vmaSetPoolName");
18066 
18067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18068 
18069  pool->SetName(pName);
18070 
18071 #if VMA_RECORDING_ENABLED
18072  if(allocator->GetRecorder() != VMA_NULL)
18073  {
18074  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18075  }
18076 #endif
18077 }
18078 
18079 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18080  VmaAllocator allocator,
18081  const VkMemoryRequirements* pVkMemoryRequirements,
18082  const VmaAllocationCreateInfo* pCreateInfo,
18083  VmaAllocation* pAllocation,
18084  VmaAllocationInfo* pAllocationInfo)
18085 {
18086  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18087 
18088  VMA_DEBUG_LOG("vmaAllocateMemory");
18089 
18090  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18091 
18092  VkResult result = allocator->AllocateMemory(
18093  *pVkMemoryRequirements,
18094  false, // requiresDedicatedAllocation
18095  false, // prefersDedicatedAllocation
18096  VK_NULL_HANDLE, // dedicatedBuffer
18097  UINT32_MAX, // dedicatedBufferUsage
18098  VK_NULL_HANDLE, // dedicatedImage
18099  *pCreateInfo,
18100  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18101  1, // allocationCount
18102  pAllocation);
18103 
18104 #if VMA_RECORDING_ENABLED
18105  if(allocator->GetRecorder() != VMA_NULL)
18106  {
18107  allocator->GetRecorder()->RecordAllocateMemory(
18108  allocator->GetCurrentFrameIndex(),
18109  *pVkMemoryRequirements,
18110  *pCreateInfo,
18111  *pAllocation);
18112  }
18113 #endif
18114 
18115  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18116  {
18117  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18118  }
18119 
18120  return result;
18121 }
18122 
18123 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18124  VmaAllocator allocator,
18125  const VkMemoryRequirements* pVkMemoryRequirements,
18126  const VmaAllocationCreateInfo* pCreateInfo,
18127  size_t allocationCount,
18128  VmaAllocation* pAllocations,
18129  VmaAllocationInfo* pAllocationInfo)
18130 {
18131  if(allocationCount == 0)
18132  {
18133  return VK_SUCCESS;
18134  }
18135 
18136  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18137 
18138  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18139 
18140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18141 
18142  VkResult result = allocator->AllocateMemory(
18143  *pVkMemoryRequirements,
18144  false, // requiresDedicatedAllocation
18145  false, // prefersDedicatedAllocation
18146  VK_NULL_HANDLE, // dedicatedBuffer
18147  UINT32_MAX, // dedicatedBufferUsage
18148  VK_NULL_HANDLE, // dedicatedImage
18149  *pCreateInfo,
18150  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18151  allocationCount,
18152  pAllocations);
18153 
18154 #if VMA_RECORDING_ENABLED
18155  if(allocator->GetRecorder() != VMA_NULL)
18156  {
18157  allocator->GetRecorder()->RecordAllocateMemoryPages(
18158  allocator->GetCurrentFrameIndex(),
18159  *pVkMemoryRequirements,
18160  *pCreateInfo,
18161  (uint64_t)allocationCount,
18162  pAllocations);
18163  }
18164 #endif
18165 
18166  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18167  {
18168  for(size_t i = 0; i < allocationCount; ++i)
18169  {
18170  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18171  }
18172  }
18173 
18174  return result;
18175 }
18176 
18177 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18178  VmaAllocator allocator,
18179  VkBuffer buffer,
18180  const VmaAllocationCreateInfo* pCreateInfo,
18181  VmaAllocation* pAllocation,
18182  VmaAllocationInfo* pAllocationInfo)
18183 {
18184  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18185 
18186  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18187 
18188  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18189 
18190  VkMemoryRequirements vkMemReq = {};
18191  bool requiresDedicatedAllocation = false;
18192  bool prefersDedicatedAllocation = false;
18193  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18194  requiresDedicatedAllocation,
18195  prefersDedicatedAllocation);
18196 
18197  VkResult result = allocator->AllocateMemory(
18198  vkMemReq,
18199  requiresDedicatedAllocation,
18200  prefersDedicatedAllocation,
18201  buffer, // dedicatedBuffer
18202  UINT32_MAX, // dedicatedBufferUsage
18203  VK_NULL_HANDLE, // dedicatedImage
18204  *pCreateInfo,
18205  VMA_SUBALLOCATION_TYPE_BUFFER,
18206  1, // allocationCount
18207  pAllocation);
18208 
18209 #if VMA_RECORDING_ENABLED
18210  if(allocator->GetRecorder() != VMA_NULL)
18211  {
18212  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18213  allocator->GetCurrentFrameIndex(),
18214  vkMemReq,
18215  requiresDedicatedAllocation,
18216  prefersDedicatedAllocation,
18217  *pCreateInfo,
18218  *pAllocation);
18219  }
18220 #endif
18221 
18222  if(pAllocationInfo && result == VK_SUCCESS)
18223  {
18224  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18225  }
18226 
18227  return result;
18228 }
18229 
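/*
A hedged sketch of the typical pairing for the function above: create the
buffer yourself, allocate matching memory, then bind. `device` is the
caller's VkDevice; error handling is abbreviated.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

VkBuffer buf = VK_NULL_HANDLE;
vkCreateBuffer(device, &bufCreateInfo, VMA_NULL, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, alloc, buf);
}
// Note: vmaCreateBuffer performs all three steps in a single call.
\endcode
*/
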
18230 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18231  VmaAllocator allocator,
18232  VkImage image,
18233  const VmaAllocationCreateInfo* pCreateInfo,
18234  VmaAllocation* pAllocation,
18235  VmaAllocationInfo* pAllocationInfo)
18236 {
18237  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18238 
18239  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18240 
18241  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18242 
18243  VkMemoryRequirements vkMemReq = {};
18244  bool requiresDedicatedAllocation = false;
18245  bool prefersDedicatedAllocation = false;
18246  allocator->GetImageMemoryRequirements(image, vkMemReq,
18247  requiresDedicatedAllocation, prefersDedicatedAllocation);
18248 
18249  VkResult result = allocator->AllocateMemory(
18250  vkMemReq,
18251  requiresDedicatedAllocation,
18252  prefersDedicatedAllocation,
18253  VK_NULL_HANDLE, // dedicatedBuffer
18254  UINT32_MAX, // dedicatedBufferUsage
18255  image, // dedicatedImage
18256  *pCreateInfo,
18257  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18258  1, // allocationCount
18259  pAllocation);
18260 
18261 #if VMA_RECORDING_ENABLED
18262  if(allocator->GetRecorder() != VMA_NULL)
18263  {
18264  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18265  allocator->GetCurrentFrameIndex(),
18266  vkMemReq,
18267  requiresDedicatedAllocation,
18268  prefersDedicatedAllocation,
18269  *pCreateInfo,
18270  *pAllocation);
18271  }
18272 #endif
18273 
18274  if(pAllocationInfo && result == VK_SUCCESS)
18275  {
18276  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18277  }
18278 
18279  return result;
18280 }
18281 
18282 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18283  VmaAllocator allocator,
18284  VmaAllocation allocation)
18285 {
18286  VMA_ASSERT(allocator);
18287 
18288  if(allocation == VK_NULL_HANDLE)
18289  {
18290  return;
18291  }
18292 
18293  VMA_DEBUG_LOG("vmaFreeMemory");
18294 
18295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18296 
18297 #if VMA_RECORDING_ENABLED
18298  if(allocator->GetRecorder() != VMA_NULL)
18299  {
18300  allocator->GetRecorder()->RecordFreeMemory(
18301  allocator->GetCurrentFrameIndex(),
18302  allocation);
18303  }
18304 #endif
18305 
18306  allocator->FreeMemory(
18307  1, // allocationCount
18308  &allocation);
18309 }
18310 
18311 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18312  VmaAllocator allocator,
18313  size_t allocationCount,
18314  const VmaAllocation* pAllocations)
18315 {
18316  if(allocationCount == 0)
18317  {
18318  return;
18319  }
18320 
18321  VMA_ASSERT(allocator);
18322 
18323  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18324 
18325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18326 
18327 #if VMA_RECORDING_ENABLED
18328  if(allocator->GetRecorder() != VMA_NULL)
18329  {
18330  allocator->GetRecorder()->RecordFreeMemoryPages(
18331  allocator->GetCurrentFrameIndex(),
18332  (uint64_t)allocationCount,
18333  pAllocations);
18334  }
18335 #endif
18336 
18337  allocator->FreeMemory(allocationCount, pAllocations);
18338 }
18339 
18340 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
18341  VmaAllocator allocator,
18342  VmaAllocation allocation,
18343  VkDeviceSize newSize)
18344 {
18345  VMA_ASSERT(allocator && allocation);
18346 
18347  VMA_DEBUG_LOG("vmaResizeAllocation");
18348 
18349  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18350 
18351  return allocator->ResizeAllocation(allocation, newSize);
18352 }
18353 
18354 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18355  VmaAllocator allocator,
18356  VmaAllocation allocation,
18357  VmaAllocationInfo* pAllocationInfo)
18358 {
18359  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18360 
18361  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18362 
18363 #if VMA_RECORDING_ENABLED
18364  if(allocator->GetRecorder() != VMA_NULL)
18365  {
18366  allocator->GetRecorder()->RecordGetAllocationInfo(
18367  allocator->GetCurrentFrameIndex(),
18368  allocation);
18369  }
18370 #endif
18371 
18372  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18373 }
18374 
18375 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18376  VmaAllocator allocator,
18377  VmaAllocation allocation)
18378 {
18379  VMA_ASSERT(allocator && allocation);
18380 
18381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18382 
18383 #if VMA_RECORDING_ENABLED
18384  if(allocator->GetRecorder() != VMA_NULL)
18385  {
18386  allocator->GetRecorder()->RecordTouchAllocation(
18387  allocator->GetCurrentFrameIndex(),
18388  allocation);
18389  }
18390 #endif
18391 
18392  return allocator->TouchAllocation(allocation);
18393 }
18394 
18395 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18396  VmaAllocator allocator,
18397  VmaAllocation allocation,
18398  void* pUserData)
18399 {
18400  VMA_ASSERT(allocator && allocation);
18401 
18402  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18403 
18404  allocation->SetUserData(allocator, pUserData);
18405 
18406 #if VMA_RECORDING_ENABLED
18407  if(allocator->GetRecorder() != VMA_NULL)
18408  {
18409  allocator->GetRecorder()->RecordSetAllocationUserData(
18410  allocator->GetCurrentFrameIndex(),
18411  allocation,
18412  pUserData);
18413  }
18414 #endif
18415 }
18416 
18417 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
18418  VmaAllocator allocator,
18419  VmaAllocation* pAllocation)
18420 {
18421  VMA_ASSERT(allocator && pAllocation);
18422 
18423  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18424 
18425  allocator->CreateLostAllocation(pAllocation);
18426 
18427 #if VMA_RECORDING_ENABLED
18428  if(allocator->GetRecorder() != VMA_NULL)
18429  {
18430  allocator->GetRecorder()->RecordCreateLostAllocation(
18431  allocator->GetCurrentFrameIndex(),
18432  *pAllocation);
18433  }
18434 #endif
18435 }
18436 
18437 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
18438  VmaAllocator allocator,
18439  VmaAllocation allocation,
18440  void** ppData)
18441 {
18442  VMA_ASSERT(allocator && allocation && ppData);
18443 
18444  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18445 
18446  VkResult res = allocator->Map(allocation, ppData);
18447 
18448 #if VMA_RECORDING_ENABLED
18449  if(allocator->GetRecorder() != VMA_NULL)
18450  {
18451  allocator->GetRecorder()->RecordMapMemory(
18452  allocator->GetCurrentFrameIndex(),
18453  allocation);
18454  }
18455 #endif
18456 
18457  return res;
18458 }
18459 
18460 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
18461  VmaAllocator allocator,
18462  VmaAllocation allocation)
18463 {
18464  VMA_ASSERT(allocator && allocation);
18465 
18466  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18467 
18468 #if VMA_RECORDING_ENABLED
18469  if(allocator->GetRecorder() != VMA_NULL)
18470  {
18471  allocator->GetRecorder()->RecordUnmapMemory(
18472  allocator->GetCurrentFrameIndex(),
18473  allocation);
18474  }
18475 #endif
18476 
18477  allocator->Unmap(allocation);
18478 }
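/*
Usage sketch (illustrative): uploading CPU data through a host-visible
allocation; allocator, alloc, srcData and srcSize assumed:

    void* mapped = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, (size_t)srcSize);
        // Needed only when the memory type lacks HOST_COHERENT; flush while
        // still mapped, then unmap:
        vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, alloc);
    }
*/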
18479 
18480 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18481 {
18482  VMA_ASSERT(allocator && allocation);
18483 
18484  VMA_DEBUG_LOG("vmaFlushAllocation");
18485 
18486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18487 
18488  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
18489 
18490 #if VMA_RECORDING_ENABLED
18491  if(allocator->GetRecorder() != VMA_NULL)
18492  {
18493  allocator->GetRecorder()->RecordFlushAllocation(
18494  allocator->GetCurrentFrameIndex(),
18495  allocation, offset, size);
18496  }
18497 #endif
18498 
18499  return res;
18500 }
18501 
18502 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18503 {
18504  VMA_ASSERT(allocator && allocation);
18505 
18506  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18507 
18508  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18509 
18510  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
18511 
18512 #if VMA_RECORDING_ENABLED
18513  if(allocator->GetRecorder() != VMA_NULL)
18514  {
18515  allocator->GetRecorder()->RecordInvalidateAllocation(
18516  allocator->GetCurrentFrameIndex(),
18517  allocation, offset, size);
18518  }
18519 #endif
18520 
18521  return res;
18522 }
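/*
Usage sketch (illustrative): reading back data the GPU wrote to a
host-visible, persistently mapped allocation
(VMA_ALLOCATION_CREATE_MAPPED_BIT); allocator and alloc assumed:

    // Needed only when the memory type lacks HOST_COHERENT:
    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, alloc, &info);
    const float* results = (const float*)info.pMappedData;
*/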
18523 
18524 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
18525  VmaAllocator allocator,
18526  uint32_t allocationCount,
18527  const VmaAllocation* allocations,
18528  const VkDeviceSize* offsets,
18529  const VkDeviceSize* sizes)
18530 {
18531  VMA_ASSERT(allocator);
18532 
18533  if(allocationCount == 0)
18534  {
18535  return VK_SUCCESS;
18536  }
18537 
18538  VMA_ASSERT(allocations);
18539 
18540  VMA_DEBUG_LOG("vmaFlushAllocations");
18541 
18542  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18543 
18544  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
18545 
18546 #if VMA_RECORDING_ENABLED
18547  if(allocator->GetRecorder() != VMA_NULL)
18548  {
18549  // TODO: Recording of vmaFlushAllocations is not implemented yet.
18550  }
18551 #endif
18552 
18553  return res;
18554 }
18555 
18556 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
18557  VmaAllocator allocator,
18558  uint32_t allocationCount,
18559  const VmaAllocation* allocations,
18560  const VkDeviceSize* offsets,
18561  const VkDeviceSize* sizes)
18562 {
18563  VMA_ASSERT(allocator);
18564 
18565  if(allocationCount == 0)
18566  {
18567  return VK_SUCCESS;
18568  }
18569 
18570  VMA_ASSERT(allocations);
18571 
18572  VMA_DEBUG_LOG("vmaInvalidateAllocations");
18573 
18574  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18575 
18576  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
18577 
18578 #if VMA_RECORDING_ENABLED
18579  if(allocator->GetRecorder() != VMA_NULL)
18580  {
18581  // TODO: Recording of vmaInvalidateAllocations is not implemented yet.
18582  }
18583 #endif
18584 
18585  return res;
18586 }
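/*
Usage sketch (illustrative): flushing several allocations in one call; null
offsets/sizes mean "whole allocation" for every element. a0, a1, a2 assumed:

    VmaAllocation allocs[] = { a0, a1, a2 };
    vmaFlushAllocations(allocator, 3, allocs, VMA_NULL, VMA_NULL);
*/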
18587 
18588 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18589 {
18590  VMA_ASSERT(allocator);
18591 
18592  VMA_DEBUG_LOG("vmaCheckCorruption");
18593 
18594  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18595 
18596  return allocator->CheckCorruption(memoryTypeBits);
18597 }
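/*
Usage sketch (illustrative): only meaningful when the implementation part of
this header was compiled with VMA_DEBUG_MARGIN and
VMA_DEBUG_DETECT_CORRUPTION enabled; allocator assumed:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    // VK_ERROR_FEATURE_NOT_PRESENT: corruption detection is not enabled.
*/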
18598 
18599 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18600  VmaAllocator allocator,
18601  const VmaAllocation* pAllocations,
18602  size_t allocationCount,
18603  VkBool32* pAllocationsChanged,
18604  const VmaDefragmentationInfo *pDefragmentationInfo,
18605  VmaDefragmentationStats* pDefragmentationStats)
18606 {
18607  // Deprecated interface, reimplemented on top of the new one.
18608 
18609  VmaDefragmentationInfo2 info2 = {};
18610  info2.allocationCount = (uint32_t)allocationCount;
18611  info2.pAllocations = pAllocations;
18612  info2.pAllocationsChanged = pAllocationsChanged;
18613  if(pDefragmentationInfo != VMA_NULL)
18614  {
18615  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18616  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18617  }
18618  else
18619  {
18620  info2.maxCpuAllocationsToMove = UINT32_MAX;
18621  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18622  }
18623  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18624 
18625  VmaDefragmentationContext ctx;
18626  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18627  if(res == VK_NOT_READY)
18628  {
18629  res = vmaDefragmentationEnd(allocator, ctx);
18630  }
18631  return res;
18632 }
18633 
18634 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18635  VmaAllocator allocator,
18636  const VmaDefragmentationInfo2* pInfo,
18637  VmaDefragmentationStats* pStats,
18638  VmaDefragmentationContext *pContext)
18639 {
18640  VMA_ASSERT(allocator && pInfo && pContext);
18641 
18642  // Degenerate case: Nothing to defragment.
18643  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18644  {
18645  return VK_SUCCESS;
18646  }
18647 
18648  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18649  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18650  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18651  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18652 
18653  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18654 
18655  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18656 
18657  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18658 
18659 #if VMA_RECORDING_ENABLED
18660  if(allocator->GetRecorder() != VMA_NULL)
18661  {
18662  allocator->GetRecorder()->RecordDefragmentationBegin(
18663  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18664  }
18665 #endif
18666 
18667  return res;
18668 }
18669 
18670 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18671  VmaAllocator allocator,
18672  VmaDefragmentationContext context)
18673 {
18674  VMA_ASSERT(allocator);
18675 
18676  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18677 
18678  if(context != VK_NULL_HANDLE)
18679  {
18680  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18681 
18682 #if VMA_RECORDING_ENABLED
18683  if(allocator->GetRecorder() != VMA_NULL)
18684  {
18685  allocator->GetRecorder()->RecordDefragmentationEnd(
18686  allocator->GetCurrentFrameIndex(), context);
18687  }
18688 #endif
18689 
18690  return allocator->DefragmentationEnd(context);
18691  }
18692  else
18693  {
18694  return VK_SUCCESS;
18695  }
18696 }
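/*
Usage sketch (illustrative): the simplest CPU-side round trip through the
begin/end pair above. allocator, allocs and allocCount assumed; the listed
allocations must be idle on the GPU, and buffers or images bound to moved
allocations must be recreated and rebound afterwards:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &ctx);
    vmaDefragmentationEnd(allocator, ctx);
*/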
18697 
18698 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18699  VmaAllocator allocator,
18700  VmaDefragmentationContext context,
18701  VmaDefragmentationPassInfo* pInfo
18702  )
18703 {
18704  VMA_ASSERT(allocator);
18705  VMA_ASSERT(pInfo);
18706 
18707  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18708 
18709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18710 
18711  if(context == VK_NULL_HANDLE)
18712  {
18713  pInfo->moveCount = 0;
18714  return VK_SUCCESS;
18715  }
18716 
18717  return allocator->DefragmentationPassBegin(pInfo, context);
18718 }
18719 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18720  VmaAllocator allocator,
18721  VmaDefragmentationContext context)
18722 {
18723  VMA_ASSERT(allocator);
18724 
18725  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18726  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18727 
18728  if(context == VK_NULL_HANDLE)
18729  return VK_SUCCESS;
18730 
18731  return allocator->DefragmentationPassEnd(context);
18732 }
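/*
Usage sketch (illustrative, simplified): one incremental pass. A real
application repeats the begin/end pair until the library reports completion,
and executes every returned move (copies the data, rebinds the resource)
between the two calls. allocator and a defragInfo (VmaDefragmentationInfo2
with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL in flags) are assumed:

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &ctx);
    if(res == VK_NOT_READY) // passes are pending
    {
        VmaDefragmentationPassMoveInfo moves[16];
        VmaDefragmentationPassInfo pass = {};
        pass.moveCount = 16; // capacity of moves (assumed in/out semantics)
        pass.pMoves = moves;
        vmaBeginDefragmentationPass(allocator, ctx, &pass);
        // ...perform the pass.moveCount moves described in pass.pMoves...
        vmaEndDefragmentationPass(allocator, ctx);
    }
    vmaDefragmentationEnd(allocator, ctx);
*/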
18733 
18734 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18735  VmaAllocator allocator,
18736  VmaAllocation allocation,
18737  VkBuffer buffer)
18738 {
18739  VMA_ASSERT(allocator && allocation && buffer);
18740 
18741  VMA_DEBUG_LOG("vmaBindBufferMemory");
18742 
18743  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18744 
18745  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18746 }
18747 
18748 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18749  VmaAllocator allocator,
18750  VmaAllocation allocation,
18751  VkDeviceSize allocationLocalOffset,
18752  VkBuffer buffer,
18753  const void* pNext)
18754 {
18755  VMA_ASSERT(allocator && allocation && buffer);
18756 
18757  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18758 
18759  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18760 
18761  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18762 }
18763 
18764 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18765  VmaAllocator allocator,
18766  VmaAllocation allocation,
18767  VkImage image)
18768 {
18769  VMA_ASSERT(allocator && allocation && image);
18770 
18771  VMA_DEBUG_LOG("vmaBindImageMemory");
18772 
18773  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18774 
18775  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18776 }
18777 
18778 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18779  VmaAllocator allocator,
18780  VmaAllocation allocation,
18781  VkDeviceSize allocationLocalOffset,
18782  VkImage image,
18783  const void* pNext)
18784 {
18785  VMA_ASSERT(allocator && allocation && image);
18786 
18787  VMA_DEBUG_LOG("vmaBindImageMemory2");
18788 
18789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18790 
18791  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18792 }
18793 
18794 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18795  VmaAllocator allocator,
18796  const VkBufferCreateInfo* pBufferCreateInfo,
18797  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18798  VkBuffer* pBuffer,
18799  VmaAllocation* pAllocation,
18800  VmaAllocationInfo* pAllocationInfo)
18801 {
18802  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18803 
18804  if(pBufferCreateInfo->size == 0)
18805  {
18806  return VK_ERROR_VALIDATION_FAILED_EXT;
18807  }
18808  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18809  !allocator->m_UseKhrBufferDeviceAddress)
18810  {
18811  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18812  return VK_ERROR_VALIDATION_FAILED_EXT;
18813  }
18814 
18815  VMA_DEBUG_LOG("vmaCreateBuffer");
18816 
18817  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18818 
18819  *pBuffer = VK_NULL_HANDLE;
18820  *pAllocation = VK_NULL_HANDLE;
18821 
18822  // 1. Create VkBuffer.
18823  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18824  allocator->m_hDevice,
18825  pBufferCreateInfo,
18826  allocator->GetAllocationCallbacks(),
18827  pBuffer);
18828  if(res >= 0)
18829  {
18830  // 2. vkGetBufferMemoryRequirements.
18831  VkMemoryRequirements vkMemReq = {};
18832  bool requiresDedicatedAllocation = false;
18833  bool prefersDedicatedAllocation = false;
18834  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18835  requiresDedicatedAllocation, prefersDedicatedAllocation);
18836 
18837  // 3. Allocate memory using allocator.
18838  res = allocator->AllocateMemory(
18839  vkMemReq,
18840  requiresDedicatedAllocation,
18841  prefersDedicatedAllocation,
18842  *pBuffer, // dedicatedBuffer
18843  pBufferCreateInfo->usage, // dedicatedBufferUsage
18844  VK_NULL_HANDLE, // dedicatedImage
18845  *pAllocationCreateInfo,
18846  VMA_SUBALLOCATION_TYPE_BUFFER,
18847  1, // allocationCount
18848  pAllocation);
18849 
18850 #if VMA_RECORDING_ENABLED
18851  if(allocator->GetRecorder() != VMA_NULL)
18852  {
18853  allocator->GetRecorder()->RecordCreateBuffer(
18854  allocator->GetCurrentFrameIndex(),
18855  *pBufferCreateInfo,
18856  *pAllocationCreateInfo,
18857  *pAllocation);
18858  }
18859 #endif
18860 
18861  if(res >= 0)
18862  {
18863  // 4. Bind buffer with memory.
18864  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18865  {
18866  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18867  }
18868  if(res >= 0)
18869  {
18870  // All steps succeeded.
18871  #if VMA_STATS_STRING_ENABLED
18872  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
18873  #endif
18874  if(pAllocationInfo != VMA_NULL)
18875  {
18876  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18877  }
18878 
18879  return VK_SUCCESS;
18880  }
18881  allocator->FreeMemory(
18882  1, // allocationCount
18883  pAllocation);
18884  *pAllocation = VK_NULL_HANDLE;
18885  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18886  *pBuffer = VK_NULL_HANDLE;
18887  return res;
18888  }
18889  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18890  *pBuffer = VK_NULL_HANDLE;
18891  return res;
18892  }
18893  return res;
18894 }
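/*
Usage sketch (illustrative): creating a buffer together with its memory in
one call, mirroring the steps implemented above; allocator assumed:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo,
        &buf, &alloc, VMA_NULL);
*/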
18895 
18896 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18897  VmaAllocator allocator,
18898  VkBuffer buffer,
18899  VmaAllocation allocation)
18900 {
18901  VMA_ASSERT(allocator);
18902 
18903  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18904  {
18905  return;
18906  }
18907 
18908  VMA_DEBUG_LOG("vmaDestroyBuffer");
18909 
18910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18911 
18912 #if VMA_RECORDING_ENABLED
18913  if(allocator->GetRecorder() != VMA_NULL)
18914  {
18915  allocator->GetRecorder()->RecordDestroyBuffer(
18916  allocator->GetCurrentFrameIndex(),
18917  allocation);
18918  }
18919 #endif
18920 
18921  if(buffer != VK_NULL_HANDLE)
18922  {
18923  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18924  }
18925 
18926  if(allocation != VK_NULL_HANDLE)
18927  {
18928  allocator->FreeMemory(
18929  1, // allocationCount
18930  &allocation);
18931  }
18932 }
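/*
Usage sketch (illustrative): teardown matching the vmaCreateBuffer() sketch
above; one call destroys the buffer and frees its allocation, and null
handles are silently ignored:

    vmaDestroyBuffer(allocator, buf, alloc);
*/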
18933 
18934 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
18935  VmaAllocator allocator,
18936  const VkImageCreateInfo* pImageCreateInfo,
18937  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18938  VkImage* pImage,
18939  VmaAllocation* pAllocation,
18940  VmaAllocationInfo* pAllocationInfo)
18941 {
18942  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
18943 
18944  if(pImageCreateInfo->extent.width == 0 ||
18945  pImageCreateInfo->extent.height == 0 ||
18946  pImageCreateInfo->extent.depth == 0 ||
18947  pImageCreateInfo->mipLevels == 0 ||
18948  pImageCreateInfo->arrayLayers == 0)
18949  {
18950  return VK_ERROR_VALIDATION_FAILED_EXT;
18951  }
18952 
18953  VMA_DEBUG_LOG("vmaCreateImage");
18954 
18955  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18956 
18957  *pImage = VK_NULL_HANDLE;
18958  *pAllocation = VK_NULL_HANDLE;
18959 
18960  // 1. Create VkImage.
18961  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
18962  allocator->m_hDevice,
18963  pImageCreateInfo,
18964  allocator->GetAllocationCallbacks(),
18965  pImage);
18966  if(res >= 0)
18967  {
18968  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
18969  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
18970  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
18971 
18972  // 2. Allocate memory using allocator.
18973  VkMemoryRequirements vkMemReq = {};
18974  bool requiresDedicatedAllocation = false;
18975  bool prefersDedicatedAllocation = false;
18976  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
18977  requiresDedicatedAllocation, prefersDedicatedAllocation);
18978 
18979  res = allocator->AllocateMemory(
18980  vkMemReq,
18981  requiresDedicatedAllocation,
18982  prefersDedicatedAllocation,
18983  VK_NULL_HANDLE, // dedicatedBuffer
18984  UINT32_MAX, // dedicatedBufferUsage
18985  *pImage, // dedicatedImage
18986  *pAllocationCreateInfo,
18987  suballocType,
18988  1, // allocationCount
18989  pAllocation);
18990 
18991 #if VMA_RECORDING_ENABLED
18992  if(allocator->GetRecorder() != VMA_NULL)
18993  {
18994  allocator->GetRecorder()->RecordCreateImage(
18995  allocator->GetCurrentFrameIndex(),
18996  *pImageCreateInfo,
18997  *pAllocationCreateInfo,
18998  *pAllocation);
18999  }
19000 #endif
19001 
19002  if(res >= 0)
19003  {
19004  // 3. Bind image with memory.
19005  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19006  {
19007  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19008  }
19009  if(res >= 0)
19010  {
19011  // All steps succeeded.
19012  #if VMA_STATS_STRING_ENABLED
19013  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19014  #endif
19015  if(pAllocationInfo != VMA_NULL)
19016  {
19017  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19018  }
19019 
19020  return VK_SUCCESS;
19021  }
19022  allocator->FreeMemory(
19023  1, // allocationCount
19024  pAllocation);
19025  *pAllocation = VK_NULL_HANDLE;
19026  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19027  *pImage = VK_NULL_HANDLE;
19028  return res;
19029  }
19030  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19031  *pImage = VK_NULL_HANDLE;
19032  return res;
19033  }
19034  return res;
19035 }
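/*
Usage sketch (illustrative): creating a GPU-only 2D image, analogous to the
vmaCreateBuffer() sketch; allocator assumed:

    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgInfo.imageType = VK_IMAGE_TYPE_2D;
    imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgInfo.extent = { 1024, 1024, 1 };
    imgInfo.mipLevels = 1;
    imgInfo.arrayLayers = 1;
    imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgInfo, &allocCreateInfo,
        &image, &alloc, VMA_NULL);
*/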
19036 
19037 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19038  VmaAllocator allocator,
19039  VkImage image,
19040  VmaAllocation allocation)
19041 {
19042  VMA_ASSERT(allocator);
19043 
19044  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19045  {
19046  return;
19047  }
19048 
19049  VMA_DEBUG_LOG("vmaDestroyImage");
19050 
19051  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19052 
19053 #if VMA_RECORDING_ENABLED
19054  if(allocator->GetRecorder() != VMA_NULL)
19055  {
19056  allocator->GetRecorder()->RecordDestroyImage(
19057  allocator->GetCurrentFrameIndex(),
19058  allocation);
19059  }
19060 #endif
19061 
19062  if(image != VK_NULL_HANDLE)
19063  {
19064  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19065  }
19066  if(allocation != VK_NULL_HANDLE)
19067  {
19068  allocator->FreeMemory(
19069  1, // allocationCount
19070  &allocation);
19071  }
19072 }
19073 
19074 #endif // #ifdef VMA_IMPLEMENTATION