Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1889 /*
1890 Define this macro to 0/1 to disable/enable support for recording functionality,
1891 available through VmaAllocatorCreateInfo::pRecordSettings.
1892 */
1893 #ifndef VMA_RECORDING_ENABLED
1894  #define VMA_RECORDING_ENABLED 0
1895 #endif
1896 
1897 #ifndef NOMINMAX
1898  #define NOMINMAX // For windows.h
1899 #endif
1900 
1901 #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
1902  extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
1903  extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
1904  extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1905  extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1906  extern PFN_vkAllocateMemory vkAllocateMemory;
1907  extern PFN_vkFreeMemory vkFreeMemory;
1908  extern PFN_vkMapMemory vkMapMemory;
1909  extern PFN_vkUnmapMemory vkUnmapMemory;
1910  extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1911  extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1912  extern PFN_vkBindBufferMemory vkBindBufferMemory;
1913  extern PFN_vkBindImageMemory vkBindImageMemory;
1914  extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1915  extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1916  extern PFN_vkCreateBuffer vkCreateBuffer;
1917  extern PFN_vkDestroyBuffer vkDestroyBuffer;
1918  extern PFN_vkCreateImage vkCreateImage;
1919  extern PFN_vkDestroyImage vkDestroyImage;
1920  extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1921  #if VMA_VULKAN_VERSION >= 1001000
1922  extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
1923  extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
1924  extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
1925  extern PFN_vkBindImageMemory2 vkBindImageMemory2;
1926  extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
1927  #endif // #if VMA_VULKAN_VERSION >= 1001000
1928 #endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
1929 
1930 #ifndef VULKAN_H_
1931  #include <vulkan/vulkan.h>
1932 #endif
1933 
1934 #if VMA_RECORDING_ENABLED
1935  #if defined(_WIN32)
1936  #include <windows.h>
1937  #else
1938  #error VMA Recording functionality is not yet available for non-Windows platforms
1939  #endif
1940 #endif
1941 
1942 // Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
1943 // where AAA = major, BBB = minor, CCC = patch.
1944 // If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1945 #if !defined(VMA_VULKAN_VERSION)
1946  #if defined(VK_VERSION_1_2)
1947  #define VMA_VULKAN_VERSION 1002000
1948  #elif defined(VK_VERSION_1_1)
1949  #define VMA_VULKAN_VERSION 1001000
1950  #else
1951  #define VMA_VULKAN_VERSION 1000000
1952  #endif
1953 #endif
1954 
1955 #if !defined(VMA_DEDICATED_ALLOCATION)
1956  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1957  #define VMA_DEDICATED_ALLOCATION 1
1958  #else
1959  #define VMA_DEDICATED_ALLOCATION 0
1960  #endif
1961 #endif
1962 
1963 #if !defined(VMA_BIND_MEMORY2)
1964  #if VK_KHR_bind_memory2
1965  #define VMA_BIND_MEMORY2 1
1966  #else
1967  #define VMA_BIND_MEMORY2 0
1968  #endif
1969 #endif
1970 
1971 #if !defined(VMA_MEMORY_BUDGET)
1972  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1973  #define VMA_MEMORY_BUDGET 1
1974  #else
1975  #define VMA_MEMORY_BUDGET 0
1976  #endif
1977 #endif
1978 
1979 // Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
1980 #if !defined(VMA_BUFFER_DEVICE_ADDRESS)
1981  #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
1982  #define VMA_BUFFER_DEVICE_ADDRESS 1
1983  #else
1984  #define VMA_BUFFER_DEVICE_ADDRESS 0
1985  #endif
1986 #endif
1987 
1988 // Define these macros to decorate all public functions with additional code,
1989 // before and after returned type, appropriately. This may be useful for
1990 // exporting the functions when compiling VMA as a separate library. Example:
1991 // #define VMA_CALL_PRE __declspec(dllexport)
1992 // #define VMA_CALL_POST __cdecl
1993 #ifndef VMA_CALL_PRE
1994  #define VMA_CALL_PRE
1995 #endif
1996 #ifndef VMA_CALL_POST
1997  #define VMA_CALL_POST
1998 #endif
1999 
2000 // Define this macro to decorate pointers with an attribute specifying the
2001 // length of the array they point to if they are not null.
2002 //
2003 // The length may be one of
2004 // - The name of another parameter in the argument list where the pointer is declared
2005 // - The name of another member in the struct where the pointer is declared
2006 // - The name of a member of a struct type, meaning the value of that member in
2007 // the context of the call. For example
2008 // VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
2009 // this means the number of memory heaps available in the device associated
2010 // with the VmaAllocator being dealt with.
2011 #ifndef VMA_LEN_IF_NOT_NULL
2012  #define VMA_LEN_IF_NOT_NULL(len)
2013 #endif
2014 
2015 // The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
2016 // see: https://clang.llvm.org/docs/AttributeReference.html#nullable
2017 #ifndef VMA_NULLABLE
2018  #ifdef __clang__
2019  #define VMA_NULLABLE _Nullable
2020  #else
2021  #define VMA_NULLABLE
2022  #endif
2023 #endif
2024 
2025 // The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
2026 // see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
2027 #ifndef VMA_NOT_NULL
2028  #ifdef __clang__
2029  #define VMA_NOT_NULL _Nonnull
2030  #else
2031  #define VMA_NOT_NULL
2032  #endif
2033 #endif
2034 
2035 // If non-dispatchable handles are represented as pointers then we can give
2036 // them nullability annotations
2037 #ifndef VMA_NOT_NULL_NON_DISPATCHABLE
2038  #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
2039  #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
2040  #else
2041  #define VMA_NOT_NULL_NON_DISPATCHABLE
2042  #endif
2043 #endif
2044 
2045 #ifndef VMA_NULLABLE_NON_DISPATCHABLE
2046  #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
2047  #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
2048  #else
2049  #define VMA_NULLABLE_NON_DISPATCHABLE
2050  #endif
2051 #endif
2052 
2062 VK_DEFINE_HANDLE(VmaAllocator)
2063 
2064 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
2066  VmaAllocator VMA_NOT_NULL allocator,
2067  uint32_t memoryType,
2068  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
2069  VkDeviceSize size,
2070  void* VMA_NULLABLE pUserData);
2072 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
2073  VmaAllocator VMA_NOT_NULL allocator,
2074  uint32_t memoryType,
2075  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
2076  VkDeviceSize size,
2077  void* VMA_NULLABLE pUserData);
2078 
2092  void* VMA_NULLABLE pUserData;
2094 
2190 
2193 typedef VkFlags VmaAllocatorCreateFlags;
2194 
2199 typedef struct VmaVulkanFunctions {
2200  PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
2201  PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
2202  PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
2203  PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
2204  PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
2205  PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
2206  PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
2207  PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
2208  PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
2209  PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
2210  PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
2211  PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
2212  PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
2213  PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
2214  PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
2215  PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
2216  PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
2217 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
2218  PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
2219  PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
2220 #endif
2221 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
2222  PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
2223  PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
2224 #endif
2225 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
2226  PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
2227 #endif
2229 
2231 typedef enum VmaRecordFlagBits {
2238 
2241 typedef VkFlags VmaRecordFlags;
2242 
2244 typedef struct VmaRecordSettings
2245 {
2255  const char* VMA_NOT_NULL pFilePath;
2257 
2260 {
2264 
2265  VkPhysicalDevice VMA_NOT_NULL physicalDevice;
2267 
2268  VkDevice VMA_NOT_NULL device;
2270 
2273 
2274  const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
2276 
2316  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
2317 
2329  const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
2334  VkInstance VMA_NOT_NULL_NON_DISPATCHABLE instance;
2345 
2347 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2348  const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
2349  VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);
2350 
2352 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2353  VmaAllocator VMA_NULLABLE allocator);
2354 
2357 typedef struct VmaAllocatorInfo
2358 {
2363  VkInstance VMA_NOT_NULL instance;
2368  VkPhysicalDevice VMA_NOT_NULL physicalDevice;
2373  VkDevice VMA_NOT_NULL device;
2375 
2381 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
2382 
2387 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2388  VmaAllocator VMA_NOT_NULL allocator,
2389  const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);
2390 
2395 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2396  VmaAllocator VMA_NOT_NULL allocator,
2397  const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
2398 
2405 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2406  VmaAllocator VMA_NOT_NULL allocator,
2407  uint32_t memoryTypeIndex,
2408  VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
2409 
2418 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2419  VmaAllocator VMA_NOT_NULL allocator,
2420  uint32_t frameIndex);
2421 
2424 typedef struct VmaStatInfo
2425 {
// Number of `VkDeviceMemory` blocks counted in this group of statistics.
2427  uint32_t blockCount;
// Total bytes occupied by allocations within the counted blocks.
2433  VkDeviceSize usedBytes;
// Total bytes of free (unallocated) ranges within the counted blocks.
2435  VkDeviceSize unusedBytes;
// Min/average/max allocation size, in bytes, across the counted allocations.
2436  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
// Min/average/max size, in bytes, of the free ranges between allocations.
2437  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
2438 } VmaStatInfo;
2439 
// Aggregated memory statistics for the whole allocator, filled by vmaCalculateStats().
2441 typedef struct VmaStats
2442 {
// One entry per Vulkan memory type, indexed by memory type index.
2443  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
// One entry per Vulkan memory heap, indexed by memory heap index.
2444  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2446 } VmaStats;
2447 
2457 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2458  VmaAllocator VMA_NOT_NULL allocator,
2459  VmaStats* VMA_NOT_NULL pStats);
2460 
// Per-heap memory budget statistics, filled by vmaGetBudget().
2463 typedef struct VmaBudget
2464 {
// Sum of sizes of all `VkDeviceMemory` blocks allocated from this heap, in bytes.
2467  VkDeviceSize blockBytes;
2468 
// Sum of sizes of all allocations made from this heap, in bytes.
// Always <= blockBytes; the difference is memory reserved but currently unused.
2478  VkDeviceSize allocationBytes;
2479 
// Estimated current memory usage of the program on this heap, in bytes.
// NOTE(review): per VMA docs this may include usage by other processes/APIs
// when VK_EXT_memory_budget is available — confirm against library version.
2488  VkDeviceSize usage;
2489 
// Estimated amount of memory available to the program on this heap, in bytes.
2499  VkDeviceSize budget;
2500 } VmaBudget;
2501 
2512 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2513  VmaAllocator VMA_NOT_NULL allocator,
2514  VmaBudget* VMA_NOT_NULL pBudget);
2515 
2516 #ifndef VMA_STATS_STRING_ENABLED
2517 #define VMA_STATS_STRING_ENABLED 1
2518 #endif
2519 
2520 #if VMA_STATS_STRING_ENABLED
2521 
2523 
2525 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2526  VmaAllocator VMA_NOT_NULL allocator,
2527  char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
2528  VkBool32 detailedMap);
2529 
2530 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2531  VmaAllocator VMA_NOT_NULL allocator,
2532  char* VMA_NULLABLE pStatsString);
2533 
2534 #endif // #if VMA_STATS_STRING_ENABLED
2535 
2544 VK_DEFINE_HANDLE(VmaPool)
2545 
2546 typedef enum VmaMemoryUsage
2547 {
2609 
2611 } VmaMemoryUsage;
2612 
2622 
2687 
2703 
2713 
2720 
2724 
2726 {
2739  VkMemoryPropertyFlags requiredFlags;
2744  VkMemoryPropertyFlags preferredFlags;
2752  uint32_t memoryTypeBits;
2758  VmaPool VMA_NULLABLE pool;
2765  void* VMA_NULLABLE pUserData;
2767 
2784 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2785  VmaAllocator VMA_NOT_NULL allocator,
2786  uint32_t memoryTypeBits,
2787  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2788  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2789 
2802 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2803  VmaAllocator VMA_NOT_NULL allocator,
2804  const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2805  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2806  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2807 
2820 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2821  VmaAllocator VMA_NOT_NULL allocator,
2822  const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2823  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2824  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2825 
2846 
2863 
2874 
2880 
2883 typedef VkFlags VmaPoolCreateFlags;
2884 
2887 typedef struct VmaPoolCreateInfo {
2902  VkDeviceSize blockSize;
2931 
// Statistics of a single custom memory pool, filled by vmaGetPoolStats().
2934 typedef struct VmaPoolStats {
// Total `VkDeviceMemory` bytes allocated by the pool, in bytes.
2937  VkDeviceSize size;
// Bytes in the pool not currently used by any allocation.
2940  VkDeviceSize unusedSize;
// Size, in bytes, of the largest continuous free range in the pool
// (upper bound on the biggest allocation that can succeed without a new block).
2953  VkDeviceSize unusedRangeSizeMax;
// Number of `VkDeviceMemory` blocks backing the pool.
2956  size_t blockCount;
2957 } VmaPoolStats;
2958 
2965 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2966  VmaAllocator VMA_NOT_NULL allocator,
2967  const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
2968  VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);
2969 
2972 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2973  VmaAllocator VMA_NOT_NULL allocator,
2974  VmaPool VMA_NULLABLE pool);
2975 
2982 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2983  VmaAllocator VMA_NOT_NULL allocator,
2984  VmaPool VMA_NOT_NULL pool,
2985  VmaPoolStats* VMA_NOT_NULL pPoolStats);
2986 
2993 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
2994  VmaAllocator VMA_NOT_NULL allocator,
2995  VmaPool VMA_NOT_NULL pool,
2996  size_t* VMA_NULLABLE pLostAllocationCount);
2997 
3012 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);
3013 
3020 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
3021  VmaAllocator VMA_NOT_NULL allocator,
3022  VmaPool VMA_NOT_NULL pool,
3023  const char* VMA_NULLABLE * VMA_NOT_NULL ppName);
3024 
3030 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
3031  VmaAllocator VMA_NOT_NULL allocator,
3032  VmaPool VMA_NOT_NULL pool,
3033  const char* VMA_NULLABLE pName);
3034 
3059 VK_DEFINE_HANDLE(VmaAllocation)
3060 
3061 
3063 typedef struct VmaAllocationInfo {
3068  uint32_t memoryType;
3077  VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
3082  VkDeviceSize offset;
3087  VkDeviceSize size;
3096  void* VMA_NULLABLE pMappedData;
3101  void* VMA_NULLABLE pUserData;
3103 
3114 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
3115  VmaAllocator VMA_NOT_NULL allocator,
3116  const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
3117  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3118  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3119  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3120 
3140 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
3141  VmaAllocator VMA_NOT_NULL allocator,
3142  const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
3143  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3144  size_t allocationCount,
3145  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
3146  VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
3147 
3154 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
3155  VmaAllocator VMA_NOT_NULL allocator,
3156  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
3157  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3158  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3159  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3160 
3162 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
3163  VmaAllocator VMA_NOT_NULL allocator,
3164  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
3165  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3166  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3167  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3168 
3173 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
3174  VmaAllocator VMA_NOT_NULL allocator,
3175  const VmaAllocation VMA_NULLABLE allocation);
3176 
3187 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
3188  VmaAllocator VMA_NOT_NULL allocator,
3189  size_t allocationCount,
3190  const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
3191 
3199 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
3200  VmaAllocator VMA_NOT_NULL allocator,
3201  VmaAllocation VMA_NOT_NULL allocation,
3202  VkDeviceSize newSize);
3203 
3220 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
3221  VmaAllocator VMA_NOT_NULL allocator,
3222  VmaAllocation VMA_NOT_NULL allocation,
3223  VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
3224 
3239 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
3240  VmaAllocator VMA_NOT_NULL allocator,
3241  VmaAllocation VMA_NOT_NULL allocation);
3242 
3256 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
3257  VmaAllocator VMA_NOT_NULL allocator,
3258  VmaAllocation VMA_NOT_NULL allocation,
3259  void* VMA_NULLABLE pUserData);
3260 
3271 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
3272  VmaAllocator VMA_NOT_NULL allocator,
3273  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);
3274 
3313 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3314  VmaAllocator VMA_NOT_NULL allocator,
3315  VmaAllocation VMA_NOT_NULL allocation,
3316  void* VMA_NULLABLE * VMA_NOT_NULL ppData);
3317 
3326 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3327  VmaAllocator VMA_NOT_NULL allocator,
3328  VmaAllocation VMA_NOT_NULL allocation);
3329 
3348 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(
3349  VmaAllocator VMA_NOT_NULL allocator,
3350  VmaAllocation VMA_NOT_NULL allocation,
3351  VkDeviceSize offset,
3352  VkDeviceSize size);
3353 
3372 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(
3373  VmaAllocator VMA_NOT_NULL allocator,
3374  VmaAllocation VMA_NOT_NULL allocation,
3375  VkDeviceSize offset,
3376  VkDeviceSize size);
3377 
3394 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);
3395 
3402 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3403 
3404 typedef enum VmaDefragmentationFlagBits {
3409 typedef VkFlags VmaDefragmentationFlags;
3410 
3415 typedef struct VmaDefragmentationInfo2 {
3430  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
3436  VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
3439  uint32_t poolCount;
3455  const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
3460  VkDeviceSize maxCpuBytesToMove;
3470  VkDeviceSize maxGpuBytesToMove;
3484  VkCommandBuffer VMA_NULLABLE_NON_DISPATCHABLE commandBuffer;
3486 
3489  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
3490  VkDeviceSize offset;
3492 
3498  uint32_t moveCount;
3499  VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
3501 
3506 typedef struct VmaDefragmentationInfo {
3511  VkDeviceSize maxBytesToMove;
3518 
3520 typedef struct VmaDefragmentationStats {
3522  VkDeviceSize bytesMoved;
3524  VkDeviceSize bytesFreed;
3530 
3560 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3561  VmaAllocator VMA_NOT_NULL allocator,
3562  const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
3563  VmaDefragmentationStats* VMA_NULLABLE pStats,
3564  VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);
3565 
3571 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3572  VmaAllocator VMA_NOT_NULL allocator,
3573  VmaDefragmentationContext VMA_NULLABLE context);
3574 
3575 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
3576  VmaAllocator VMA_NOT_NULL allocator,
3577  VmaDefragmentationContext VMA_NULLABLE context,
3578  VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
3579 );
3580 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
3581  VmaAllocator VMA_NOT_NULL allocator,
3582  VmaDefragmentationContext VMA_NULLABLE context
3583 );
3584 
3625 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3626  VmaAllocator VMA_NOT_NULL allocator,
3627  const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
3628  size_t allocationCount,
3629  VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
3630  const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
3631  VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);
3632 
3645 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3646  VmaAllocator VMA_NOT_NULL allocator,
3647  VmaAllocation VMA_NOT_NULL allocation,
3648  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
3649 
3660 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3661  VmaAllocator VMA_NOT_NULL allocator,
3662  VmaAllocation VMA_NOT_NULL allocation,
3663  VkDeviceSize allocationLocalOffset,
3664  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
3665  const void* VMA_NULLABLE pNext);
3666 
3679 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3680  VmaAllocator VMA_NOT_NULL allocator,
3681  VmaAllocation VMA_NOT_NULL allocation,
3682  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
3683 
3694 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3695  VmaAllocator VMA_NOT_NULL allocator,
3696  VmaAllocation VMA_NOT_NULL allocation,
3697  VkDeviceSize allocationLocalOffset,
3698  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
3699  const void* VMA_NULLABLE pNext);
3700 
3727 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3728  VmaAllocator VMA_NOT_NULL allocator,
3729  const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
3730  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
3731  VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
3732  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3733  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3734 
3746 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3747  VmaAllocator VMA_NOT_NULL allocator,
3748  VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
3749  VmaAllocation VMA_NULLABLE allocation);
3750 
3752 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3753  VmaAllocator VMA_NOT_NULL allocator,
3754  const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
3755  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
3756  VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
3757  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3758  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3759 
3771 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3772  VmaAllocator VMA_NOT_NULL allocator,
3773  VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
3774  VmaAllocation VMA_NULLABLE allocation);
3775 
3776 #ifdef __cplusplus
3777 }
3778 #endif
3779 
3780 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3781 
3782 // For Visual Studio IntelliSense.
3783 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3784 #define VMA_IMPLEMENTATION
3785 #endif
3786 
3787 #ifdef VMA_IMPLEMENTATION
3788 #undef VMA_IMPLEMENTATION
3789 
3790 #include <cstdint>
3791 #include <cstdlib>
3792 #include <cstring>
3793 #include <utility>
3794 
3795 /*******************************************************************************
3796 CONFIGURATION SECTION
3797 
3798 Define some of these macros before each #include of this header or change them
3799 here if you need other than default behavior depending on your environment.
3800 */
3801 
3802 /*
3803 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3804 internally, like:
3805 
3806  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3807 */
3808 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3809  #define VMA_STATIC_VULKAN_FUNCTIONS 1
3810 #endif
3811 
3812 /*
3813 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3814 internally, like:
3815 
3816  vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
3817 */
3818 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
3819  #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
3820 #endif
3821 
3822 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3823 //#define VMA_USE_STL_CONTAINERS 1
3824 
3825 /* Set this macro to 1 to make the library including and using STL containers:
3826 std::pair, std::vector, std::list, std::unordered_map.
3827 
3828 Set it to 0 or undefined to make the library using its own implementation of
3829 the containers.
3830 */
3831 #if VMA_USE_STL_CONTAINERS
3832  #define VMA_USE_STL_VECTOR 1
3833  #define VMA_USE_STL_UNORDERED_MAP 1
3834  #define VMA_USE_STL_LIST 1
3835 #endif
3836 
3837 #ifndef VMA_USE_STL_SHARED_MUTEX
3838  // Compiler conforms to C++17.
3839  #if __cplusplus >= 201703L
3840  #define VMA_USE_STL_SHARED_MUTEX 1
3841  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3842  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3843  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3844  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3845  #define VMA_USE_STL_SHARED_MUTEX 1
3846  #else
3847  #define VMA_USE_STL_SHARED_MUTEX 0
3848  #endif
3849 #endif
3850 
3851 /*
3852 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3853 Library has its own container implementation.
3854 */
3855 #if VMA_USE_STL_VECTOR
3856  #include <vector>
3857 #endif
3858 
3859 #if VMA_USE_STL_UNORDERED_MAP
3860  #include <unordered_map>
3861 #endif
3862 
3863 #if VMA_USE_STL_LIST
3864  #include <list>
3865 #endif
3866 
3867 /*
3868 Following headers are used in this CONFIGURATION section only, so feel free to
3869 remove them if not needed.
3870 */
3871 #include <cassert> // for assert
3872 #include <algorithm> // for min, max
3873 #include <mutex>
3874 
3875 #ifndef VMA_NULL
3876  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3877  #define VMA_NULL nullptr
3878 #endif
3879 
3880 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3881 #include <cstdlib>
// aligned_alloc() is not provided by the C library on Android before API 16;
// emulate it with memalign(), whose result can be passed to free().
// NOTE(review): assumes callers pass a power-of-two alignment, as memalign requires.
3882 void *aligned_alloc(size_t alignment, size_t size)
3883 {
3884  // alignment must be >= sizeof(void*)
3885  if(alignment < sizeof(void*))
3886  {
3887  alignment = sizeof(void*);
3888  }
3889 
3890  return memalign(alignment, size);
3891 }
3892 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3893 #include <cstdlib>
// Platforms whose C library lacks C11 aligned_alloc(): emulate it with
// POSIX posix_memalign(). Returns VMA_NULL on allocation failure.
3894 void *aligned_alloc(size_t alignment, size_t size)
3895 {
3896  // alignment must be >= sizeof(void*)
// posix_memalign additionally requires alignment to be a power of two
// multiple of sizeof(void*); alignments that are too small are bumped up here.
3897  if(alignment < sizeof(void*))
3898  {
3899  alignment = sizeof(void*);
3900  }
3901 
3902  void *pointer;
3903  if(posix_memalign(&pointer, alignment, size) == 0)
3904  return pointer;
3905  return VMA_NULL;
3906 }
3907 #endif
3908 
3909 // If your compiler is not compatible with C++11 and definition of
3910 // aligned_alloc() function is missing, uncommenting the following line may help:
3911 
3912 //#include <malloc.h>
3913 
3914 // Normal assert to check for programmer's errors, especially in Debug configuration.
3915 #ifndef VMA_ASSERT
3916  #ifdef NDEBUG
3917  #define VMA_ASSERT(expr)
3918  #else
3919  #define VMA_ASSERT(expr) assert(expr)
3920  #endif
3921 #endif
3922 
3923 // Assert that will be called very often, like inside data structures e.g. operator[].
3924 // Making it non-empty can make program slow.
3925 #ifndef VMA_HEAVY_ASSERT
3926  #ifdef NDEBUG
3927  #define VMA_HEAVY_ASSERT(expr)
3928  #else
3929  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3930  #endif
3931 #endif
3932 
3933 #ifndef VMA_ALIGN_OF
3934  #define VMA_ALIGN_OF(type) (__alignof(type))
3935 #endif
3936 
3937 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3938  #if defined(_WIN32)
3939  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3940  #else
3941  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3942  #endif
3943 #endif
3944 
3945 #ifndef VMA_SYSTEM_FREE
3946  #if defined(_WIN32)
3947  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3948  #else
3949  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3950  #endif
3951 #endif
3952 
3953 #ifndef VMA_MIN
3954  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3955 #endif
3956 
3957 #ifndef VMA_MAX
3958  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3959 #endif
3960 
3961 #ifndef VMA_SWAP
3962  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3963 #endif
3964 
3965 #ifndef VMA_SORT
3966  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3967 #endif
3968 
3969 #ifndef VMA_DEBUG_LOG
3970  #define VMA_DEBUG_LOG(format, ...)
3971  /*
3972  #define VMA_DEBUG_LOG(format, ...) do { \
3973  printf(format, __VA_ARGS__); \
3974  printf("\n"); \
3975  } while(false)
3976  */
3977 #endif
3978 
3979 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3980 #if VMA_STATS_STRING_ENABLED
// Helpers that format numbers and pointers into caller-provided buffers,
// used when building the textual stats string. Each cast makes the argument's
// type match its printf conversion specifier exactly ("%u", "%llu", "%p"),
// which is required for defined behavior. snprintf always NUL-terminates
// when strLen > 0, so output is safely truncated to the buffer size.
3981  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3982  {
3983  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3984  }
3985  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3986  {
3987  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3988  }
3989  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3990  {
3991  snprintf(outStr, strLen, "%p", ptr);
3992  }
3993 #endif
3994 
// Default mutex implementation wrapping std::mutex.
// Overridable by defining VMA_MUTEX before including this file.
#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif
4007 
// Read-write mutex, where "read" is shared access, "write" is exclusive access.
// Overridable by defining VMA_RW_MUTEX before including this file.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex (readers exclude each other too).
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX
4060 
/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

// Debug switch: presumably forces every allocation into its own memory block
// when set to 1 - confirm against VMA documentation.
#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

// Minimum alignment applied to all allocations, in bytes.
#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

// Margin reserved around allocations, in bytes (see VmaWriteMagicValue below).
#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

// Set to 1 to fill created/destroyed allocations with the bit patterns
// VMA_ALLOCATION_FILL_PATTERN_* defined below.
#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

// Set to 1 (together with nonzero VMA_DEBUG_MARGIN) to write and validate
// VMA_CORRUPTION_DETECTION_MAGIC_VALUE in the margins (overrun detection).
#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

// Set to 1 to serialize calls with the single global mutex declared further below.
#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

// Lower bound enforced for VkPhysicalDeviceLimits::bufferImageGranularity.
#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

// Heaps up to this size (1 GiB) are treated as "small".
#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

// Default size (256 MiB) of a memory block allocated from a large heap.
#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

// Makes a class non-copyable by deleting its copy constructor and copy assignment.
#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

// Sentinel frame index - NOTE(review): presumably marks a "lost" allocation; confirm.
static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

// Byte patterns used when VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled.
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// # Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

// All-null callbacks, used when the user provided no VkAllocationCallbacks.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
4170 
// Returns number of bits set to 1 in (v) (population count).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
4181 
// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Assumes align > 0. Works for any positive align, not only powers of 2.
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
4196 
// Division with mathematical rounding to nearest number.
// Use unsigned integer types as T; assumes y > 0.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
4203 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so clearing the lowest set bit
    // with x & (x-1) yields 0.
    return (x & (x-1)) == 0;
}
4214 
// Returns smallest power of 2 greater or equal to v.
// Note: for v == 0 the result is 0 (the v-- wraps around in unsigned arithmetic).
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Smear the highest set bit into all lower positions, then add 1.
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
// 64-bit overload of VmaNextPow2.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}
4239 
// Returns largest power of 2 less or equal to v. For v == 0 returns 0.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the highest set bit downward, then isolate it with v ^ (v >> 1).
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
// 64-bit overload of VmaPrevPow2.
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
4262 
4263 static inline bool VmaStrIsEmpty(const char* pStr)
4264 {
4265  return pStr == VMA_NULL || *pStr == '\0';
4266 }
4267 
#if VMA_STATS_STRING_ENABLED

// Returns a human-readable name for a pool algorithm flag value,
// as used in the JSON stats string. 0 means the default algorithm.
// Fixed: the two case labels were missing, leaving the "Linear" and "Buddy"
// returns as unreachable statements before the first case of the switch.
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED
4287 
#ifndef VMA_SORT

// Lomuto-style partition used by VmaQuickSort below.
// Uses the last element as pivot and returns an iterator to its final position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element.
    Iterator insertIndex = beg;                // Boundary of the "less than pivot" prefix.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

// Recursive quicksort; fallback when the user did not override VMA_SORT.
// Cmp returns true if its first argument is less than the second.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT
4327 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".

pageSize must be a power of 2 - the mask arithmetic below relies on it.
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;    // Last byte of A.
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);     // Page containing A's last byte.
    VkDeviceSize resourceBStart = resourceBOffset;                      // First byte of B.
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); // Page containing B's first byte.
    return resourceAEndPage == resourceBStartPage;
}
4348 
// Kind of content stored in a suballocation; consumed by
// VmaIsBufferImageGranularityConflict below.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused region.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
4359 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so suballocType1 <= suballocType2; the switch then only
    // needs to handle each unordered pair once.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
4400 
// Fills the debug margin at (pData + offset) with VMA_CORRUPTION_DETECTION_MAGIC_VALUE.
// Compiles to a no-op unless both margins and corruption detection are enabled.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

// Returns false if the magic value written by VmaWriteMagicValue was overwritten
// (i.e. memory corruption detected). Always returns true when detection is disabled.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}
4430 
/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    // Both SRC and DST usage: the buffer stands in for either side of a copy.
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}
4442 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, locking is skipped entirely (single-threaded mode).
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Optional global lock controlled by VMA_DEBUG_GLOBAL_MUTEX; expands to nothing by default.
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4492 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp) - i.e. a lower bound.

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        // Midpoint computed without risk of overflow.
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1; // Element is less than key - answer lies to the right.
        }
        else
        {
            hi = mid;     // Element is >= key - it may be the answer.
        }
    }
    return beg + lo;
}
4520 
// Binary-searches a sorted range for an element equivalent to (value) under cmp.
// Returns an iterator to the found element, or (end) if not present.
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it))) // Neither less nor greater => equivalent.
    {
        return it;
    }
    return end;
}
4533 
4534 /*
4535 Returns true if all pointers in the array are not-null and unique.
4536 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4537 T must be pointer type, e.g. VmaAllocation, VmaPool.
4538 */
4539 template<typename T>
4540 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4541 {
4542  for(uint32_t i = 0; i < count; ++i)
4543  {
4544  const T iPtr = arr[i];
4545  if(iPtr == VMA_NULL)
4546  {
4547  return false;
4548  }
4549  for(uint32_t j = i + 1; j < count; ++j)
4550  {
4551  if(iPtr == arr[j])
4552  {
4553  return false;
4554  }
4555  }
4556  }
4557  return true;
4558 }
4559 
// Pushes newStruct onto the front of mainStruct's pNext chain
// (Vulkan extension-structure chaining).
template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}
4566 
// Memory allocation

// Allocates (size) bytes with given (alignment), through the user-provided
// VkAllocationCallbacks when available, otherwise the system aligned allocator.
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

// Frees memory allocated by VmaMalloc. Must be called with the same callbacks
// that were used for the allocation, so the paths match.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
4599 
// Allocates uninitialized, properly aligned memory for one object of type T.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

// Allocates uninitialized memory for an array of (count) objects of type T.
// Note: sizeof(T) * count is not checked for overflow here.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

// Placement-new helpers that construct object(s) in memory obtained from the
// allocators above. Pair with vma_delete / vma_delete_array.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4615 
// Destroys and frees a single object created with vma_new.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T(); // Explicit destructor call - placement new was used for construction.
    VmaFree(pAllocationCallbacks, ptr);
}

// Destroys (in reverse index order) and frees an array created with vma_new_array.
// Safe to call with ptr == VMA_NULL.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
4635 
// Returns a heap-allocated copy of srcStr (including the terminating '\0'),
// or VMA_NULL when srcStr is VMA_NULL. Free the result with VmaFreeString.
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

// Frees a string returned by VmaCreateStringCopy. Safe to call with VMA_NULL.
static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}
4659 
// STL-compatible allocator that routes all allocations through VkAllocationCallbacks.
// Implements the minimal C++11 Allocator interface.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by allocator-aware containers.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they share the same callbacks pointer.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
4687 
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

// Inserts item at given index (std::vector flavor).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

// Removes the element at given index (std::vector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
4703 
4704 #else // #if VMA_USE_STL_VECTOR
4705 
4706 /* Class with interface compatible with subset of std::vector.
4707 T must be POD because constructors and destructors are not called and memcpy is
4708 used for these objects. */
4709 template<typename T, typename AllocatorT>
4710 class VmaVector
4711 {
4712 public:
4713  typedef T value_type;
4714 
4715  VmaVector(const AllocatorT& allocator) :
4716  m_Allocator(allocator),
4717  m_pArray(VMA_NULL),
4718  m_Count(0),
4719  m_Capacity(0)
4720  {
4721  }
4722 
4723  VmaVector(size_t count, const AllocatorT& allocator) :
4724  m_Allocator(allocator),
4725  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4726  m_Count(count),
4727  m_Capacity(count)
4728  {
4729  }
4730 
4731  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4732  // value is unused.
4733  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4734  : VmaVector(count, allocator) {}
4735 
4736  VmaVector(const VmaVector<T, AllocatorT>& src) :
4737  m_Allocator(src.m_Allocator),
4738  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4739  m_Count(src.m_Count),
4740  m_Capacity(src.m_Count)
4741  {
4742  if(m_Count != 0)
4743  {
4744  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4745  }
4746  }
4747 
4748  ~VmaVector()
4749  {
4750  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4751  }
4752 
4753  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4754  {
4755  if(&rhs != this)
4756  {
4757  resize(rhs.m_Count);
4758  if(m_Count != 0)
4759  {
4760  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4761  }
4762  }
4763  return *this;
4764  }
4765 
4766  bool empty() const { return m_Count == 0; }
4767  size_t size() const { return m_Count; }
4768  T* data() { return m_pArray; }
4769  const T* data() const { return m_pArray; }
4770 
4771  T& operator[](size_t index)
4772  {
4773  VMA_HEAVY_ASSERT(index < m_Count);
4774  return m_pArray[index];
4775  }
4776  const T& operator[](size_t index) const
4777  {
4778  VMA_HEAVY_ASSERT(index < m_Count);
4779  return m_pArray[index];
4780  }
4781 
4782  T& front()
4783  {
4784  VMA_HEAVY_ASSERT(m_Count > 0);
4785  return m_pArray[0];
4786  }
4787  const T& front() const
4788  {
4789  VMA_HEAVY_ASSERT(m_Count > 0);
4790  return m_pArray[0];
4791  }
4792  T& back()
4793  {
4794  VMA_HEAVY_ASSERT(m_Count > 0);
4795  return m_pArray[m_Count - 1];
4796  }
4797  const T& back() const
4798  {
4799  VMA_HEAVY_ASSERT(m_Count > 0);
4800  return m_pArray[m_Count - 1];
4801  }
4802 
4803  void reserve(size_t newCapacity, bool freeMemory = false)
4804  {
4805  newCapacity = VMA_MAX(newCapacity, m_Count);
4806 
4807  if((newCapacity < m_Capacity) && !freeMemory)
4808  {
4809  newCapacity = m_Capacity;
4810  }
4811 
4812  if(newCapacity != m_Capacity)
4813  {
4814  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4815  if(m_Count != 0)
4816  {
4817  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4818  }
4819  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4820  m_Capacity = newCapacity;
4821  m_pArray = newArray;
4822  }
4823  }
4824 
4825  void resize(size_t newCount, bool freeMemory = false)
4826  {
4827  size_t newCapacity = m_Capacity;
4828  if(newCount > m_Capacity)
4829  {
4830  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4831  }
4832  else if(freeMemory)
4833  {
4834  newCapacity = newCount;
4835  }
4836 
4837  if(newCapacity != m_Capacity)
4838  {
4839  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4840  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4841  if(elementsToCopy != 0)
4842  {
4843  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4844  }
4845  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4846  m_Capacity = newCapacity;
4847  m_pArray = newArray;
4848  }
4849 
4850  m_Count = newCount;
4851  }
4852 
4853  void clear(bool freeMemory = false)
4854  {
4855  resize(0, freeMemory);
4856  }
4857 
4858  void insert(size_t index, const T& src)
4859  {
4860  VMA_HEAVY_ASSERT(index <= m_Count);
4861  const size_t oldCount = size();
4862  resize(oldCount + 1);
4863  if(index < oldCount)
4864  {
4865  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4866  }
4867  m_pArray[index] = src;
4868  }
4869 
4870  void remove(size_t index)
4871  {
4872  VMA_HEAVY_ASSERT(index < m_Count);
4873  const size_t oldCount = size();
4874  if(index < oldCount - 1)
4875  {
4876  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4877  }
4878  resize(oldCount - 1);
4879  }
4880 
4881  void push_back(const T& src)
4882  {
4883  const size_t newIndex = size();
4884  resize(newIndex + 1);
4885  m_pArray[newIndex] = src;
4886  }
4887 
4888  void pop_back()
4889  {
4890  VMA_HEAVY_ASSERT(m_Count > 0);
4891  resize(size() - 1);
4892  }
4893 
4894  void push_front(const T& src)
4895  {
4896  insert(0, src);
4897  }
4898 
4899  void pop_front()
4900  {
4901  VMA_HEAVY_ASSERT(m_Count > 0);
4902  remove(0);
4903  }
4904 
4905  typedef T* iterator;
4906 
4907  iterator begin() { return m_pArray; }
4908  iterator end() { return m_pArray + m_Count; }
4909 
4910 private:
4911  AllocatorT m_Allocator;
4912  T* m_pArray;
4913  size_t m_Count;
4914  size_t m_Capacity;
4915 };
4916 
// Inserts item at given index (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

// Removes the element at given index (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4928 
4929 #endif // #if VMA_USE_STL_VECTOR
4930 
// Inserts value into a vector kept sorted by CmpLess; returns the insertion index.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

// Removes the element equivalent to value from a sorted vector.
// Returns true if an element was found and removed, false otherwise.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Found iff the lower-bound element is equivalent to value.
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
4960 
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Constructs a T in a free slot, forwarding args to its constructor.
    template<typename... Types> T* Alloc(Types... args);
    // Destroys the object and returns its slot to the free list.
    void Free(T* ptr);

private:
    // One slot: holds either a live T, or (when free) the index of the next free slot.
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    // A contiguous array of Items with an intrusive singly-linked free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4999 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // Capacity > 1 required - presumably so the Capacity * 3 / 2 growth in
    // CreateNewBlock() actually increases; confirm.
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Free all item arrays in reverse order of creation.
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}
5016 
// Allocates one item, constructing it in place from (args).
// Reuses a free slot from an existing block if possible; otherwise creates a new block.
template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
{
    // Search existing blocks, newest first, for a free slot.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex; // Pop slot from free list.
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}
5042 
// Destroys the item and returns its slot to the owning block's free list.
// Asserts if ptr was not allocated from this pool. Blocks are never released here.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union - done via memcpy to avoid pointer-aliasing issues.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            // Push the freed slot onto the block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
5067 
5068 template<typename T>
5069 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5070 {
5071  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5072  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5073 
5074  const ItemBlock newBlock = {
5075  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5076  newBlockCapacity,
5077  0 };
5078 
5079  m_ItemBlocks.push_back(newBlock);
5080 
5081  // Setup singly-linked list of all free items in this block.
5082  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5083  newBlock.pItems[i].NextFreeIndex = i + 1;
5084  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5085  return m_ItemBlocks.back();
5086 }
5087 
5089 // class VmaRawList, VmaList
5090 
5091 #if VMA_USE_STL_LIST
5092 
5093 #define VmaList std::list
5094 
5095 #else // #if VMA_USE_STL_LIST
5096 
// Node of VmaRawList's doubly linked list.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Previous node; null for the front item.
    VmaListItem* pNext; // Next node; null for the back item.
    T Value;            // The stored element.
};
5104 
// Doubly linked list.
// Nodes are allocated from an internal VmaPoolAllocator; there is no iterator
// layer here - VmaList below wraps this class with an STL-like interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Does NOT free nodes individually - see definition for rationale.
    ~VmaRawList();
    // Returns all nodes to the pool allocator and empties the list.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Parameterless versions append a node with default-constructed Value;
    // the value-taking versions then copy-assign into it.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks pItem and returns it to the pool allocator.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Source of all list nodes.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
5149 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128: first-block item capacity of the node pool.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
5159 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // Node memory is reclaimed wholesale when m_ItemAllocator is destroyed.
}
5166 
5167 template<typename T>
5168 void VmaRawList<T>::Clear()
5169 {
5170  if(IsEmpty() == false)
5171  {
5172  ItemType* pItem = m_pBack;
5173  while(pItem != VMA_NULL)
5174  {
5175  ItemType* const pPrevItem = pItem->pPrev;
5176  m_ItemAllocator.Free(pItem);
5177  pItem = pPrevItem;
5178  }
5179  m_pFront = VMA_NULL;
5180  m_pBack = VMA_NULL;
5181  m_Count = 0;
5182  }
5183 }
5184 
5185 template<typename T>
5186 VmaListItem<T>* VmaRawList<T>::PushBack()
5187 {
5188  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5189  pNewItem->pNext = VMA_NULL;
5190  if(IsEmpty())
5191  {
5192  pNewItem->pPrev = VMA_NULL;
5193  m_pFront = pNewItem;
5194  m_pBack = pNewItem;
5195  m_Count = 1;
5196  }
5197  else
5198  {
5199  pNewItem->pPrev = m_pBack;
5200  m_pBack->pNext = pNewItem;
5201  m_pBack = pNewItem;
5202  ++m_Count;
5203  }
5204  return pNewItem;
5205 }
5206 
5207 template<typename T>
5208 VmaListItem<T>* VmaRawList<T>::PushFront()
5209 {
5210  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5211  pNewItem->pPrev = VMA_NULL;
5212  if(IsEmpty())
5213  {
5214  pNewItem->pNext = VMA_NULL;
5215  m_pFront = pNewItem;
5216  m_pBack = pNewItem;
5217  m_Count = 1;
5218  }
5219  else
5220  {
5221  pNewItem->pNext = m_pFront;
5222  m_pFront->pPrev = pNewItem;
5223  m_pFront = pNewItem;
5224  ++m_Count;
5225  }
5226  return pNewItem;
5227 }
5228 
5229 template<typename T>
5230 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5231 {
5232  ItemType* const pNewItem = PushBack();
5233  pNewItem->Value = value;
5234  return pNewItem;
5235 }
5236 
5237 template<typename T>
5238 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5239 {
5240  ItemType* const pNewItem = PushFront();
5241  pNewItem->Value = value;
5242  return pNewItem;
5243 }
5244 
5245 template<typename T>
5246 void VmaRawList<T>::PopBack()
5247 {
5248  VMA_HEAVY_ASSERT(m_Count > 0);
5249  ItemType* const pBackItem = m_pBack;
5250  ItemType* const pPrevItem = pBackItem->pPrev;
5251  if(pPrevItem != VMA_NULL)
5252  {
5253  pPrevItem->pNext = VMA_NULL;
5254  }
5255  m_pBack = pPrevItem;
5256  m_ItemAllocator.Free(pBackItem);
5257  --m_Count;
5258 }
5259 
5260 template<typename T>
5261 void VmaRawList<T>::PopFront()
5262 {
5263  VMA_HEAVY_ASSERT(m_Count > 0);
5264  ItemType* const pFrontItem = m_pFront;
5265  ItemType* const pNextItem = pFrontItem->pNext;
5266  if(pNextItem != VMA_NULL)
5267  {
5268  pNextItem->pPrev = VMA_NULL;
5269  }
5270  m_pFront = pNextItem;
5271  m_ItemAllocator.Free(pFrontItem);
5272  --m_Count;
5273 }
5274 
5275 template<typename T>
5276 void VmaRawList<T>::Remove(ItemType* pItem)
5277 {
5278  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5279  VMA_HEAVY_ASSERT(m_Count > 0);
5280 
5281  if(pItem->pPrev != VMA_NULL)
5282  {
5283  pItem->pPrev->pNext = pItem->pNext;
5284  }
5285  else
5286  {
5287  VMA_HEAVY_ASSERT(m_pFront == pItem);
5288  m_pFront = pItem->pNext;
5289  }
5290 
5291  if(pItem->pNext != VMA_NULL)
5292  {
5293  pItem->pNext->pPrev = pItem->pPrev;
5294  }
5295  else
5296  {
5297  VMA_HEAVY_ASSERT(m_pBack == pItem);
5298  m_pBack = pItem->pPrev;
5299  }
5300 
5301  m_ItemAllocator.Free(pItem);
5302  --m_Count;
5303 }
5304 
5305 template<typename T>
5306 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5307 {
5308  if(pItem != VMA_NULL)
5309  {
5310  ItemType* const prevItem = pItem->pPrev;
5311  ItemType* const newItem = m_ItemAllocator.Alloc();
5312  newItem->pPrev = prevItem;
5313  newItem->pNext = pItem;
5314  pItem->pPrev = newItem;
5315  if(prevItem != VMA_NULL)
5316  {
5317  prevItem->pNext = newItem;
5318  }
5319  else
5320  {
5321  VMA_HEAVY_ASSERT(m_pFront == pItem);
5322  m_pFront = newItem;
5323  }
5324  ++m_Count;
5325  return newItem;
5326  }
5327  else
5328  return PushBack();
5329 }
5330 
5331 template<typename T>
5332 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5333 {
5334  if(pItem != VMA_NULL)
5335  {
5336  ItemType* const nextItem = pItem->pNext;
5337  ItemType* const newItem = m_ItemAllocator.Alloc();
5338  newItem->pNext = nextItem;
5339  newItem->pPrev = pItem;
5340  pItem->pNext = newItem;
5341  if(nextItem != VMA_NULL)
5342  {
5343  nextItem->pPrev = newItem;
5344  }
5345  else
5346  {
5347  VMA_HEAVY_ASSERT(m_pBack == pItem);
5348  m_pBack = newItem;
5349  }
5350  ++m_Count;
5351  return newItem;
5352  }
5353  else
5354  return PushFront();
5355 }
5356 
5357 template<typename T>
5358 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5359 {
5360  ItemType* const newItem = InsertBefore(pItem);
5361  newItem->Value = value;
5362  return newItem;
5363 }
5364 
5365 template<typename T>
5366 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5367 {
5368  ItemType* const newItem = InsertAfter(pItem);
5369  newItem->Value = value;
5370  return newItem;
5371 }
5372 
/*
STL-like wrapper over VmaRawList exposing a subset of the std::list interface.
AllocatorT must expose m_pCallbacks (as VmaStlAllocator does); it is used only
to obtain the VkAllocationCallbacks for the underlying raw list.
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional mutable iterator. end() is represented by m_pItem == null
    // with m_pList still set, so operator-- on end() can recover Back().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            // Incrementing end() is not allowed.
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end() yields the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is a logic error.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null means end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            // Incrementing cend() is not allowed.
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend() yields the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null means cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before it; insert(end(), v) appends (raw list treats null as PushBack).
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
5557 
5558 #endif // #if VMA_USE_STL_LIST
5559 
5561 // class VmaMap
5562 
5563 // Unused in this version.
5564 #if 0
5565 
5566 #if VMA_USE_STL_UNORDERED_MAP
5567 
5568 #define VmaPair std::pair
5569 
5570 #define VMA_MAP_TYPE(KeyT, ValueT) \
5571  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5572 
5573 #else // #if VMA_USE_STL_UNORDERED_MAP
5574 
// Minimal stand-in for std::pair used by VmaMap. (Dead code: under #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
5584 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a VmaVector of pairs kept sorted by key, searched with
binary search. (Dead code: under #if 0.)
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Raw pointer into the sorted vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts at the sorted position; duplicates are not rejected.
    void insert(const PairType& pair);
    // Returns end() when the key is absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
5607 
5608 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5609 
// Orders pairs by .first only; the second overload allows comparing a pair
// directly against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
5622 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search for the first element not less than pair (by key),
    // then insert there to keep the vector sorted.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
5633 
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Lower-bound search against the bare key (uses the heterogeneous
    // overload of VmaPairFirstLess), then confirm an exact key match.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end(); // Not found.
    }
}
5651 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // Convert the iterator (raw pointer) to an index and remove that element.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
5657 
5658 #endif // #if VMA_USE_STL_UNORDERED_MAP
5659 
5660 #endif // #if 0
5661 
5663 
5664 class VmaDeviceMemoryBlock;
5665 
// Selects the operation applied to a range of mapped memory: flush or
// invalidate. NOTE(review): presumably maps to vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges - confirm at the call sites.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5667 
/*
Internal representation of a single allocation (the object behind the
VmaAllocation handle). It is either a sub-range of a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or owns a whole VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the two cases share storage via a union.
*/
struct VmaAllocation_T
{
private:
    // Bit set in m_MapCount when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    // Bit flags stored in m_Flags.
    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01, // m_pUserData is an owned, heap-allocated string.
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Freshly constructed; Init* not called yet.
        ALLOCATION_TYPE_BLOCK,     // Sub-allocation inside a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    /*
    This struct is allocated using VmaPoolAllocator.
    */

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment{1},
        m_Size{0},
        m_pUserData{VMA_NULL},
        m_LastUseFrameIndex{currentFrameIndex},
        m_MemoryTypeIndex{0},
        m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
        m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
        m_MapCount{0},
        m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; any other map count means
        // the user forgot a vmaUnmapMemory.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns a NONE allocation into a block sub-allocation. Must be the first
    // Init* call on this object.
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MemoryTypeIndex = memoryTypeIndex;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an already-lost allocation: a BLOCK-type
    // allocation with a null block. Requires m_LastUseFrameIndex to already
    // equal VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_MemoryTypeIndex = 0;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_MemoryTypeIndex = memoryTypeIndex;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; may fail spuriously (weak CAS),
    // callers retry in a loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a fully-used block.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX; // No unused ranges: min stays at sentinel.
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set at most once (asserts it was previously 0).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData; // Owned string when FLAG_USER_DATA_STRING is set; opaque pointer otherwise.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
5888 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaAllocation hAllocation; // Null for a FREE suballocation.
    VmaSuballocationType type;
};
5900 
// Comparator for offsets: ascending order.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Comparator for offsets: descending order.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
5916 
5917 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5918 
5919 // Cost of one additional allocation lost, as equivalent in bytes.
5920 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5921 
// Kind of placement a VmaAllocationRequest describes.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
5930 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Algorithm-specific payload.
    VmaAllocationRequestType type;

    // Cost of accepting this request: bytes of live allocations sacrificed,
    // plus a fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST each).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
5959 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class: concrete strategies (e.g. VmaBlockMetadata_Generic below)
implement the placement algorithm. Init(size) must be called before use.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Marks the allocations named in pAllocationRequest as lost, making room
    // for the planned allocation. Returns false if that is no longer possible.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses to emit a uniform JSON layout of the block.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
6045 
// Validation helper: if cond is false, asserts with the stringized condition
// and makes the enclosing function return false. For Validate() implementations.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
6050 
// Default block-metadata strategy: keeps all suballocations (used and free)
// in a list ordered by offset, plus a by-size sorted index of large free
// ranges for best-fit searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total entries minus free entries = live allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;          // Number of FREE entries in m_Suballocations.
    VkDeviceSize m_SumFreeSize;    // Total bytes in FREE entries.
    VmaSuballocationList m_Suballocations; // All entries, ordered by offset.
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
6150 
6151 /*
6152 Allocations and their references in internal data structure look like this:
6153 
6154 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6155 
6156  0 +-------+
6157  | |
6158  | |
6159  | |
6160  +-------+
6161  | Alloc | 1st[m_1stNullItemsBeginCount]
6162  +-------+
6163  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6164  +-------+
6165  | ... |
6166  +-------+
6167  | Alloc | 1st[1st.size() - 1]
6168  +-------+
6169  | |
6170  | |
6171  | |
6172 GetSize() +-------+
6173 
6174 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6175 
6176  0 +-------+
6177  | Alloc | 2nd[0]
6178  +-------+
6179  | Alloc | 2nd[1]
6180  +-------+
6181  | ... |
6182  +-------+
6183  | Alloc | 2nd[2nd.size() - 1]
6184  +-------+
6185  | |
6186  | |
6187  | |
6188  +-------+
6189  | Alloc | 1st[m_1stNullItemsBeginCount]
6190  +-------+
6191  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6192  +-------+
6193  | ... |
6194  +-------+
6195  | Alloc | 1st[1st.size() - 1]
6196  +-------+
6197  | |
6198 GetSize() +-------+
6199 
6200 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6201 
6202  0 +-------+
6203  | |
6204  | |
6205  | |
6206  +-------+
6207  | Alloc | 1st[m_1stNullItemsBeginCount]
6208  +-------+
6209  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6210  +-------+
6211  | ... |
6212  +-------+
6213  | Alloc | 1st[1st.size() - 1]
6214  +-------+
6215  | |
6216  | |
6217  | |
6218  +-------+
6219  | Alloc | 2nd[2nd.size() - 1]
6220  +-------+
6221  | ... |
6222  +-------+
6223  | Alloc | 2nd[1]
6224  +-------+
6225  | Alloc | 2nd[0]
6226 GetSize() +-------+
6227 
6228 */
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    // Always call after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    // Validates internal data structures. Returns false if anything is inconsistent.
    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Tries to find a place for an allocation with the given requirements inside this block.
    // On success fills *pAllocationRequest and returns true; the allocation itself is
    // committed later by Alloc().
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    // Commits a request previously produced by CreateAllocationRequest().
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // Backing storage for the two logical vectors; which one is "1st" is decided
    // by m_1stVectorIndex (see Access* accessors below).
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    // Implementation halves of CreateAllocationRequest(), split by the
    // upperAddress flag (lower address = bottom of double stack / ring buffer;
    // upper address = top of double stack).
    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
6347 
6348 /*
6349 - GetSize() is the original size of allocated memory block.
6350 - m_UsableSize is this size aligned down to a power of two.
6351  All allocations and calculations happen relative to m_UsableSize.
6352 - GetUnusableSize() is the difference between them.
 It is reported as a separate, unused range, not available for allocations.
6354 
6355 Node at level 0 has size = m_UsableSize.
6356 Each next level contains nodes with size 2 times smaller than current level.
6357 m_LevelCount is the maximum number of levels to use in the current object.
6358 */
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    // Always call after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail (beyond m_UsableSize) is counted as free so totals stay
    // consistent with the block's full size.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Tries to find a place for an allocation with the given requirements.
    // On success fills *pAllocationRequest and returns true; committed later by Alloc().
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while recursively validating the tree, to be compared
    // against the cached counters (m_AllocationCount, m_FreeCount, m_SumFreeSize).
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. Its size is implied by its level
    // (see LevelToNodeSize()); only the offset is stored explicitly.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        // Payload valid according to `type`.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free; // TYPE_FREE: links within the free list of this node's level.
            struct
            {
                VmaAllocation alloc;
            } allocation; // TYPE_ALLOCATION.
            struct
            {
                Node* leftChild;
            } split; // TYPE_SPLIT: NOTE(review) right child presumably reachable via leftChild->buddy - confirm in implementation.
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked list of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Maps an allocation size to a tree level; node size at a level is m_UsableSize >> level.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
6494 
6495 /*
6496 Represents a single block of device memory (`VkDeviceMemory`) with all the
6497 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6498 
6499 Thread-safety: This class must be externally synchronized.
6500 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Metadata describing suballocations within this block. Created in Init(),
    // concrete type depends on the `algorithm` parameter.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory `count` times. ppData can be null.
    // NOTE(review): m_MapCount suggests mapping is reference-counted - confirm in implementation.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify guard values around an allocation, used for corruption detection.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
6573 
// Strict-weak-ordering comparator for raw pointers, ordering them by address.
// Intended for sorted containers/algorithms keyed on pointer identity.
// NOTE(review): relational comparison of pointers into unrelated objects is
// unspecified in standard C++; all platforms supported here compare addresses.
struct VmaPointerLess
{
    bool operator()(const void* a, const void* b) const
    {
        return a < b;
    }
};
6581 
// One planned move of a single allocation from a source block/offset to a
// destination block/offset, produced by a defragmentation algorithm.
// Only describes the move; the actual data copy happens elsewhere.
struct VmaDefragmentationMove
{
    size_t srcBlockIndex; // Index into the owning block vector - TODO confirm whether original or post-sort index.
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
    VmaAllocation hAllocation;
    VmaDeviceMemoryBlock* pSrcBlock;
    VmaDeviceMemoryBlock* pDstBlock;
};
6593 
6594 class VmaDefragmentationAlgorithm;
6595 
6596 /*
6597 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6598 Vulkan memory type.
6599 
6600 Synchronized internally with a mutex.
6601 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks so the minimum is always available.
    VkResult CreateMinBlocks();

    VmaAllocator GetAllocator() const { return m_hAllocator; }
    VmaPool GetParentPool() const { return m_hParentPool; }
    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty();
    bool IsCorruptionDetectionEnabled() const;

    // Allocates `allocationCount` allocations at once, writing them to pAllocations.
    VkResult Allocate(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(const VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    // NOTE(review): a parameter line appears to have been lost between pCtx and
    // maxCpuBytesToMove in this extract (doxygen line 6661 missing) - verify
    // against the upstream header before editing this declaration.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        uint32_t flags,
        VmaDefragmentationStats* pStats);

    uint32_t ProcessDefragmentations(
        class VmaBlockVectorDefragmentationContext *pCtx,
        VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);

    void CommitDefragmentations(
        class VmaBlockVectorDefragmentationContext *pCtx,
        VmaDefragmentationStats* pStats);

    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    VMA_RW_MUTEX m_Mutex;

    /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
    a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation; helper for Allocate().
    VkResult AllocatePage(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);

    void UpdateHasEmptyBlock();
};
6757 
// Internal implementation of the VmaPool handle: a custom memory pool,
// which is essentially one VmaBlockVector plus an id and an optional name.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // May be called only once, on a freshly created pool (m_Id must still be 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

    const char* GetName() const { return m_Name; }
    void SetName(const char* pName);

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
    // NOTE(review): presumably an owned heap copy made by SetName() - confirm in implementation.
    char* m_Name;
};
6784 
6785 /*
6786 Performs defragmentation:
6787 
6788 - Updates `pBlockVector->m_pMetadata`.
6789 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6790 - Does not move actual data, only returns requested moves as `moves`.
6791 */
// Abstract base class for a defragmentation algorithm operating on a single
// VmaBlockVector. Concrete algorithms compute a list of requested moves;
// they do not move the actual data.
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers a single allocation as a candidate for moving.
    // pChanged, if not null, receives whether the allocation was moved.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks all allocations in the block vector as movable.
    virtual void AddAll() = 0;

    // Computes requested moves (appended to `moves`) within the given budget.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove,
        VmaDefragmentationFlags flags) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Candidate allocation plus an optional out-flag reported back to the caller.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
6843 
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove,
        VmaDefragmentationFlags flags);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // "Greater" comparator: used to sort allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // "Greater" comparator: used to sort allocations by offset, descending.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block bookkeeping for this algorithm.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // A block has non-movable allocations when it contains more allocations
        // than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Orders BlockInfo entries by their block pointer; the heterogeneous
    // overload allows searching a BlockInfo list by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of the algorithm; called from Defragment().
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove,
        bool freeOldAllocations);

    size_t CalcBlocksWithNonMovableCount() const;

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
6972 
6973 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6974 {
6975  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6976 public:
6977  VmaDefragmentationAlgorithm_Fast(
6978  VmaAllocator hAllocator,
6979  VmaBlockVector* pBlockVector,
6980  uint32_t currentFrameIndex,
6981  bool overlappingMoveSupported);
6982  virtual ~VmaDefragmentationAlgorithm_Fast();
6983 
6984  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6985  virtual void AddAll() { m_AllAllocations = true; }
6986 
6987  virtual VkResult Defragment(
6988  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6989  VkDeviceSize maxBytesToMove,
6990  uint32_t maxAllocationsToMove,
6991  VmaDefragmentationFlags flags);
6992 
6993  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6994  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6995 
6996 private:
6997  struct BlockInfo
6998  {
6999  size_t origBlockIndex;
7000  };
7001 
7002  class FreeSpaceDatabase
7003  {
7004  public:
7005  FreeSpaceDatabase()
7006  {
7007  FreeSpace s = {};
7008  s.blockInfoIndex = SIZE_MAX;
7009  for(size_t i = 0; i < MAX_COUNT; ++i)
7010  {
7011  m_FreeSpaces[i] = s;
7012  }
7013  }
7014 
7015  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7016  {
7017  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7018  {
7019  return;
7020  }
7021 
7022  // Find first invalid or the smallest structure.
7023  size_t bestIndex = SIZE_MAX;
7024  for(size_t i = 0; i < MAX_COUNT; ++i)
7025  {
7026  // Empty structure.
7027  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7028  {
7029  bestIndex = i;
7030  break;
7031  }
7032  if(m_FreeSpaces[i].size < size &&
7033  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7034  {
7035  bestIndex = i;
7036  }
7037  }
7038 
7039  if(bestIndex != SIZE_MAX)
7040  {
7041  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7042  m_FreeSpaces[bestIndex].offset = offset;
7043  m_FreeSpaces[bestIndex].size = size;
7044  }
7045  }
7046 
7047  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7048  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7049  {
7050  size_t bestIndex = SIZE_MAX;
7051  VkDeviceSize bestFreeSpaceAfter = 0;
7052  for(size_t i = 0; i < MAX_COUNT; ++i)
7053  {
7054  // Structure is valid.
7055  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7056  {
7057  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7058  // Allocation fits into this structure.
7059  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7060  {
7061  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7062  (dstOffset + size);
7063  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7064  {
7065  bestIndex = i;
7066  bestFreeSpaceAfter = freeSpaceAfter;
7067  }
7068  }
7069  }
7070  }
7071 
7072  if(bestIndex != SIZE_MAX)
7073  {
7074  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7075  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7076 
7077  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7078  {
7079  // Leave this structure for remaining empty space.
7080  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7081  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7082  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7083  }
7084  else
7085  {
7086  // This structure becomes invalid.
7087  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7088  }
7089 
7090  return true;
7091  }
7092 
7093  return false;
7094  }
7095 
    private:
        // Fixed capacity of this tiny free-space cache; a linear scan over
        // four entries is cheap enough for the defragmentation heuristics.
        static const size_t MAX_COUNT = 4;

        // One cached free region inside a block.
        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;   // Start of the free region within the block.
            VkDeviceSize size;     // Size of the free region in bytes.
        } m_FreeSpaces[MAX_COUNT];
    };
7106 
    // True when moved regions may overlap within one block (memmove-style copy).
    const bool m_OverlappingMoveSupported;

    // Number of allocations registered for this defragmentation run.
    uint32_t m_AllocationCount;
    // True when all allocations of the block vector should be considered,
    // not only the explicitly registered ones.
    bool m_AllAllocations;

    // Statistics accumulated while performing moves.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    // Inserts suballoc into pMetadata keeping its internal ordering.
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
7121 
// Per-block state used while defragmenting a block vector.
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;   // Combination of BLOCK_FLAG_* bits.
    // NOTE(review): presumably a temporary buffer bound to the block for
    // GPU-side copy operations - implementation not visible here; confirm.
    VkBuffer hBuffer;
};
7131 
/*
Defragmentation state for a single block vector - either a default pool of one
memory type or a custom pool. Owns the defragmentation algorithm object.
*/
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;     // Result of defragmentation for this block vector.
    bool mutexLocked; // True while this context holds the block vector's mutex.
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
    uint32_t defragmentationMovesProcessed;
    uint32_t defragmentationMovesCommitted;
    bool hasDefragmentationPlan;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    // Registers a single allocation to be moved. NOTE(review): pChanged is
    // presumably set when the allocation actually moves - confirm in the
    // implementation, which is not visible here.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    // Requests that all allocations of the block vector are considered.
    void AddAll() { m_AllAllocations = true; }

    // Prepares m_pAlgorithm for the run; implementation not visible here.
    void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
7179 
/*
Top-level defragmentation context returned to the user. Aggregates one
VmaBlockVectorDefragmentationContext per participating default pool (by memory
type) and per participating custom pool, and owns all of them.
*/
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    // Registers whole pools to be defragmented.
    void AddPools(uint32_t poolCount, const VmaPool* pPools);
    // Registers individual allocations to be defragmented.
    void AddAllocations(
        uint32_t allocationCount,
        const VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occured and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);

    VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
    VkResult DefragmentPassEnd();

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;

    // Limits for moves performed on CPU (memcpy) and GPU (vkCmdCopy*).
    VkDeviceSize m_MaxCpuBytesToMove;
    uint32_t m_MaxCpuAllocationsToMove;
    VkDeviceSize m_MaxGpuBytesToMove;
    uint32_t m_MaxGpuAllocationsToMove;

    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
7228 
7229 #if VMA_RECORDING_ENABLED
7230 
7231 class VmaRecorder
7232 {
7233 public:
7234  VmaRecorder();
7235  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7236  void WriteConfiguration(
7237  const VkPhysicalDeviceProperties& devProps,
7238  const VkPhysicalDeviceMemoryProperties& memProps,
7239  uint32_t vulkanApiVersion,
7240  bool dedicatedAllocationExtensionEnabled,
7241  bool bindMemory2ExtensionEnabled,
7242  bool memoryBudgetExtensionEnabled,
7243  bool deviceCoherentMemoryExtensionEnabled);
7244  ~VmaRecorder();
7245 
7246  void RecordCreateAllocator(uint32_t frameIndex);
7247  void RecordDestroyAllocator(uint32_t frameIndex);
7248  void RecordCreatePool(uint32_t frameIndex,
7249  const VmaPoolCreateInfo& createInfo,
7250  VmaPool pool);
7251  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7252  void RecordAllocateMemory(uint32_t frameIndex,
7253  const VkMemoryRequirements& vkMemReq,
7254  const VmaAllocationCreateInfo& createInfo,
7255  VmaAllocation allocation);
7256  void RecordAllocateMemoryPages(uint32_t frameIndex,
7257  const VkMemoryRequirements& vkMemReq,
7258  const VmaAllocationCreateInfo& createInfo,
7259  uint64_t allocationCount,
7260  const VmaAllocation* pAllocations);
7261  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7262  const VkMemoryRequirements& vkMemReq,
7263  bool requiresDedicatedAllocation,
7264  bool prefersDedicatedAllocation,
7265  const VmaAllocationCreateInfo& createInfo,
7266  VmaAllocation allocation);
7267  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7268  const VkMemoryRequirements& vkMemReq,
7269  bool requiresDedicatedAllocation,
7270  bool prefersDedicatedAllocation,
7271  const VmaAllocationCreateInfo& createInfo,
7272  VmaAllocation allocation);
7273  void RecordFreeMemory(uint32_t frameIndex,
7274  VmaAllocation allocation);
7275  void RecordFreeMemoryPages(uint32_t frameIndex,
7276  uint64_t allocationCount,
7277  const VmaAllocation* pAllocations);
7278  void RecordSetAllocationUserData(uint32_t frameIndex,
7279  VmaAllocation allocation,
7280  const void* pUserData);
7281  void RecordCreateLostAllocation(uint32_t frameIndex,
7282  VmaAllocation allocation);
7283  void RecordMapMemory(uint32_t frameIndex,
7284  VmaAllocation allocation);
7285  void RecordUnmapMemory(uint32_t frameIndex,
7286  VmaAllocation allocation);
7287  void RecordFlushAllocation(uint32_t frameIndex,
7288  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7289  void RecordInvalidateAllocation(uint32_t frameIndex,
7290  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7291  void RecordCreateBuffer(uint32_t frameIndex,
7292  const VkBufferCreateInfo& bufCreateInfo,
7293  const VmaAllocationCreateInfo& allocCreateInfo,
7294  VmaAllocation allocation);
7295  void RecordCreateImage(uint32_t frameIndex,
7296  const VkImageCreateInfo& imageCreateInfo,
7297  const VmaAllocationCreateInfo& allocCreateInfo,
7298  VmaAllocation allocation);
7299  void RecordDestroyBuffer(uint32_t frameIndex,
7300  VmaAllocation allocation);
7301  void RecordDestroyImage(uint32_t frameIndex,
7302  VmaAllocation allocation);
7303  void RecordTouchAllocation(uint32_t frameIndex,
7304  VmaAllocation allocation);
7305  void RecordGetAllocationInfo(uint32_t frameIndex,
7306  VmaAllocation allocation);
7307  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7308  VmaPool pool);
7309  void RecordDefragmentationBegin(uint32_t frameIndex,
7310  const VmaDefragmentationInfo2& info,
7312  void RecordDefragmentationEnd(uint32_t frameIndex,
7314  void RecordSetPoolName(uint32_t frameIndex,
7315  VmaPool pool,
7316  const char* name);
7317 
7318 private:
7319  struct CallParams
7320  {
7321  uint32_t threadId;
7322  double time;
7323  };
7324 
7325  class UserDataString
7326  {
7327  public:
7328  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7329  const char* GetString() const { return m_Str; }
7330 
7331  private:
7332  char m_PtrStr[17];
7333  const char* m_Str;
7334  };
7335 
7336  bool m_UseMutex;
7337  VmaRecordFlags m_Flags;
7338  FILE* m_File;
7339  VMA_MUTEX m_FileMutex;
7340  int64_t m_Freq;
7341  int64_t m_StartCounter;
7342 
7343  void GetBasicParams(CallParams& outParams);
7344 
7345  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7346  template<typename T>
7347  void PrintPointerList(uint64_t count, const T* pItems)
7348  {
7349  if(count)
7350  {
7351  fprintf(m_File, "%p", pItems[0]);
7352  for(uint64_t i = 1; i < count; ++i)
7353  {
7354  fprintf(m_File, " %p", pItems[i]);
7355  }
7356  }
7357  }
7358 
7359  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7360  void Flush();
7361 };
7362 
7363 #endif // #if VMA_RECORDING_ENABLED
7364 
7365 /*
7366 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7367 */
/*
Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
*/
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);

    // Constructs a VmaAllocation_T forwarding args, under m_Mutex.
    template<typename... Types> VmaAllocation Allocate(Types... args);
    // Returns an object previously obtained from Allocate to the pool.
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex; // Serializes access to m_Allocator.
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
7381 
// Per-heap bookkeeping of bytes allocated by VMA, plus (optionally) the
// usage/budget numbers last fetched from VK_EXT_memory_budget.
struct VmaCurrentBudgetData
{
    // Bytes owned in VkDeviceMemory blocks / in individual allocations, per heap.
    // Atomic: updated from allocation paths without external locking.
    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];

#if VMA_MEMORY_BUDGET
    // Number of allocations/frees since the last budget fetch; used to decide
    // when the cached Vulkan budget numbers are stale.
    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
    VMA_RW_MUTEX m_BudgetMutex; // Guards the cached values below.
    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
#endif // #if VMA_MEMORY_BUDGET

    // Zero-initializes all per-heap counters.
    VmaCurrentBudgetData()
    {
        for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
        {
            m_BlockBytes[heapIndex] = 0;
            m_AllocationBytes[heapIndex] = 0;
#if VMA_MEMORY_BUDGET
            m_VulkanUsage[heapIndex] = 0;
            m_VulkanBudget[heapIndex] = 0;
            m_BlockBytesAtBudgetFetch[heapIndex] = 0;
#endif
        }

#if VMA_MEMORY_BUDGET
        m_OperationsSinceBudgetFetch = 0;
#endif
    }

    // Accounts a new allocation of allocationSize bytes in the given heap.
    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
    {
        m_AllocationBytes[heapIndex] += allocationSize;
#if VMA_MEMORY_BUDGET
        ++m_OperationsSinceBudgetFetch;
#endif
    }

    // Accounts the release of an allocation of allocationSize bytes.
    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
    {
        VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // Underflow would mean mismatched Add/Remove bookkeeping.
        m_AllocationBytes[heapIndex] -= allocationSize;
#if VMA_MEMORY_BUDGET
        ++m_OperationsSinceBudgetFetch;
#endif
    }
};
7430 
7431 // Main allocator object.
7432 struct VmaAllocator_T
7433 {
7434  VMA_CLASS_NO_COPY(VmaAllocator_T)
7435 public:
7436  bool m_UseMutex;
7437  uint32_t m_VulkanApiVersion;
7438  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7439  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7440  bool m_UseExtMemoryBudget;
7441  bool m_UseAmdDeviceCoherentMemory;
7442  bool m_UseKhrBufferDeviceAddress;
7443  VkDevice m_hDevice;
7444  VkInstance m_hInstance;
7445  bool m_AllocationCallbacksSpecified;
7446  VkAllocationCallbacks m_AllocationCallbacks;
7447  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7448  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7449 
7450  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
7451  uint32_t m_HeapSizeLimitMask;
7452 
7453  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7454  VkPhysicalDeviceMemoryProperties m_MemProps;
7455 
7456  // Default pools.
7457  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7458 
7459  // Each vector is sorted by memory (handle value).
7460  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7461  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7462  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7463 
7464  VmaCurrentBudgetData m_Budget;
7465 
7466  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7467  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7468  ~VmaAllocator_T();
7469 
7470  const VkAllocationCallbacks* GetAllocationCallbacks() const
7471  {
7472  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7473  }
7474  const VmaVulkanFunctions& GetVulkanFunctions() const
7475  {
7476  return m_VulkanFunctions;
7477  }
7478 
7479  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7480 
7481  VkDeviceSize GetBufferImageGranularity() const
7482  {
7483  return VMA_MAX(
7484  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7485  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7486  }
7487 
7488  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7489  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7490 
7491  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7492  {
7493  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7494  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7495  }
7496  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7497  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7498  {
7499  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7500  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7501  }
7502  // Minimum alignment for all allocations in specific memory type.
7503  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7504  {
7505  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7506  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7507  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7508  }
7509 
7510  bool IsIntegratedGpu() const
7511  {
7512  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7513  }
7514 
7515  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7516 
7517 #if VMA_RECORDING_ENABLED
7518  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7519 #endif
7520 
7521  void GetBufferMemoryRequirements(
7522  VkBuffer hBuffer,
7523  VkMemoryRequirements& memReq,
7524  bool& requiresDedicatedAllocation,
7525  bool& prefersDedicatedAllocation) const;
7526  void GetImageMemoryRequirements(
7527  VkImage hImage,
7528  VkMemoryRequirements& memReq,
7529  bool& requiresDedicatedAllocation,
7530  bool& prefersDedicatedAllocation) const;
7531 
7532  // Main allocation function.
7533  VkResult AllocateMemory(
7534  const VkMemoryRequirements& vkMemReq,
7535  bool requiresDedicatedAllocation,
7536  bool prefersDedicatedAllocation,
7537  VkBuffer dedicatedBuffer,
7538  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7539  VkImage dedicatedImage,
7540  const VmaAllocationCreateInfo& createInfo,
7541  VmaSuballocationType suballocType,
7542  size_t allocationCount,
7543  VmaAllocation* pAllocations);
7544 
7545  // Main deallocation function.
7546  void FreeMemory(
7547  size_t allocationCount,
7548  const VmaAllocation* pAllocations);
7549 
7550  VkResult ResizeAllocation(
7551  const VmaAllocation alloc,
7552  VkDeviceSize newSize);
7553 
7554  void CalculateStats(VmaStats* pStats);
7555 
7556  void GetBudget(
7557  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7558 
7559 #if VMA_STATS_STRING_ENABLED
7560  void PrintDetailedMap(class VmaJsonWriter& json);
7561 #endif
7562 
7563  VkResult DefragmentationBegin(
7564  const VmaDefragmentationInfo2& info,
7565  VmaDefragmentationStats* pStats,
7566  VmaDefragmentationContext* pContext);
7567  VkResult DefragmentationEnd(
7568  VmaDefragmentationContext context);
7569 
7570  VkResult DefragmentationPassBegin(
7572  VmaDefragmentationContext context);
7573  VkResult DefragmentationPassEnd(
7574  VmaDefragmentationContext context);
7575 
7576  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7577  bool TouchAllocation(VmaAllocation hAllocation);
7578 
7579  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7580  void DestroyPool(VmaPool pool);
7581  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7582 
7583  void SetCurrentFrameIndex(uint32_t frameIndex);
7584  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7585 
7586  void MakePoolAllocationsLost(
7587  VmaPool hPool,
7588  size_t* pLostAllocationCount);
7589  VkResult CheckPoolCorruption(VmaPool hPool);
7590  VkResult CheckCorruption(uint32_t memoryTypeBits);
7591 
7592  void CreateLostAllocation(VmaAllocation* pAllocation);
7593 
7594  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7595  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7596  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7597  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7598  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7599  VkResult BindVulkanBuffer(
7600  VkDeviceMemory memory,
7601  VkDeviceSize memoryOffset,
7602  VkBuffer buffer,
7603  const void* pNext);
7604  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7605  VkResult BindVulkanImage(
7606  VkDeviceMemory memory,
7607  VkDeviceSize memoryOffset,
7608  VkImage image,
7609  const void* pNext);
7610 
7611  VkResult Map(VmaAllocation hAllocation, void** ppData);
7612  void Unmap(VmaAllocation hAllocation);
7613 
7614  VkResult BindBufferMemory(
7615  VmaAllocation hAllocation,
7616  VkDeviceSize allocationLocalOffset,
7617  VkBuffer hBuffer,
7618  const void* pNext);
7619  VkResult BindImageMemory(
7620  VmaAllocation hAllocation,
7621  VkDeviceSize allocationLocalOffset,
7622  VkImage hImage,
7623  const void* pNext);
7624 
7625  void FlushOrInvalidateAllocation(
7626  VmaAllocation hAllocation,
7627  VkDeviceSize offset, VkDeviceSize size,
7628  VMA_CACHE_OPERATION op);
7629 
7630  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7631 
7632  /*
7633  Returns bit mask of memory types that can support defragmentation on GPU as
7634  they support creation of required buffer for copy operations.
7635  */
7636  uint32_t GetGpuDefragmentationMemoryTypeBits();
7637 
7638 private:
7639  VkDeviceSize m_PreferredLargeHeapBlockSize;
7640 
7641  VkPhysicalDevice m_PhysicalDevice;
7642  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7643  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7644 
7645  VMA_RW_MUTEX m_PoolsMutex;
7646  // Protected by m_PoolsMutex. Sorted by pointer value.
7647  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7648  uint32_t m_NextPoolId;
7649 
7650  VmaVulkanFunctions m_VulkanFunctions;
7651 
7652  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7653  uint32_t m_GlobalMemoryTypeBits;
7654 
7655 #if VMA_RECORDING_ENABLED
7656  VmaRecorder* m_pRecorder;
7657 #endif
7658 
7659  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7660 
7661 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7662  void ImportVulkanFunctions_Static();
7663 #endif
7664 
7665  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
7666 
7667 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
7668  void ImportVulkanFunctions_Dynamic();
7669 #endif
7670 
7671  void ValidateVulkanFunctions();
7672 
7673  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7674 
7675  VkResult AllocateMemoryOfType(
7676  VkDeviceSize size,
7677  VkDeviceSize alignment,
7678  bool dedicatedAllocation,
7679  VkBuffer dedicatedBuffer,
7680  VkBufferUsageFlags dedicatedBufferUsage,
7681  VkImage dedicatedImage,
7682  const VmaAllocationCreateInfo& createInfo,
7683  uint32_t memTypeIndex,
7684  VmaSuballocationType suballocType,
7685  size_t allocationCount,
7686  VmaAllocation* pAllocations);
7687 
7688  // Helper function only to be used inside AllocateDedicatedMemory.
7689  VkResult AllocateDedicatedMemoryPage(
7690  VkDeviceSize size,
7691  VmaSuballocationType suballocType,
7692  uint32_t memTypeIndex,
7693  const VkMemoryAllocateInfo& allocInfo,
7694  bool map,
7695  bool isUserDataString,
7696  void* pUserData,
7697  VmaAllocation* pAllocation);
7698 
7699  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7700  VkResult AllocateDedicatedMemory(
7701  VkDeviceSize size,
7702  VmaSuballocationType suballocType,
7703  uint32_t memTypeIndex,
7704  bool withinBudget,
7705  bool map,
7706  bool isUserDataString,
7707  void* pUserData,
7708  VkBuffer dedicatedBuffer,
7709  VkBufferUsageFlags dedicatedBufferUsage,
7710  VkImage dedicatedImage,
7711  size_t allocationCount,
7712  VmaAllocation* pAllocations);
7713 
7714  void FreeDedicatedMemory(const VmaAllocation allocation);
7715 
7716  /*
7717  Calculates and returns bit mask of memory types that can support defragmentation
7718  on GPU as they support creation of required buffer for copy operations.
7719  */
7720  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7721 
7722  uint32_t CalculateGlobalMemoryTypeBits() const;
7723 
7724 #if VMA_MEMORY_BUDGET
7725  void UpdateVulkanBudget();
7726 #endif // #if VMA_MEMORY_BUDGET
7727 };
7728 
7730 // Memory allocation #2 after VmaAllocator_T definition
7731 
7732 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7733 {
7734  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7735 }
7736 
7737 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7738 {
7739  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7740 }
7741 
7742 template<typename T>
7743 static T* VmaAllocate(VmaAllocator hAllocator)
7744 {
7745  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7746 }
7747 
7748 template<typename T>
7749 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7750 {
7751  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7752 }
7753 
7754 template<typename T>
7755 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7756 {
7757  if(ptr != VMA_NULL)
7758  {
7759  ptr->~T();
7760  VmaFree(hAllocator, ptr);
7761  }
7762 }
7763 
7764 template<typename T>
7765 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7766 {
7767  if(ptr != VMA_NULL)
7768  {
7769  for(size_t i = count; i--; )
7770  ptr[i].~T();
7771  VmaFree(hAllocator, ptr);
7772  }
7773 }
7774 
7776 // VmaStringBuilder
7777 
7778 #if VMA_STATS_STRING_ENABLED
7779 
// Minimal append-only text buffer used to build statistics strings.
// The buffer is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);  // Appends the decimal representation.
    void AddNumber(uint64_t num);  // Appends the decimal representation.
    void AddPointer(const void* ptr); // Appends the pointer formatted by VmaPtrToStr.

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
7797 
7798 void VmaStringBuilder::Add(const char* pStr)
7799 {
7800  const size_t strLen = strlen(pStr);
7801  if(strLen > 0)
7802  {
7803  const size_t oldCount = m_Data.size();
7804  m_Data.resize(oldCount + strLen);
7805  memcpy(m_Data.data() + oldCount, pStr, strLen);
7806  }
7807 }
7808 
7809 void VmaStringBuilder::AddNumber(uint32_t num)
7810 {
7811  char buf[11];
7812  buf[10] = '\0';
7813  char *p = &buf[10];
7814  do
7815  {
7816  *--p = '0' + (num % 10);
7817  num /= 10;
7818  }
7819  while(num);
7820  Add(p);
7821 }
7822 
7823 void VmaStringBuilder::AddNumber(uint64_t num)
7824 {
7825  char buf[21];
7826  buf[20] = '\0';
7827  char *p = &buf[20];
7828  do
7829  {
7830  *--p = '0' + (num % 10);
7831  num /= 10;
7832  }
7833  while(num);
7834  Add(p);
7835 }
7836 
7837 void VmaStringBuilder::AddPointer(const void* ptr)
7838 {
7839  char buf[21];
7840  VmaPtrToStr(buf, sizeof(buf), ptr);
7841  Add(buf);
7842 }
7843 
7844 #endif // #if VMA_STATS_STRING_ENABLED
7845 
7847 // VmaJsonWriter
7848 
7849 #if VMA_STATS_STRING_ENABLED
7850 
// Streaming JSON writer on top of VmaStringBuilder. Tracks nesting via a
// stack and asserts on malformed usage (e.g. non-string object keys).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value (or object key).
    void WriteString(const char* pStr);
    // Alternative: open a string and build it piecewise with ContinueString*.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One open collection on the nesting stack.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;  // In objects, even indices are keys, odd are values.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString; // True between BeginString and EndString.

    // Emits the separator/indentation appropriate before a new value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
7899 
// Indentation unit appended once per nesting level by WriteIndent.
const char* const VmaJsonWriter::INDENT = " ";

// The writer starts outside of any string, with an empty collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

// A writer must only be destroyed after all strings and collections are closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
7914 
7915 void VmaJsonWriter::BeginObject(bool singleLine)
7916 {
7917  VMA_ASSERT(!m_InsideString);
7918 
7919  BeginValue(false);
7920  m_SB.Add('{');
7921 
7922  StackItem item;
7923  item.type = COLLECTION_TYPE_OBJECT;
7924  item.valueCount = 0;
7925  item.singleLineMode = singleLine;
7926  m_Stack.push_back(item);
7927 }
7928 
7929 void VmaJsonWriter::EndObject()
7930 {
7931  VMA_ASSERT(!m_InsideString);
7932 
7933  WriteIndent(true);
7934  m_SB.Add('}');
7935 
7936  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7937  m_Stack.pop_back();
7938 }
7939 
7940 void VmaJsonWriter::BeginArray(bool singleLine)
7941 {
7942  VMA_ASSERT(!m_InsideString);
7943 
7944  BeginValue(false);
7945  m_SB.Add('[');
7946 
7947  StackItem item;
7948  item.type = COLLECTION_TYPE_ARRAY;
7949  item.valueCount = 0;
7950  item.singleLineMode = singleLine;
7951  m_Stack.push_back(item);
7952 }
7953 
7954 void VmaJsonWriter::EndArray()
7955 {
7956  VMA_ASSERT(!m_InsideString);
7957 
7958  WriteIndent(true);
7959  m_SB.Add(']');
7960 
7961  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7962  m_Stack.pop_back();
7963 }
7964 
7965 void VmaJsonWriter::WriteString(const char* pStr)
7966 {
7967  BeginString(pStr);
7968  EndString();
7969 }
7970 
7971 void VmaJsonWriter::BeginString(const char* pStr)
7972 {
7973  VMA_ASSERT(!m_InsideString);
7974 
7975  BeginValue(true);
7976  m_SB.Add('"');
7977  m_InsideString = true;
7978  if(pStr != VMA_NULL && pStr[0] != '\0')
7979  {
7980  ContinueString(pStr);
7981  }
7982 }
7983 
7984 void VmaJsonWriter::ContinueString(const char* pStr)
7985 {
7986  VMA_ASSERT(m_InsideString);
7987 
7988  const size_t strLen = strlen(pStr);
7989  for(size_t i = 0; i < strLen; ++i)
7990  {
7991  char ch = pStr[i];
7992  if(ch == '\\')
7993  {
7994  m_SB.Add("\\\\");
7995  }
7996  else if(ch == '"')
7997  {
7998  m_SB.Add("\\\"");
7999  }
8000  else if(ch >= 32)
8001  {
8002  m_SB.Add(ch);
8003  }
8004  else switch(ch)
8005  {
8006  case '\b':
8007  m_SB.Add("\\b");
8008  break;
8009  case '\f':
8010  m_SB.Add("\\f");
8011  break;
8012  case '\n':
8013  m_SB.Add("\\n");
8014  break;
8015  case '\r':
8016  m_SB.Add("\\r");
8017  break;
8018  case '\t':
8019  m_SB.Add("\\t");
8020  break;
8021  default:
8022  VMA_ASSERT(0 && "Character not currently supported.");
8023  break;
8024  }
8025  }
8026 }
8027 
// Appends the decimal representation of n to the string being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends the decimal representation of n to the string being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends the formatted pointer value to the string being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
8045 
8046 void VmaJsonWriter::EndString(const char* pStr)
8047 {
8048  VMA_ASSERT(m_InsideString);
8049  if(pStr != VMA_NULL && pStr[0] != '\0')
8050  {
8051  ContinueString(pStr);
8052  }
8053  m_SB.Add('"');
8054  m_InsideString = false;
8055 }
8056 
// Writes n as a standalone numeric JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes n as a standalone numeric JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes the JSON literal `true` or `false`.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes the JSON literal `null`.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
8084 
8085 void VmaJsonWriter::BeginValue(bool isString)
8086 {
8087  if(!m_Stack.empty())
8088  {
8089  StackItem& currItem = m_Stack.back();
8090  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8091  currItem.valueCount % 2 == 0)
8092  {
8093  VMA_ASSERT(isString);
8094  }
8095 
8096  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8097  currItem.valueCount % 2 != 0)
8098  {
8099  m_SB.Add(": ");
8100  }
8101  else if(currItem.valueCount > 0)
8102  {
8103  m_SB.Add(", ");
8104  WriteIndent();
8105  }
8106  else
8107  {
8108  WriteIndent();
8109  }
8110  ++currItem.valueCount;
8111  }
8112 }
8113 
8114 void VmaJsonWriter::WriteIndent(bool oneLess)
8115 {
8116  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8117  {
8118  m_SB.AddNewLine();
8119 
8120  size_t count = m_Stack.size();
8121  if(count > 0 && oneLess)
8122  {
8123  --count;
8124  }
8125  for(size_t i = 0; i < count; ++i)
8126  {
8127  m_SB.Add(INDENT);
8128  }
8129  }
8130 }
8131 
8132 #endif // #if VMA_STATS_STRING_ENABLED
8133 
8135 
8136 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8137 {
8138  if(IsUserDataString())
8139  {
8140  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8141 
8142  FreeUserDataString(hAllocator);
8143 
8144  if(pUserData != VMA_NULL)
8145  {
8146  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8147  }
8148  }
8149  else
8150  {
8151  m_pUserData = pUserData;
8152  }
8153 }
8154 
8155 void VmaAllocation_T::ChangeBlockAllocation(
8156  VmaAllocator hAllocator,
8157  VmaDeviceMemoryBlock* block,
8158  VkDeviceSize offset)
8159 {
8160  VMA_ASSERT(block != VMA_NULL);
8161  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8162 
8163  // Move mapping reference counter from old block to new block.
8164  if(block != m_BlockAllocation.m_Block)
8165  {
8166  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8167  if(IsPersistentMap())
8168  ++mapRefCount;
8169  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8170  block->Map(hAllocator, mapRefCount, VMA_NULL);
8171  }
8172 
8173  m_BlockAllocation.m_Block = block;
8174  m_BlockAllocation.m_Offset = offset;
8175 }
8176 
// Moves this allocation to a new offset within its current block.
// Valid only for block suballocations, not dedicated allocations.
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
8182 
8183 VkDeviceSize VmaAllocation_T::GetOffset() const
8184 {
8185  switch(m_Type)
8186  {
8187  case ALLOCATION_TYPE_BLOCK:
8188  return m_BlockAllocation.m_Offset;
8189  case ALLOCATION_TYPE_DEDICATED:
8190  return 0;
8191  default:
8192  VMA_ASSERT(0);
8193  return 0;
8194  }
8195 }
8196 
8197 VkDeviceMemory VmaAllocation_T::GetMemory() const
8198 {
8199  switch(m_Type)
8200  {
8201  case ALLOCATION_TYPE_BLOCK:
8202  return m_BlockAllocation.m_Block->GetDeviceMemory();
8203  case ALLOCATION_TYPE_DEDICATED:
8204  return m_DedicatedAllocation.m_hMemory;
8205  default:
8206  VMA_ASSERT(0);
8207  return VK_NULL_HANDLE;
8208  }
8209 }
8210 
8211 void* VmaAllocation_T::GetMappedData() const
8212 {
8213  switch(m_Type)
8214  {
8215  case ALLOCATION_TYPE_BLOCK:
8216  if(m_MapCount != 0)
8217  {
8218  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8219  VMA_ASSERT(pBlockData != VMA_NULL);
8220  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8221  }
8222  else
8223  {
8224  return VMA_NULL;
8225  }
8226  break;
8227  case ALLOCATION_TYPE_DEDICATED:
8228  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8229  return m_DedicatedAllocation.m_pMappedData;
8230  default:
8231  VMA_ASSERT(0);
8232  return VMA_NULL;
8233  }
8234 }
8235 
8236 bool VmaAllocation_T::CanBecomeLost() const
8237 {
8238  switch(m_Type)
8239  {
8240  case ALLOCATION_TYPE_BLOCK:
8241  return m_BlockAllocation.m_CanBecomeLost;
8242  case ALLOCATION_TYPE_DEDICATED:
8243  return false;
8244  default:
8245  VMA_ASSERT(0);
8246  return false;
8247  }
8248 }
8249 
// Tries to mark this allocation as lost if its last use is older than
// frameInUseCount frames before currentFrameIndex.
// Returns true if this call transitioned the allocation to the LOST state.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free compare-exchange loop on the atomic last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still recent enough to be potentially in use: cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // NOTE(review): on CAS failure localLastUseFrameIndex is presumably
            // refreshed with the current value (compare_exchange semantics), so
            // the loop re-evaluates - confirm against CompareExchangeLastUseFrameIndex.
        }
    }
}
8281 
8282 #if VMA_STATS_STRING_ENABLED
8283 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when emitting JSON statistics.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
8293 
// Emits this allocation's parameters as key/value pairs into an already-open
// JSON object: type, size, optional user data, frame indices, and usage flags.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // String mode: m_pUserData is an owned, NUL-terminated copy.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Raw-pointer mode: print the pointer value itself.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Buffer/image usage flags are only printed when known (non-zero).
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
8329 
8330 #endif
8331 
// Frees the owned user-data string copy and resets the pointer.
// Only valid when the allocation is in user-data-string mode.
void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
{
    VMA_ASSERT(IsUserDataString());
    VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
    m_pUserData = VMA_NULL;
}
8338 
8339 void VmaAllocation_T::BlockAllocMap()
8340 {
8341  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8342 
8343  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8344  {
8345  ++m_MapCount;
8346  }
8347  else
8348  {
8349  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8350  }
8351 }
8352 
8353 void VmaAllocation_T::BlockAllocUnmap()
8354 {
8355  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8356 
8357  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8358  {
8359  --m_MapCount;
8360  }
8361  else
8362  {
8363  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8364  }
8365 }
8366 
8367 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8368 {
8369  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8370 
8371  if(m_MapCount != 0)
8372  {
8373  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8374  {
8375  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8376  *ppData = m_DedicatedAllocation.m_pMappedData;
8377  ++m_MapCount;
8378  return VK_SUCCESS;
8379  }
8380  else
8381  {
8382  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8383  return VK_ERROR_MEMORY_MAP_FAILED;
8384  }
8385  }
8386  else
8387  {
8388  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8389  hAllocator->m_hDevice,
8390  m_DedicatedAllocation.m_hMemory,
8391  0, // offset
8392  VK_WHOLE_SIZE,
8393  0, // flags
8394  ppData);
8395  if(result == VK_SUCCESS)
8396  {
8397  m_DedicatedAllocation.m_pMappedData = *ppData;
8398  m_MapCount = 1;
8399  }
8400  return result;
8401  }
8402 }
8403 
8404 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8405 {
8406  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8407 
8408  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8409  {
8410  --m_MapCount;
8411  if(m_MapCount == 0)
8412  {
8413  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8414  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8415  hAllocator->m_hDevice,
8416  m_DedicatedAllocation.m_hMemory);
8417  }
8418  }
8419  else
8420  {
8421  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8422  }
8423 }
8424 
8425 #if VMA_STATS_STRING_ENABLED
8426 
// Serializes a VmaStatInfo structure as a JSON object: counts, byte totals,
// and min/avg/max summaries (the latter only when more than one sample exists,
// since with a single sample min == avg == max).
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        // Single-line nested object with allocation size statistics.
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        // Single-line nested object with free-range size statistics.
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
8474 
8475 #endif // #if VMA_STATS_STRING_ENABLED
8476 
// Comparator ordering free-suballocation list iterators by ascending size.
// The second overload compares against a raw size key, enabling binary search
// (e.g. VmaBinaryFindFirstNotLess) over m_FreeSuballocationsBySize.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
8492 
8493 
8495 // class VmaBlockMetadata
8496 
// Base metadata constructor: size is set later via Init(); allocation
// callbacks are captured from the owning allocator.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
8502 
8503 #if VMA_STATS_STRING_ENABLED
8504 
// Opens the JSON object for a block's detailed map: writes summary fields and
// begins the "Suballocations" array. Must be paired with PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
8527 
// Writes one used suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
8541 
// Writes one free range as a single-line JSON object with offset,
// the "FREE" type name, and size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
8559 
// Closes the "Suballocations" array and the enclosing JSON object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
8565 
8566 #endif // #if VMA_STATS_STRING_ENABLED
8567 
8569 // class VmaBlockMetadata_Generic
8570 
// Generic metadata constructor: starts with empty containers; real state is
// established by Init(). Both containers use the allocator's callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
8579 
// Trivial destructor: member containers release their storage themselves.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
8583 
8584 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8585 {
8586  VmaBlockMetadata::Init(size);
8587 
8588  m_FreeCount = 1;
8589  m_SumFreeSize = size;
8590 
8591  VmaSuballocation suballoc = {};
8592  suballoc.offset = 0;
8593  suballoc.size = size;
8594  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8595  suballoc.hAllocation = VK_NULL_HANDLE;
8596 
8597  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8598  m_Suballocations.push_back(suballoc);
8599  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8600  --suballocItem;
8601  m_FreeSuballocationsBySize.push_back(suballocItem);
8602 }
8603 
// Full consistency check of this block's metadata: suballocations must tile
// the block contiguously, free/used invariants must hold, and the by-size
// list must exactly mirror the registered free ranges. Each VMA_VALIDATE
// returns false from this function on the first violated invariant.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges have no allocation handle; used ones always do.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges above the threshold are tracked in the by-size list.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
8685 
8686 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8687 {
8688  if(!m_FreeSuballocationsBySize.empty())
8689  {
8690  return m_FreeSuballocationsBySize.back()->size;
8691  }
8692  else
8693  {
8694  return 0;
8695  }
8696 }
8697 
// The block is empty when it consists of exactly one suballocation and that
// suballocation is free (free count == 1).
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
8702 
8703 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8704 {
8705  outInfo.blockCount = 1;
8706 
8707  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8708  outInfo.allocationCount = rangeCount - m_FreeCount;
8709  outInfo.unusedRangeCount = m_FreeCount;
8710 
8711  outInfo.unusedBytes = m_SumFreeSize;
8712  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8713 
8714  outInfo.allocationSizeMin = UINT64_MAX;
8715  outInfo.allocationSizeMax = 0;
8716  outInfo.unusedRangeSizeMin = UINT64_MAX;
8717  outInfo.unusedRangeSizeMax = 0;
8718 
8719  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8720  suballocItem != m_Suballocations.cend();
8721  ++suballocItem)
8722  {
8723  const VmaSuballocation& suballoc = *suballocItem;
8724  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8725  {
8726  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8727  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8728  }
8729  else
8730  {
8731  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8732  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8733  }
8734  }
8735 }
8736 
// Accumulates this block's statistics into pool-level totals.
// unusedRangeSizeMax is a running maximum, not a sum.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
8747 
8748 #if VMA_STATS_STRING_ENABLED
8749 
8750 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8751 {
8752  PrintDetailedMap_Begin(json,
8753  m_SumFreeSize, // unusedBytes
8754  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8755  m_FreeCount); // unusedRangeCount
8756 
8757  size_t i = 0;
8758  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8759  suballocItem != m_Suballocations.cend();
8760  ++suballocItem, ++i)
8761  {
8762  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8763  {
8764  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8765  }
8766  else
8767  {
8768  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8769  }
8770  }
8771 
8772  PrintDetailedMap_End(json);
8773 }
8774 
8775 #endif // #if VMA_STATS_STRING_ENABLED
8776 
8777 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8778  uint32_t currentFrameIndex,
8779  uint32_t frameInUseCount,
8780  VkDeviceSize bufferImageGranularity,
8781  VkDeviceSize allocSize,
8782  VkDeviceSize allocAlignment,
8783  bool upperAddress,
8784  VmaSuballocationType allocType,
8785  bool canMakeOtherLost,
8786  uint32_t strategy,
8787  VmaAllocationRequest* pAllocationRequest)
8788 {
8789  VMA_ASSERT(allocSize > 0);
8790  VMA_ASSERT(!upperAddress);
8791  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8792  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8793  VMA_HEAVY_ASSERT(Validate());
8794 
8795  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8796 
8797  // There is not enough total free space in this block to fullfill the request: Early return.
8798  if(canMakeOtherLost == false &&
8799  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8800  {
8801  return false;
8802  }
8803 
8804  // New algorithm, efficiently searching freeSuballocationsBySize.
8805  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8806  if(freeSuballocCount > 0)
8807  {
8809  {
8810  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8811  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8812  m_FreeSuballocationsBySize.data(),
8813  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8814  allocSize + 2 * VMA_DEBUG_MARGIN,
8815  VmaSuballocationItemSizeLess());
8816  size_t index = it - m_FreeSuballocationsBySize.data();
8817  for(; index < freeSuballocCount; ++index)
8818  {
8819  if(CheckAllocation(
8820  currentFrameIndex,
8821  frameInUseCount,
8822  bufferImageGranularity,
8823  allocSize,
8824  allocAlignment,
8825  allocType,
8826  m_FreeSuballocationsBySize[index],
8827  false, // canMakeOtherLost
8828  &pAllocationRequest->offset,
8829  &pAllocationRequest->itemsToMakeLostCount,
8830  &pAllocationRequest->sumFreeSize,
8831  &pAllocationRequest->sumItemSize))
8832  {
8833  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8834  return true;
8835  }
8836  }
8837  }
8838  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8839  {
8840  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8841  it != m_Suballocations.end();
8842  ++it)
8843  {
8844  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8845  currentFrameIndex,
8846  frameInUseCount,
8847  bufferImageGranularity,
8848  allocSize,
8849  allocAlignment,
8850  allocType,
8851  it,
8852  false, // canMakeOtherLost
8853  &pAllocationRequest->offset,
8854  &pAllocationRequest->itemsToMakeLostCount,
8855  &pAllocationRequest->sumFreeSize,
8856  &pAllocationRequest->sumItemSize))
8857  {
8858  pAllocationRequest->item = it;
8859  return true;
8860  }
8861  }
8862  }
8863  else // WORST_FIT, FIRST_FIT
8864  {
8865  // Search staring from biggest suballocations.
8866  for(size_t index = freeSuballocCount; index--; )
8867  {
8868  if(CheckAllocation(
8869  currentFrameIndex,
8870  frameInUseCount,
8871  bufferImageGranularity,
8872  allocSize,
8873  allocAlignment,
8874  allocType,
8875  m_FreeSuballocationsBySize[index],
8876  false, // canMakeOtherLost
8877  &pAllocationRequest->offset,
8878  &pAllocationRequest->itemsToMakeLostCount,
8879  &pAllocationRequest->sumFreeSize,
8880  &pAllocationRequest->sumItemSize))
8881  {
8882  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8883  return true;
8884  }
8885  }
8886  }
8887  }
8888 
8889  if(canMakeOtherLost)
8890  {
8891  // Brute-force algorithm. TODO: Come up with something better.
8892 
8893  bool found = false;
8894  VmaAllocationRequest tmpAllocRequest = {};
8895  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8896  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8897  suballocIt != m_Suballocations.end();
8898  ++suballocIt)
8899  {
8900  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8901  suballocIt->hAllocation->CanBecomeLost())
8902  {
8903  if(CheckAllocation(
8904  currentFrameIndex,
8905  frameInUseCount,
8906  bufferImageGranularity,
8907  allocSize,
8908  allocAlignment,
8909  allocType,
8910  suballocIt,
8911  canMakeOtherLost,
8912  &tmpAllocRequest.offset,
8913  &tmpAllocRequest.itemsToMakeLostCount,
8914  &tmpAllocRequest.sumFreeSize,
8915  &tmpAllocRequest.sumItemSize))
8916  {
8918  {
8919  *pAllocationRequest = tmpAllocRequest;
8920  pAllocationRequest->item = suballocIt;
8921  break;
8922  }
8923  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8924  {
8925  *pAllocationRequest = tmpAllocRequest;
8926  pAllocationRequest->item = suballocIt;
8927  found = true;
8928  }
8929  }
8930  }
8931  }
8932 
8933  return found;
8934  }
8935 
8936  return false;
8937 }
8938 
// Makes lost all allocations that pAllocationRequest marked as required to be
// lost, freeing and merging their ranges. Returns false if any of them could
// not be made lost (the request then cannot be satisfied).
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);

    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip past the (already free) current item to the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; continue from the merged item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
8972 
// Makes lost every allocation in this block that can become lost and is old
// enough; frees their ranges. Returns how many allocations were lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation returns an iterator to the resulting (possibly
            // merged) free range; the loop's ++it then moves past it.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
8990 
8991 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8992 {
8993  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8994  it != m_Suballocations.end();
8995  ++it)
8996  {
8997  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8998  {
8999  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9000  {
9001  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9002  return VK_ERROR_VALIDATION_FAILED_EXT;
9003  }
9004  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9005  {
9006  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9007  return VK_ERROR_VALIDATION_FAILED_EXT;
9008  }
9009  }
9010  }
9011 
9012  return VK_SUCCESS;
9013 }
9014 
// Commits a previously computed allocation request: converts the chosen free
// suballocation into a used one, splitting off new free suballocations for
// any leftover space before and after, and updates the block totals.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, plus one new free range per
    // non-empty padding.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
9079 
9080 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9081 {
9082  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9083  suballocItem != m_Suballocations.end();
9084  ++suballocItem)
9085  {
9086  VmaSuballocation& suballoc = *suballocItem;
9087  if(suballoc.hAllocation == allocation)
9088  {
9089  FreeSuballocation(suballocItem);
9090  VMA_HEAVY_ASSERT(Validate());
9091  return;
9092  }
9093  }
9094  VMA_ASSERT(0 && "Not found!");
9095 }
9096 
9097 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9098 {
9099  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9100  suballocItem != m_Suballocations.end();
9101  ++suballocItem)
9102  {
9103  VmaSuballocation& suballoc = *suballocItem;
9104  if(suballoc.offset == offset)
9105  {
9106  FreeSuballocation(suballocItem);
9107  return;
9108  }
9109  }
9110  VMA_ASSERT(0 && "Not found!");
9111 }
9112 
// Checks invariants of m_FreeSuballocationsBySize: every entry is free, at or
// above the registration threshold, and the list is sorted by size ascending.
// VMA_VALIDATE returns false from this function on the first violation.
bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
{
    VkDeviceSize lastSize = 0;
    for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    {
        const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];

        VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
        VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
        VMA_VALIDATE(it->size >= lastSize);
        lastSize = it->size;
    }
    return true;
}
9127 
/*
Checks whether an allocation of allocSize bytes with allocAlignment and
allocType can be placed starting at the suballocation *suballocItem,
honoring VMA_DEBUG_MARGIN and Vulkan bufferImageGranularity.

On success returns true and writes the chosen offset to *pOffset.

When canMakeOtherLost is true, existing allocations overlapping the candidate
range may be sacrificed: they are counted in *itemsToMakeLostCount and their
total size in *pSumItemSize, while scanned free space accumulates in
*pSumFreeSize. An allocation qualifies as lost-able only if it CanBecomeLost()
and its last-use frame is more than frameInUseCount frames older than
currentFrameIndex.

When canMakeOtherLost is false, suballocItem must point at a single FREE
suballocation and the request must fit inside it entirely.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting suballocation is occupied: it must itself be lost-able.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            // Walk backwards only while still on the same granularity page.
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation onto the next granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Occupied suballocation in the way: must also be lost-able.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: allocation must fit entirely inside this free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
9401 
9402 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9403 {
9404  VMA_ASSERT(item != m_Suballocations.end());
9405  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9406 
9407  VmaSuballocationList::iterator nextItem = item;
9408  ++nextItem;
9409  VMA_ASSERT(nextItem != m_Suballocations.end());
9410  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9411 
9412  item->size += nextItem->size;
9413  --m_FreeCount;
9414  m_Suballocations.erase(nextItem);
9415 }
9416 
9417 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9418 {
9419  // Change this suballocation to be marked as free.
9420  VmaSuballocation& suballoc = *suballocItem;
9421  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9422  suballoc.hAllocation = VK_NULL_HANDLE;
9423 
9424  // Update totals.
9425  ++m_FreeCount;
9426  m_SumFreeSize += suballoc.size;
9427 
9428  // Merge with previous and/or next suballocation if it's also free.
9429  bool mergeWithNext = false;
9430  bool mergeWithPrev = false;
9431 
9432  VmaSuballocationList::iterator nextItem = suballocItem;
9433  ++nextItem;
9434  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9435  {
9436  mergeWithNext = true;
9437  }
9438 
9439  VmaSuballocationList::iterator prevItem = suballocItem;
9440  if(suballocItem != m_Suballocations.begin())
9441  {
9442  --prevItem;
9443  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9444  {
9445  mergeWithPrev = true;
9446  }
9447  }
9448 
9449  if(mergeWithNext)
9450  {
9451  UnregisterFreeSuballocation(nextItem);
9452  MergeFreeWithNext(suballocItem);
9453  }
9454 
9455  if(mergeWithPrev)
9456  {
9457  UnregisterFreeSuballocation(prevItem);
9458  MergeFreeWithNext(prevItem);
9459  RegisterFreeSuballocation(prevItem);
9460  return prevItem;
9461  }
9462  else
9463  {
9464  RegisterFreeSuballocation(suballocItem);
9465  return suballocItem;
9466  }
9467 }
9468 
9469 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9470 {
9471  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9472  VMA_ASSERT(item->size > 0);
9473 
9474  // You may want to enable this validation at the beginning or at the end of
9475  // this function, depending on what do you want to check.
9476  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9477 
9478  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9479  {
9480  if(m_FreeSuballocationsBySize.empty())
9481  {
9482  m_FreeSuballocationsBySize.push_back(item);
9483  }
9484  else
9485  {
9486  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9487  }
9488  }
9489 
9490  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9491 }
9492 
9493 
9494 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9495 {
9496  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9497  VMA_ASSERT(item->size > 0);
9498 
9499  // You may want to enable this validation at the beginning or at the end of
9500  // this function, depending on what do you want to check.
9501  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9502 
9503  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9504  {
9505  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9506  m_FreeSuballocationsBySize.data(),
9507  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9508  item,
9509  VmaSuballocationItemSizeLess());
9510  for(size_t index = it - m_FreeSuballocationsBySize.data();
9511  index < m_FreeSuballocationsBySize.size();
9512  ++index)
9513  {
9514  if(m_FreeSuballocationsBySize[index] == item)
9515  {
9516  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9517  return;
9518  }
9519  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9520  }
9521  VMA_ASSERT(0 && "Not found.");
9522  }
9523 
9524  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9525 }
9526 
9527 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9528  VkDeviceSize bufferImageGranularity,
9529  VmaSuballocationType& inOutPrevSuballocType) const
9530 {
9531  if(bufferImageGranularity == 1 || IsEmpty())
9532  {
9533  return false;
9534  }
9535 
9536  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9537  bool typeConflictFound = false;
9538  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9539  it != m_Suballocations.cend();
9540  ++it)
9541  {
9542  const VmaSuballocationType suballocType = it->type;
9543  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9544  {
9545  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9546  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9547  {
9548  typeConflictFound = true;
9549  }
9550  inOutPrevSuballocType = suballocType;
9551  }
9552  }
9553 
9554  return typeConflictFound || minAlignment >= bufferImageGranularity;
9555 }
9556 
9558 // class VmaBlockMetadata_Linear
9559 
// Constructs empty linear metadata. m_SumFreeSize is set to the real block
// size later in Init(); the 2nd vector starts unused (SECOND_VECTOR_EMPTY)
// and all null-item counters start at zero.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    // Both suballocation vectors allocate through the allocator's CPU callbacks.
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
9572 
// Trivial destructor: the suballocation vectors release their storage via
// their VmaStlAllocator members.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
9576 
// Initializes metadata for a block of the given size.
// Initially the whole block is free, so the free total equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
9582 
/*
Validates internal invariants of the linear allocator's two suballocation
vectors. Returns true when everything is consistent; each VMA_VALIDATE fails
the whole check otherwise.

Checks: mode/emptiness agreement, null-item bookkeeping, monotonically
increasing offsets (walking 2nd-as-ring-buffer first, then 1st, then
2nd-as-stack in reverse), handle/offset/size agreement for used items, and
that m_SumFreeSize matches block size minus used bytes.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // The 2nd vector is non-empty exactly when a 2nd-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot exist with an empty 1st vector.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters cannot exceed the vectors they describe.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` tracks the minimum admissible offset of the next item we visit.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // Ring-buffer mode: the 2nd vector occupies space BEFORE the 1st vector,
    // so it is walked first, in forward order.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // FREE type and null handle must agree.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Metadata stored in the allocation handle must mirror ours.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading run of the 1st vector must consist solely of null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    // Walk the remainder of the 1st vector.
    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // Double-stack mode: the 2nd vector grows downward from the end of the
    // block, so it is walked last, in reverse index order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
9709 
9710 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9711 {
9712  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9713  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9714 }
9715 
// Returns the size of the largest contiguous free range that a NEW linear
// allocation could use, depending on the current 2nd-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live item (leading null items are skipped) and last item
            // bound the occupied region; free space lies on either side of it.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // back() of 2nd is the lowest-offset item of the upper stack.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
9779 
9780 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9781 {
9782  const VkDeviceSize size = GetSize();
9783  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9784  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9785  const size_t suballoc1stCount = suballocations1st.size();
9786  const size_t suballoc2ndCount = suballocations2nd.size();
9787 
9788  outInfo.blockCount = 1;
9789  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9790  outInfo.unusedRangeCount = 0;
9791  outInfo.usedBytes = 0;
9792  outInfo.allocationSizeMin = UINT64_MAX;
9793  outInfo.allocationSizeMax = 0;
9794  outInfo.unusedRangeSizeMin = UINT64_MAX;
9795  outInfo.unusedRangeSizeMax = 0;
9796 
9797  VkDeviceSize lastOffset = 0;
9798 
9799  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9800  {
9801  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9802  size_t nextAlloc2ndIndex = 0;
9803  while(lastOffset < freeSpace2ndTo1stEnd)
9804  {
9805  // Find next non-null allocation or move nextAllocIndex to the end.
9806  while(nextAlloc2ndIndex < suballoc2ndCount &&
9807  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9808  {
9809  ++nextAlloc2ndIndex;
9810  }
9811 
9812  // Found non-null allocation.
9813  if(nextAlloc2ndIndex < suballoc2ndCount)
9814  {
9815  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9816 
9817  // 1. Process free space before this allocation.
9818  if(lastOffset < suballoc.offset)
9819  {
9820  // There is free space from lastOffset to suballoc.offset.
9821  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9822  ++outInfo.unusedRangeCount;
9823  outInfo.unusedBytes += unusedRangeSize;
9824  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9825  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9826  }
9827 
9828  // 2. Process this allocation.
9829  // There is allocation with suballoc.offset, suballoc.size.
9830  outInfo.usedBytes += suballoc.size;
9831  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9832  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9833 
9834  // 3. Prepare for next iteration.
9835  lastOffset = suballoc.offset + suballoc.size;
9836  ++nextAlloc2ndIndex;
9837  }
9838  // We are at the end.
9839  else
9840  {
9841  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9842  if(lastOffset < freeSpace2ndTo1stEnd)
9843  {
9844  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9845  ++outInfo.unusedRangeCount;
9846  outInfo.unusedBytes += unusedRangeSize;
9847  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9848  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9849  }
9850 
9851  // End of loop.
9852  lastOffset = freeSpace2ndTo1stEnd;
9853  }
9854  }
9855  }
9856 
9857  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9858  const VkDeviceSize freeSpace1stTo2ndEnd =
9859  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9860  while(lastOffset < freeSpace1stTo2ndEnd)
9861  {
9862  // Find next non-null allocation or move nextAllocIndex to the end.
9863  while(nextAlloc1stIndex < suballoc1stCount &&
9864  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9865  {
9866  ++nextAlloc1stIndex;
9867  }
9868 
9869  // Found non-null allocation.
9870  if(nextAlloc1stIndex < suballoc1stCount)
9871  {
9872  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9873 
9874  // 1. Process free space before this allocation.
9875  if(lastOffset < suballoc.offset)
9876  {
9877  // There is free space from lastOffset to suballoc.offset.
9878  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9879  ++outInfo.unusedRangeCount;
9880  outInfo.unusedBytes += unusedRangeSize;
9881  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9882  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9883  }
9884 
9885  // 2. Process this allocation.
9886  // There is allocation with suballoc.offset, suballoc.size.
9887  outInfo.usedBytes += suballoc.size;
9888  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9889  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9890 
9891  // 3. Prepare for next iteration.
9892  lastOffset = suballoc.offset + suballoc.size;
9893  ++nextAlloc1stIndex;
9894  }
9895  // We are at the end.
9896  else
9897  {
9898  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9899  if(lastOffset < freeSpace1stTo2ndEnd)
9900  {
9901  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9902  ++outInfo.unusedRangeCount;
9903  outInfo.unusedBytes += unusedRangeSize;
9904  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9905  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9906  }
9907 
9908  // End of loop.
9909  lastOffset = freeSpace1stTo2ndEnd;
9910  }
9911  }
9912 
9913  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9914  {
9915  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9916  while(lastOffset < size)
9917  {
9918  // Find next non-null allocation or move nextAllocIndex to the end.
9919  while(nextAlloc2ndIndex != SIZE_MAX &&
9920  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9921  {
9922  --nextAlloc2ndIndex;
9923  }
9924 
9925  // Found non-null allocation.
9926  if(nextAlloc2ndIndex != SIZE_MAX)
9927  {
9928  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9929 
9930  // 1. Process free space before this allocation.
9931  if(lastOffset < suballoc.offset)
9932  {
9933  // There is free space from lastOffset to suballoc.offset.
9934  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9935  ++outInfo.unusedRangeCount;
9936  outInfo.unusedBytes += unusedRangeSize;
9937  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9938  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9939  }
9940 
9941  // 2. Process this allocation.
9942  // There is allocation with suballoc.offset, suballoc.size.
9943  outInfo.usedBytes += suballoc.size;
9944  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9945  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9946 
9947  // 3. Prepare for next iteration.
9948  lastOffset = suballoc.offset + suballoc.size;
9949  --nextAlloc2ndIndex;
9950  }
9951  // We are at the end.
9952  else
9953  {
9954  // There is free space from lastOffset to size.
9955  if(lastOffset < size)
9956  {
9957  const VkDeviceSize unusedRangeSize = size - lastOffset;
9958  ++outInfo.unusedRangeCount;
9959  outInfo.unusedBytes += unusedRangeSize;
9960  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9961  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9962  }
9963 
9964  // End of loop.
9965  lastOffset = size;
9966  }
9967  }
9968  }
9969 
9970  outInfo.unusedBytes = size - outInfo.usedBytes;
9971 }
9972 
/*
Accumulates this block's statistics into inoutStats (pool-level totals):
block size, live allocation count, unused byte total, unused range count, and
the largest unused range. Scans regions in address order: 2nd vector first in
ring-buffer mode, then the 1st vector, then the 2nd vector (reversed) in
double-stack mode.
*/
void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const VkDeviceSize size = GetSize();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.size += size;

    VkDeviceSize lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        // NOTE(review): this 2nd-vector scan starts at m_1stNullItemsBeginCount,
        // which is a counter for the 1st vector; CalcAllocationStatInfo starts
        // its equivalent scan at 0 — confirm this asymmetry is intentional.
        size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++inoutStats.allocationCount;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    // In double-stack mode the 1st vector's region ends at the top of the 2nd
    // stack; otherwise at the end of the block.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++inoutStats.allocationCount;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                inoutStats.unusedSize += unusedRangeSize;
                ++inoutStats.unusedRangeCount;
                inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack: walk the 2nd vector backwards, i.e. in increasing
        // address order.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++inoutStats.allocationCount;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    inoutStats.unusedSize += unusedRangeSize;
                    ++inoutStats.unusedRangeCount;
                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }
}
10144 
10145 #if VMA_STATS_STRING_ENABLED
/*
Writes a detailed JSON description of this linear block: summary statistics
followed by one entry per allocation / unused range, in increasing offset
order.

The same traversal logic is executed twice:
- FIRST PASS only counts (allocation count, unused-range count, used bytes),
  because PrintDetailedMap_Begin() needs those totals before any entries can
  be emitted.
- SECOND PASS repeats the identical walk and emits the JSON entries.

Walk order mirrors the linear allocator's memory layout:
1. RING_BUFFER mode: 2nd-vector allocations occupy the space below the first
   non-null 1st-vector allocation, so they are visited first (forward).
2. The 1st vector, up to freeSpace1stTo2ndEnd (2nd.back().offset in
   DOUBLE_STACK mode, otherwise the block size).
3. DOUBLE_STACK mode: the 2nd vector is visited last, iterated from back()
   to front() — back() holds the lowest offset, so this is still increasing
   offset order.
Null items (hAllocation == VK_NULL_HANDLE) are skipped; their space is
reported as unused ranges.
*/
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // Running end offset of the last processed allocation / free range.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector fills [0, first non-null 1st item).
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Walk the 1st vector, skipping the leading run of null items.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): the second pass checks `lastOffset < freeSpace1stTo2ndEnd`
            // here; the two are equivalent inside this loop (the while condition
            // guarantees lastOffset < freeSpace1stTo2ndEnd <= size), so the counts match.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Iterate 2nd vector backwards: in double-stack mode back() has the
        // lowest offset, so this proceeds in increasing offset order.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            // Index underflow past 0 wraps to SIZE_MAX, used as the sentinel.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    // Same traversal again, now emitting JSON entries instead of counting.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
10460 #endif // #if VMA_STATS_STRING_ENABLED
10461 
10462 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10463  uint32_t currentFrameIndex,
10464  uint32_t frameInUseCount,
10465  VkDeviceSize bufferImageGranularity,
10466  VkDeviceSize allocSize,
10467  VkDeviceSize allocAlignment,
10468  bool upperAddress,
10469  VmaSuballocationType allocType,
10470  bool canMakeOtherLost,
10471  uint32_t strategy,
10472  VmaAllocationRequest* pAllocationRequest)
10473 {
10474  VMA_ASSERT(allocSize > 0);
10475  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10476  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10477  VMA_HEAVY_ASSERT(Validate());
10478  return upperAddress ?
10479  CreateAllocationRequest_UpperAddress(
10480  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10481  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10482  CreateAllocationRequest_LowerAddress(
10483  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10484  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10485 }
10486 
/*
Tries to place an allocation at the upper end of the block (double-stack
usage: the 2nd vector grows downward from the end of the block).

Fails (returns false) when:
- the block is already used as a ring buffer (the two modes are exclusive);
- there is not enough room below the current top of the upper stack,
  including VMA_DEBUG_MARGIN and alignment;
- a bufferImageGranularity conflict with a neighboring 1st-vector
  allocation cannot be avoided.
On success fills *pAllocationRequest (type = UpperAddress) and returns true.

currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy are not
used by this path: upper-address allocation never makes other allocations
lost (itemsToMakeLostCount is always set to 0).
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        // 2nd.back() is the current top (lowest offset) of the upper stack.
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        if(allocSize > lastSuballoc.offset)
        {
            // Would underflow below offset 0.
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment.
    // Growing downward, so alignment rounds the offset DOWN.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            // Move further down to the start of a granularity page.
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free space between the end of the 1st vector and the (unaligned)
        // top of the new allocation.
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
10606 
/*
Tries to place an allocation on the lower-address side of the block, in two
stages:

1. At the end of the 1st vector (modes EMPTY or DOUBLE_STACK), bounded above
   by 2nd.back().offset in DOUBLE_STACK mode, or by the block size otherwise.
2. Failing that, wraps around to the end of the 2nd vector (modes EMPTY or
   RING_BUFFER), bounded by the first non-null 1st-vector allocation.
   With canMakeOtherLost == true, it may instead plan to sacrifice colliding
   1st-vector allocations whose last-use frame is older than
   frameInUseCount frames — the plan is recorded in itemsToMakeLostCount /
   sumItemSize and executed later by MakeRequestedAllocationsLost().

On success fills *pAllocationRequest (type EndOf1st or EndOf2nd) and returns
true; returns false when no placement is possible.
NOTE(review): `strategy` is not consulted in this function.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate backwards from the end; stop at the first suballocation
            // that is already on a previous granularity page.
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // Upper bound of usable space: bottom of the upper stack in
        // DOUBLE_STACK mode, otherwise end of the block.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : size;

        // There is enough free space at the end after alignment.
        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->sumItemSize = 0;
        size_t index1st = m_1stNullItemsBeginCount;

        if(canMakeOtherLost)
        {
            // Plan to make lost every 1st-vector allocation overlapping the
            // candidate range [resultOffset, resultOffset + allocSize + margin).
            while(index1st < suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
            {
                // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                const VmaSuballocation& suballoc = suballocations1st[index1st];
                if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No problem.
                }
                else
                {
                    VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                    if(suballoc.hAllocation->CanBecomeLost() &&
                        suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++pAllocationRequest->itemsToMakeLostCount;
                        pAllocationRequest->sumItemSize += suballoc.size;
                    }
                    else
                    {
                        // Cannot be made lost: allocation not lost-capable or
                        // still within the in-use frame window.
                        return false;
                    }
                }
                ++index1st;
            }

            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, we must mark more allocations lost or fail.
            if(bufferImageGranularity > 1)
            {
                while(index1st < suballocations1st.size())
                {
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                    {
                        if(suballoc.hAllocation != VK_NULL_HANDLE)
                        {
                            // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                            if(suballoc.hAllocation->CanBecomeLost() &&
                                suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                            {
                                ++pAllocationRequest->itemsToMakeLostCount;
                                pAllocationRequest->sumItemSize += suballoc.size;
                            }
                            else
                            {
                                return false;
                            }
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                    ++index1st;
                }
            }

            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
            if(index1st == suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
            {
                // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
            }
        }

        // There is enough free space at the end after alignment.
        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free space = gap up to the first surviving 1st-vector item (or
            // block end), minus the items planned to be made lost.
            pAllocationRequest->sumFreeSize =
                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                - resultBaseOffset
                - pAllocationRequest->sumItemSize;
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}
10877 
/*
Executes the make-lost plan recorded in pAllocationRequest by
CreateAllocationRequest_LowerAddress(): frees exactly
itemsToMakeLostCount non-free suballocations.

Traversal starts at the first non-null item of the 1st vector; on reaching
its end the index wraps to 0 and, in ring-buffer mode, continues into the
2nd vector. Each item made lost is turned into a FREE suballocation with a
null handle, and m_SumFreeSize / null-item counters are updated.

Returns false (leaving any already-processed items lost) if MakeLost()
fails for an item; returns true after CleanupAfterFree() compacts the
vectors. Returns true immediately when the plan is empty.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // Reached the end of the 1st vector: wrap around. In ring-buffer
            // mode continue into the 2nd vector.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
10941 
10942 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10943 {
10944  uint32_t lostAllocationCount = 0;
10945 
10946  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10947  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10948  {
10949  VmaSuballocation& suballoc = suballocations1st[i];
10950  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10951  suballoc.hAllocation->CanBecomeLost() &&
10952  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10953  {
10954  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10955  suballoc.hAllocation = VK_NULL_HANDLE;
10956  ++m_1stNullItemsMiddleCount;
10957  m_SumFreeSize += suballoc.size;
10958  ++lostAllocationCount;
10959  }
10960  }
10961 
10962  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10963  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10964  {
10965  VmaSuballocation& suballoc = suballocations2nd[i];
10966  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10967  suballoc.hAllocation->CanBecomeLost() &&
10968  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10969  {
10970  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10971  suballoc.hAllocation = VK_NULL_HANDLE;
10972  ++m_2ndNullItemsCount;
10973  m_SumFreeSize += suballoc.size;
10974  ++lostAllocationCount;
10975  }
10976  }
10977 
10978  if(lostAllocationCount)
10979  {
10980  CleanupAfterFree();
10981  }
10982 
10983  return lostAllocationCount;
10984 }
10985 
10986 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10987 {
10988  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10989  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10990  {
10991  const VmaSuballocation& suballoc = suballocations1st[i];
10992  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10993  {
10994  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10995  {
10996  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10997  return VK_ERROR_VALIDATION_FAILED_EXT;
10998  }
10999  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11000  {
11001  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11002  return VK_ERROR_VALIDATION_FAILED_EXT;
11003  }
11004  }
11005  }
11006 
11007  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11008  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11009  {
11010  const VmaSuballocation& suballoc = suballocations2nd[i];
11011  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11012  {
11013  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11014  {
11015  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11016  return VK_ERROR_VALIDATION_FAILED_EXT;
11017  }
11018  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11019  {
11020  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11021  return VK_ERROR_VALIDATION_FAILED_EXT;
11022  }
11023  }
11024  }
11025 
11026  return VK_SUCCESS;
11027 }
11028 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    // Commits a previously created allocation request: appends the new
    // suballocation to the vector selected by request.type and updates the
    // 2nd-vector mode and free-size bookkeeping accordingly.
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            // Allocation growing down from the end of the block: the 2nd
            // vector acts as the upper part of a double stack. Incompatible
            // with ring-buffer usage of the same vector.
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            // Must start at or after the end of the last allocation in 1st vector.
            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());

            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }

            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    // Space consumed by the new allocation is no longer free.
    m_SumFreeSize -= newSuballoc.size;
}
11095 
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    // Convenience overload: frees by the allocation's recorded offset.
    FreeAtOffset(allocation->GetOffset());
}
11100 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation starting at the given offset. Fast paths handle
    // the first item of the 1st vector and the last item of either vector;
    // otherwise a binary search locates the item in the middle. Freed items
    // are only marked as free here; CleanupAfterFree() compacts/trims.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps offsets ascending; double stack keeps them descending.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
11190 
11191 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11192 {
11193  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11194  const size_t suballocCount = AccessSuballocations1st().size();
11195  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11196 }
11197 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores the invariants of the linear metadata after a free:
    // absorbs runs of null items at the edges of both vectors, optionally
    // compacts the 1st vector, and handles the transitions "2nd vector became
    // empty" and "1st vector became empty" (ring-buffer swap).
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Everything freed: reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            // A middle null adjacent to the leading run becomes a leading null.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Compact: move all non-null items to the front, dropping nulls.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading nulls of the (new) 1st vector move from middle to begin count.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
11302 
11303 
11305 // class VmaBlockMetadata_Buddy
11306 
// Constructs empty buddy metadata. Real initialization (root node, level
// count, usable size) happens in Init(). m_FreeCount starts at 1 for the
// root node that Init() will create.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Per-level free lists start empty (front/back pointers null).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
11316 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively destroys the whole node tree.
    // NOTE(review): assumes Init() was called so m_Root is non-null — confirm.
    DeleteNode(m_Root);
}
11321 
11322 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11323 {
11324  VmaBlockMetadata::Init(size);
11325 
11326  m_UsableSize = VmaPrevPow2(size);
11327  m_SumFreeSize = m_UsableSize;
11328 
11329  // Calculate m_LevelCount.
11330  m_LevelCount = 1;
11331  while(m_LevelCount < MAX_LEVELS &&
11332  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11333  {
11334  ++m_LevelCount;
11335  }
11336 
11337  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11338  rootNode->offset = 0;
11339  rootNode->type = Node::TYPE_FREE;
11340  rootNode->parent = VMA_NULL;
11341  rootNode->buddy = VMA_NULL;
11342 
11343  m_Root = rootNode;
11344  AddToFreeListFront(0, rootNode);
11345 }
11346 
// Consistency check of the buddy metadata: tree structure, aggregate
// counters, and the per-level free-node lists. Returns false (via
// VMA_VALIDATE) on the first violation.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Front node, if present, must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node must be the recorded back of the list.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Doubly-linked list integrity: next's prev points back here.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
11389 
11390 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11391 {
11392  for(uint32_t level = 0; level < m_LevelCount; ++level)
11393  {
11394  if(m_FreeList[level].front != VMA_NULL)
11395  {
11396  return LevelToNodeSize(level);
11397  }
11398  }
11399  return 0;
11400 }
11401 
// Fills outInfo with detailed statistics for this single block by walking
// the node tree, then accounts for the unusable tail beyond m_UsableSize.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Max starts at 0 and min at UINT64_MAX so the first sample wins.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        // Count the non-power-of-2 remainder of the block as an unused range.
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
11425 
11426 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11427 {
11428  const VkDeviceSize unusableSize = GetUnusableSize();
11429 
11430  inoutStats.size += GetSize();
11431  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11432  inoutStats.allocationCount += m_AllocationCount;
11433  inoutStats.unusedRangeCount += m_FreeCount;
11434  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11435 
11436  if(unusableSize > 0)
11437  {
11438  ++inoutStats.unusedRangeCount;
11439  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11440  }
11441 }
11442 
11443 #if VMA_STATS_STRING_ENABLED
11444 
// Emits a JSON description of this block: header with aggregate stats,
// then every allocation/unused range via a recursive tree walk, then the
// unusable tail (if any).
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
11469 
11470 #endif // #if VMA_STATS_STRING_ENABLED
11471 
// Tries to find a place for an allocation of allocSize/allocAlignment.
// Searches free lists from the smallest level that still fits the size up to
// level 0, taking the first free node whose offset satisfies the alignment.
// On success fills *pAllocationRequest (level stashed in customData) and
// returns true. Lost allocations are not supported by this algorithm, so
// currentFrameIndex/frameInUseCount/canMakeOtherLost/strategy are unused.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Iterates level = targetLevel down to 0 inclusive (post-decrement in
    // condition), preferring the smallest node that fits.
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Node offsets are multiples of the node size, so checking the
            // offset against the alignment is sufficient.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->type = VmaAllocationRequestType::Normal;
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Level is needed again in Alloc(); smuggled through customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
11523 
11524 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11525  uint32_t currentFrameIndex,
11526  uint32_t frameInUseCount,
11527  VmaAllocationRequest* pAllocationRequest)
11528 {
11529  /*
11530  Lost allocations are not supported in buddy allocator at the moment.
11531  Support might be added in the future.
11532  */
11533  return pAllocationRequest->itemsToMakeLostCount == 0;
11534 }
11535 
11536 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11537 {
11538  /*
11539  Lost allocations are not supported in buddy allocator at the moment.
11540  Support might be added in the future.
11541  */
11542  return 0;
11543 }
11544 
// Commits an allocation request created by CreateAllocationRequest():
// locates the chosen free node, splits it down to the target level if it is
// larger than needed, and converts the final node to an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which the free node was found, stashed in customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left must end up at the front so the descent below picks it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // Net effect of a split: one free node becomes two (+1 free count).
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
11620 
11621 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11622 {
11623  if(node->type == Node::TYPE_SPLIT)
11624  {
11625  DeleteNode(node->split.leftChild->buddy);
11626  DeleteNode(node->split.leftChild);
11627  }
11628 
11629  vma_delete(GetAllocationCallbacks(), node);
11630 }
11631 
// Recursively validates one node of the buddy tree against its parent,
// accumulating allocation/free statistics into ctx for cross-checking by
// the caller. Returns false (via VMA_VALIDATE) on the first inconsistency.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root has no buddy, and only the root has no parent.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    // Buddy links must be mutual.
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Internal fragmentation within the node counts as free size.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            // Left child shares the parent's offset; right child is shifted
            // by half the parent's size.
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
11675 
11676 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11677 {
11678  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11679  uint32_t level = 0;
11680  VkDeviceSize currLevelNodeSize = m_UsableSize;
11681  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11682  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11683  {
11684  ++level;
11685  currLevelNodeSize = nextLevelNodeSize;
11686  nextLevelNodeSize = currLevelNodeSize >> 1;
11687  }
11688  return level;
11689 }
11690 
11691 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11692 {
11693  // Find node and level.
11694  Node* node = m_Root;
11695  VkDeviceSize nodeOffset = 0;
11696  uint32_t level = 0;
11697  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11698  while(node->type == Node::TYPE_SPLIT)
11699  {
11700  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11701  if(offset < nodeOffset + nextLevelSize)
11702  {
11703  node = node->split.leftChild;
11704  }
11705  else
11706  {
11707  node = node->split.leftChild->buddy;
11708  nodeOffset += nextLevelSize;
11709  }
11710  ++level;
11711  levelNodeSize = nextLevelSize;
11712  }
11713 
11714  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11715  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11716 
11717  ++m_FreeCount;
11718  --m_AllocationCount;
11719  m_SumFreeSize += alloc->GetSize();
11720 
11721  node->type = Node::TYPE_FREE;
11722 
11723  // Join free nodes if possible.
11724  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11725  {
11726  RemoveFromFreeList(level, node->buddy);
11727  Node* const parent = node->parent;
11728 
11729  vma_delete(GetAllocationCallbacks(), node->buddy);
11730  vma_delete(GetAllocationCallbacks(), node);
11731  parent->type = Node::TYPE_FREE;
11732 
11733  node = parent;
11734  --level;
11735  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11736  --m_FreeCount;
11737  }
11738 
11739  AddToFreeListFront(level, node);
11740 }
11741 
11742 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11743 {
11744  switch(node->type)
11745  {
11746  case Node::TYPE_FREE:
11747  ++outInfo.unusedRangeCount;
11748  outInfo.unusedBytes += levelNodeSize;
11749  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11750  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
11751  break;
11752  case Node::TYPE_ALLOCATION:
11753  {
11754  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11755  ++outInfo.allocationCount;
11756  outInfo.usedBytes += allocSize;
11757  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11758  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11759 
11760  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11761  if(unusedRangeSize > 0)
11762  {
11763  ++outInfo.unusedRangeCount;
11764  outInfo.unusedBytes += unusedRangeSize;
11765  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11766  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11767  }
11768  }
11769  break;
11770  case Node::TYPE_SPLIT:
11771  {
11772  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11773  const Node* const leftChild = node->split.leftChild;
11774  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11775  const Node* const rightChild = leftChild->buddy;
11776  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11777  }
11778  break;
11779  default:
11780  VMA_ASSERT(0);
11781  }
11782 }
11783 
11784 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11785 {
11786  VMA_ASSERT(node->type == Node::TYPE_FREE);
11787 
11788  // List is empty.
11789  Node* const frontNode = m_FreeList[level].front;
11790  if(frontNode == VMA_NULL)
11791  {
11792  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11793  node->free.prev = node->free.next = VMA_NULL;
11794  m_FreeList[level].front = m_FreeList[level].back = node;
11795  }
11796  else
11797  {
11798  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11799  node->free.prev = VMA_NULL;
11800  node->free.next = frontNode;
11801  frontNode->free.prev = node;
11802  m_FreeList[level].front = node;
11803  }
11804 }
11805 
// Unlinks node from the doubly-linked free list of the given level,
// updating the list's front/back pointers when node is at either end.
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
11836 
11837 #if VMA_STATS_STRING_ENABLED
// Recursively emits JSON entries for one node of the buddy tree: free nodes
// as unused ranges, allocation nodes as allocations (plus their internal
// fragmentation as an unused range), split nodes recurse into both children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                // Remainder of the node beyond the allocation is unused.
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
11868 #endif // #if VMA_STATS_STRING_ENABLED
11869 
11870 
11872 // class VmaDeviceMemoryBlock
11873 
// Constructs an empty block; real initialization happens in Init().
// NOTE(review): m_hParentPool is not in the initializer list and is first
// set in Init() — confirm nothing reads it before Init() is called.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
11883 
11884 void VmaDeviceMemoryBlock::Init(
11885  VmaAllocator hAllocator,
11886  VmaPool hParentPool,
11887  uint32_t newMemoryTypeIndex,
11888  VkDeviceMemory newMemory,
11889  VkDeviceSize newSize,
11890  uint32_t id,
11891  uint32_t algorithm)
11892 {
11893  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11894 
11895  m_hParentPool = hParentPool;
11896  m_MemoryTypeIndex = newMemoryTypeIndex;
11897  m_Id = id;
11898  m_hMemory = newMemory;
11899 
11900  switch(algorithm)
11901  {
11903  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11904  break;
11906  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11907  break;
11908  default:
11909  VMA_ASSERT(0);
11910  // Fall-through.
11911  case 0:
11912  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11913  }
11914  m_pMetadata->Init(newSize);
11915 }
11916 
// Releases the block's Vulkan memory and metadata. Must only be called when
// all suballocations have already been freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
11930 
11931 bool VmaDeviceMemoryBlock::Validate() const
11932 {
11933  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11934  (m_pMetadata->GetSize() != 0));
11935 
11936  return m_pMetadata->Validate();
11937 }
11938 
11939 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11940 {
11941  void* pData = nullptr;
11942  VkResult res = Map(hAllocator, 1, &pData);
11943  if(res != VK_SUCCESS)
11944  {
11945  return res;
11946  }
11947 
11948  res = m_pMetadata->CheckCorruption(pData);
11949 
11950  Unmap(hAllocator, 1);
11951 
11952  return res;
11953 }
11954 
// Maps the block's memory, reference-counted: the first mapping calls
// vkMapMemory over the whole block; subsequent calls just bump m_MapCount
// and return the cached pointer. count is the number of references to add;
// ppData may be null if the caller does not need the pointer.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Guards m_MapCount/m_pMappedData against concurrent Map/Unmap.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just add references and hand out the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the entire block.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
11993 
11994 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11995 {
11996  if(count == 0)
11997  {
11998  return;
11999  }
12000 
12001  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12002  if(m_MapCount >= count)
12003  {
12004  m_MapCount -= count;
12005  if(m_MapCount == 0)
12006  {
12007  m_pMappedData = VMA_NULL;
12008  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12009  }
12010  }
12011  else
12012  {
12013  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12014  }
12015 }
12016 
12017 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12018 {
12019  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12020  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12021 
12022  void* pData;
12023  VkResult res = Map(hAllocator, 1, &pData);
12024  if(res != VK_SUCCESS)
12025  {
12026  return res;
12027  }
12028 
12029  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12030  VmaWriteMagicValue(pData, allocOffset + allocSize);
12031 
12032  Unmap(hAllocator, 1);
12033 
12034  return VK_SUCCESS;
12035 }
12036 
12037 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12038 {
12039  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12040  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12041 
12042  void* pData;
12043  VkResult res = Map(hAllocator, 1, &pData);
12044  if(res != VK_SUCCESS)
12045  {
12046  return res;
12047  }
12048 
12049  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12050  {
12051  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12052  }
12053  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12054  {
12055  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12056  }
12057 
12058  Unmap(hAllocator, 1);
12059 
12060  return VK_SUCCESS;
12061 }
12062 
12063 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12064  const VmaAllocator hAllocator,
12065  const VmaAllocation hAllocation,
12066  VkDeviceSize allocationLocalOffset,
12067  VkBuffer hBuffer,
12068  const void* pNext)
12069 {
12070  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12071  hAllocation->GetBlock() == this);
12072  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12073  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12074  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12075  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12076  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12077  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12078 }
12079 
12080 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12081  const VmaAllocator hAllocator,
12082  const VmaAllocation hAllocation,
12083  VkDeviceSize allocationLocalOffset,
12084  VkImage hImage,
12085  const void* pNext)
12086 {
12087  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12088  hAllocation->GetBlock() == this);
12089  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12090  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12091  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12092  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12093  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12094  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12095 }
12096 
12097 static void InitStatInfo(VmaStatInfo& outInfo)
12098 {
12099  memset(&outInfo, 0, sizeof(outInfo));
12100  outInfo.allocationSizeMin = UINT64_MAX;
12101  outInfo.unusedRangeSizeMin = UINT64_MAX;
12102 }
12103 
12104 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12105 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12106 {
12107  inoutInfo.blockCount += srcInfo.blockCount;
12108  inoutInfo.allocationCount += srcInfo.allocationCount;
12109  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12110  inoutInfo.usedBytes += srcInfo.usedBytes;
12111  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12112  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12113  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12114  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12115  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12116 }
12117 
12118 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12119 {
12120  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12121  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12122  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12123  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12124 }
12125 
// Constructs a custom pool: forwards user-provided createInfo into the
// embedded block vector, falling back to preferredBlockSize when the user
// did not request an explicit block size.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0),
    m_Name(VMA_NULL)
{
}
12145 
// Intentionally empty: m_BlockVector cleans itself up via its own destructor.
// NOTE(review): m_Name (a raw char*) appears to be released elsewhere - see
// SetName(); confirm against the pool destruction path.
VmaPool_T::~VmaPool_T()
{
}
12149 
12150 void VmaPool_T::SetName(const char* pName)
12151 {
12152  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12153  VmaFreeString(allocs, m_Name);
12154 
12155  if(pName != VMA_NULL)
12156  {
12157  m_Name = VmaCreateStringCopy(allocs, pName);
12158  }
12159  else
12160  {
12161  m_Name = VMA_NULL;
12162  }
12163 }
12164 
12165 #if VMA_STATS_STRING_ENABLED
12166 
12167 #endif // #if VMA_STATS_STRING_ENABLED
12168 
// Stores the configuration for a vector of memory blocks of one Vulkan memory
// type. No blocks are created here - see CreateMinBlocks()/CreateBlock().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
12195 
12196 VmaBlockVector::~VmaBlockVector()
12197 {
12198  for(size_t i = m_Blocks.size(); i--; )
12199  {
12200  m_Blocks[i]->Destroy(m_hAllocator);
12201  vma_delete(m_hAllocator, m_Blocks[i]);
12202  }
12203 }
12204 
12205 VkResult VmaBlockVector::CreateMinBlocks()
12206 {
12207  for(size_t i = 0; i < m_MinBlockCount; ++i)
12208  {
12209  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12210  if(res != VK_SUCCESS)
12211  {
12212  return res;
12213  }
12214  }
12215  return VK_SUCCESS;
12216 }
12217 
12218 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12219 {
12220  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12221 
12222  const size_t blockCount = m_Blocks.size();
12223 
12224  pStats->size = 0;
12225  pStats->unusedSize = 0;
12226  pStats->allocationCount = 0;
12227  pStats->unusedRangeCount = 0;
12228  pStats->unusedRangeSizeMax = 0;
12229  pStats->blockCount = blockCount;
12230 
12231  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12232  {
12233  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12234  VMA_ASSERT(pBlock);
12235  VMA_HEAVY_ASSERT(pBlock->Validate());
12236  pBlock->m_pMetadata->AddPoolStats(*pStats);
12237  }
12238 }
12239 
12240 bool VmaBlockVector::IsEmpty()
12241 {
12242  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12243  return m_Blocks.empty();
12244 }
12245 
12246 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12247 {
12248  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12249  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12250  (VMA_DEBUG_MARGIN > 0) &&
12251  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12252  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12253 }
12254 
// Maximum number of retries in AllocatePage's make-other-lost loop, in case
// other threads keep touching allocations concurrently (see its use below).
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12256 
12257 VkResult VmaBlockVector::Allocate(
12258  uint32_t currentFrameIndex,
12259  VkDeviceSize size,
12260  VkDeviceSize alignment,
12261  const VmaAllocationCreateInfo& createInfo,
12262  VmaSuballocationType suballocType,
12263  size_t allocationCount,
12264  VmaAllocation* pAllocations)
12265 {
12266  size_t allocIndex;
12267  VkResult res = VK_SUCCESS;
12268 
12269  if(IsCorruptionDetectionEnabled())
12270  {
12271  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12272  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12273  }
12274 
12275  {
12276  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12277  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12278  {
12279  res = AllocatePage(
12280  currentFrameIndex,
12281  size,
12282  alignment,
12283  createInfo,
12284  suballocType,
12285  pAllocations + allocIndex);
12286  if(res != VK_SUCCESS)
12287  {
12288  break;
12289  }
12290  }
12291  }
12292 
12293  if(res != VK_SUCCESS)
12294  {
12295  // Free all already created allocations.
12296  while(allocIndex--)
12297  {
12298  Free(pAllocations[allocIndex]);
12299  }
12300  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12301  }
12302 
12303  return res;
12304 }
12305 
12306 VkResult VmaBlockVector::AllocatePage(
12307  uint32_t currentFrameIndex,
12308  VkDeviceSize size,
12309  VkDeviceSize alignment,
12310  const VmaAllocationCreateInfo& createInfo,
12311  VmaSuballocationType suballocType,
12312  VmaAllocation* pAllocation)
12313 {
12314  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12315  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12316  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12317  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12318 
12319  VkDeviceSize freeMemory;
12320  {
12321  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12322  VmaBudget heapBudget = {};
12323  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12324  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12325  }
12326 
12327  const bool canFallbackToDedicated = !IsCustomPool();
12328  const bool canCreateNewBlock =
12329  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12330  (m_Blocks.size() < m_MaxBlockCount) &&
12331  (freeMemory >= size || !canFallbackToDedicated);
12332  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12333 
12334  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
12335  // Which in turn is available only when maxBlockCount = 1.
12336  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12337  {
12338  canMakeOtherLost = false;
12339  }
12340 
12341  // Upper address can only be used with linear allocator and within single memory block.
12342  if(isUpperAddress &&
12343  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12344  {
12345  return VK_ERROR_FEATURE_NOT_PRESENT;
12346  }
12347 
12348  // Validate strategy.
12349  switch(strategy)
12350  {
12351  case 0:
12353  break;
12357  break;
12358  default:
12359  return VK_ERROR_FEATURE_NOT_PRESENT;
12360  }
12361 
12362  // Early reject: requested allocation size is larger that maximum block size for this block vector.
12363  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12364  {
12365  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12366  }
12367 
12368  /*
12369  Under certain condition, this whole section can be skipped for optimization, so
12370  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12371  e.g. for custom pools with linear algorithm.
12372  */
12373  if(!canMakeOtherLost || canCreateNewBlock)
12374  {
12375  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12376  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12378 
12379  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12380  {
12381  // Use only last block.
12382  if(!m_Blocks.empty())
12383  {
12384  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12385  VMA_ASSERT(pCurrBlock);
12386  VkResult res = AllocateFromBlock(
12387  pCurrBlock,
12388  currentFrameIndex,
12389  size,
12390  alignment,
12391  allocFlagsCopy,
12392  createInfo.pUserData,
12393  suballocType,
12394  strategy,
12395  pAllocation);
12396  if(res == VK_SUCCESS)
12397  {
12398  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12399  return VK_SUCCESS;
12400  }
12401  }
12402  }
12403  else
12404  {
12406  {
12407  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12408  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12409  {
12410  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12411  VMA_ASSERT(pCurrBlock);
12412  VkResult res = AllocateFromBlock(
12413  pCurrBlock,
12414  currentFrameIndex,
12415  size,
12416  alignment,
12417  allocFlagsCopy,
12418  createInfo.pUserData,
12419  suballocType,
12420  strategy,
12421  pAllocation);
12422  if(res == VK_SUCCESS)
12423  {
12424  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12425  return VK_SUCCESS;
12426  }
12427  }
12428  }
12429  else // WORST_FIT, FIRST_FIT
12430  {
12431  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12432  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12433  {
12434  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12435  VMA_ASSERT(pCurrBlock);
12436  VkResult res = AllocateFromBlock(
12437  pCurrBlock,
12438  currentFrameIndex,
12439  size,
12440  alignment,
12441  allocFlagsCopy,
12442  createInfo.pUserData,
12443  suballocType,
12444  strategy,
12445  pAllocation);
12446  if(res == VK_SUCCESS)
12447  {
12448  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12449  return VK_SUCCESS;
12450  }
12451  }
12452  }
12453  }
12454 
12455  // 2. Try to create new block.
12456  if(canCreateNewBlock)
12457  {
12458  // Calculate optimal size for new block.
12459  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12460  uint32_t newBlockSizeShift = 0;
12461  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12462 
12463  if(!m_ExplicitBlockSize)
12464  {
12465  // Allocate 1/8, 1/4, 1/2 as first blocks.
12466  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12467  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12468  {
12469  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12470  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12471  {
12472  newBlockSize = smallerNewBlockSize;
12473  ++newBlockSizeShift;
12474  }
12475  else
12476  {
12477  break;
12478  }
12479  }
12480  }
12481 
12482  size_t newBlockIndex = 0;
12483  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12484  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12485  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12486  if(!m_ExplicitBlockSize)
12487  {
12488  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12489  {
12490  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12491  if(smallerNewBlockSize >= size)
12492  {
12493  newBlockSize = smallerNewBlockSize;
12494  ++newBlockSizeShift;
12495  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12496  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12497  }
12498  else
12499  {
12500  break;
12501  }
12502  }
12503  }
12504 
12505  if(res == VK_SUCCESS)
12506  {
12507  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12508  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12509 
12510  res = AllocateFromBlock(
12511  pBlock,
12512  currentFrameIndex,
12513  size,
12514  alignment,
12515  allocFlagsCopy,
12516  createInfo.pUserData,
12517  suballocType,
12518  strategy,
12519  pAllocation);
12520  if(res == VK_SUCCESS)
12521  {
12522  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12523  return VK_SUCCESS;
12524  }
12525  else
12526  {
12527  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12529  }
12530  }
12531  }
12532  }
12533 
12534  // 3. Try to allocate from existing blocks with making other allocations lost.
12535  if(canMakeOtherLost)
12536  {
12537  uint32_t tryIndex = 0;
12538  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12539  {
12540  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12541  VmaAllocationRequest bestRequest = {};
12542  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12543 
12544  // 1. Search existing allocations.
12546  {
12547  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12548  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12549  {
12550  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12551  VMA_ASSERT(pCurrBlock);
12552  VmaAllocationRequest currRequest = {};
12553  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12554  currentFrameIndex,
12555  m_FrameInUseCount,
12556  m_BufferImageGranularity,
12557  size,
12558  alignment,
12559  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12560  suballocType,
12561  canMakeOtherLost,
12562  strategy,
12563  &currRequest))
12564  {
12565  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12566  if(pBestRequestBlock == VMA_NULL ||
12567  currRequestCost < bestRequestCost)
12568  {
12569  pBestRequestBlock = pCurrBlock;
12570  bestRequest = currRequest;
12571  bestRequestCost = currRequestCost;
12572 
12573  if(bestRequestCost == 0)
12574  {
12575  break;
12576  }
12577  }
12578  }
12579  }
12580  }
12581  else // WORST_FIT, FIRST_FIT
12582  {
12583  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12584  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12585  {
12586  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12587  VMA_ASSERT(pCurrBlock);
12588  VmaAllocationRequest currRequest = {};
12589  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12590  currentFrameIndex,
12591  m_FrameInUseCount,
12592  m_BufferImageGranularity,
12593  size,
12594  alignment,
12595  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12596  suballocType,
12597  canMakeOtherLost,
12598  strategy,
12599  &currRequest))
12600  {
12601  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12602  if(pBestRequestBlock == VMA_NULL ||
12603  currRequestCost < bestRequestCost ||
12605  {
12606  pBestRequestBlock = pCurrBlock;
12607  bestRequest = currRequest;
12608  bestRequestCost = currRequestCost;
12609 
12610  if(bestRequestCost == 0 ||
12612  {
12613  break;
12614  }
12615  }
12616  }
12617  }
12618  }
12619 
12620  if(pBestRequestBlock != VMA_NULL)
12621  {
12622  if(mapped)
12623  {
12624  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12625  if(res != VK_SUCCESS)
12626  {
12627  return res;
12628  }
12629  }
12630 
12631  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12632  currentFrameIndex,
12633  m_FrameInUseCount,
12634  &bestRequest))
12635  {
12636  // Allocate from this pBlock.
12637  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12638  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12639  UpdateHasEmptyBlock();
12640  (*pAllocation)->InitBlockAllocation(
12641  pBestRequestBlock,
12642  bestRequest.offset,
12643  alignment,
12644  size,
12645  m_MemoryTypeIndex,
12646  suballocType,
12647  mapped,
12648  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12649  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12650  VMA_DEBUG_LOG(" Returned from existing block");
12651  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12652  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12653  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12654  {
12655  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12656  }
12657  if(IsCorruptionDetectionEnabled())
12658  {
12659  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12660  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12661  }
12662  return VK_SUCCESS;
12663  }
12664  // else: Some allocations must have been touched while we are here. Next try.
12665  }
12666  else
12667  {
12668  // Could not find place in any of the blocks - break outer loop.
12669  break;
12670  }
12671  }
12672  /* Maximum number of tries exceeded - a very unlike event when many other
12673  threads are simultaneously touching allocations making it impossible to make
12674  lost at the same time as we try to allocate. */
12675  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12676  {
12677  return VK_ERROR_TOO_MANY_OBJECTS;
12678  }
12679  }
12680 
12681  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12682 }
12683 
/*
Returns hAllocation's region to its owning block. If the block becomes empty
it may be scheduled for destruction (keeping at least m_MinBlockCount blocks
and at most one spare empty block); the actual VkDeviceMemory release is
deferred until after the mutex is dropped.
*/
void VmaBlockVector::Free(
    const VmaAllocation hAllocation)
{
    // Block to destroy after releasing the lock, if any.
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // When the heap is over budget, empty blocks are released more eagerly below.
    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // Verify the magic-value margins around the allocation before freeing it.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on the block.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty block. We don't want to have two, so delete this one.
            if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have an empty block - leave it.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock && canDeleteBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        UpdateHasEmptyBlock();
        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty block");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
12756 
12757 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12758 {
12759  VkDeviceSize result = 0;
12760  for(size_t i = m_Blocks.size(); i--; )
12761  {
12762  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12763  if(result >= m_PreferredBlockSize)
12764  {
12765  break;
12766  }
12767  }
12768  return result;
12769 }
12770 
12771 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12772 {
12773  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12774  {
12775  if(m_Blocks[blockIndex] == pBlock)
12776  {
12777  VmaVectorRemove(m_Blocks, blockIndex);
12778  return;
12779  }
12780  }
12781  VMA_ASSERT(0);
12782 }
12783 
12784 void VmaBlockVector::IncrementallySortBlocks()
12785 {
12786  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12787  {
12788  // Bubble sort only until first swap.
12789  for(size_t i = 1; i < m_Blocks.size(); ++i)
12790  {
12791  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12792  {
12793  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12794  return;
12795  }
12796  }
12797  }
12798 }
12799 
/*
Tries to allocate size bytes from the given pBlock without making any other
allocations lost. On success creates and initializes *pAllocation and returns
VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY.
*/
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Callers must have stripped this flag - this path never loses allocations.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // A persistently mapped allocation takes one map reference on the block.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        UpdateHasEmptyBlock();
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            m_MemoryTypeIndex,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Account the new allocation against the heap's budget.
        m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
12869 
/*
Allocates a new VkDeviceMemory of blockSize and wraps it in a new
VmaDeviceMemoryBlock appended to m_Blocks. Optionally returns the new block's
index through pNewBlockIndex.
*/
VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    allocInfo.allocationSize = blockSize;

#if VMA_BUFFER_DEVICE_ADDRESS
    // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    if(m_hAllocator->m_UseKhrBufferDeviceAddress)
    {
        allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
        VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
    }
#endif // #if VMA_BUFFER_DEVICE_ADDRESS

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    if(res < 0)
    {
        return res;
    }

    // New VkDeviceMemory successfully created.

    // Create new Allocation for it.
    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    pBlock->Init(
        m_hAllocator,
        m_hParentPool,
        m_MemoryTypeIndex,
        mem,
        allocInfo.allocationSize,
        // Each block gets a unique, monotonically increasing id.
        m_NextBlockId++,
        m_Algorithm);

    m_Blocks.push_back(pBlock);
    if(pNewBlockIndex != VMA_NULL)
    {
        *pNewBlockIndex = m_Blocks.size() - 1;
    }

    return VK_SUCCESS;
}
12914 
// Performs the recorded defragmentation moves on the CPU side by memmove-ing
// allocation data between mapped blocks of this vector.
// Blocks touched by any move are mapped if not already mapped, and those that
// this function mapped itself are unmapped again at the end, regardless of
// success or failure. For non-host-coherent memory, source ranges are
// invalidated before reading and destination ranges flushed after writing.
// pDefragCtx->res must be VK_SUCCESS on entry; it is set to an error code if
// mapping a block fails, in which case no data is copied.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        // Block participates in at least one move (as source or destination).
        BLOCK_FLAG_USED = 0x00000001,
        // This function mapped the block itself and must unmap it at the end.
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    // Per-block scratch state for this pass only.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source. The range is expanded down/up to
            // nonCoherentAtomSize boundaries and clamped to the block size,
            // as required for vkInvalidateMappedMemoryRanges.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: source and destination regions may overlap
            // when moving within the same block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                // Re-write the magic guard values around the new location.
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
13033 
// Records the defragmentation moves into a command buffer for GPU execution.
// For every block involved in a move, a temporary VkBuffer spanning the whole
// block is created and bound to the block's memory, then vkCmdCopyBuffer
// regions are recorded between those buffers. The buffers are stored in
// pDefragCtx->blockContexts for later destruction (see DefragmentationEnd).
// On success with at least one move recorded, pDefragCtx->res is set to
// VK_NOT_READY because the copies have only been recorded, not executed.
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    // Zero-initialized per-block contexts; hBuffer stays VK_NULL_HANDLE for
    // blocks not participating in any move.
    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];

        //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
        {
            // Old school move still require us to map the whole block
            pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
            pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        }
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                // Buffer covers the whole block, bound at offset 0, so move
                // offsets can be used directly as buffer offsets.
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        // Copies are recorded but not yet executed by the GPU.
        pDefragCtx->res = VK_NOT_READY;
    }
}
13110 
13111 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13112 {
13113  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13114  {
13115  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13116  if(pBlock->m_pMetadata->IsEmpty())
13117  {
13118  if(m_Blocks.size() > m_MinBlockCount)
13119  {
13120  if(pDefragmentationStats != VMA_NULL)
13121  {
13122  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13123  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13124  }
13125 
13126  VmaVectorRemove(m_Blocks, blockIndex);
13127  pBlock->Destroy(m_hAllocator);
13128  vma_delete(m_hAllocator, pBlock);
13129  }
13130  else
13131  {
13132  break;
13133  }
13134  }
13135  }
13136  UpdateHasEmptyBlock();
13137 }
13138 
13139 void VmaBlockVector::UpdateHasEmptyBlock()
13140 {
13141  m_HasEmptyBlock = false;
13142  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13143  {
13144  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13145  if(pBlock->m_pMetadata->IsEmpty())
13146  {
13147  m_HasEmptyBlock = true;
13148  break;
13149  }
13150  }
13151 }
13152 
13153 #if VMA_STATS_STRING_ENABLED
13154 
13155 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13156 {
13157  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13158 
13159  json.BeginObject();
13160 
13161  if(IsCustomPool())
13162  {
13163  const char* poolName = m_hParentPool->GetName();
13164  if(poolName != VMA_NULL && poolName[0] != '\0')
13165  {
13166  json.WriteString("Name");
13167  json.WriteString(poolName);
13168  }
13169 
13170  json.WriteString("MemoryTypeIndex");
13171  json.WriteNumber(m_MemoryTypeIndex);
13172 
13173  json.WriteString("BlockSize");
13174  json.WriteNumber(m_PreferredBlockSize);
13175 
13176  json.WriteString("BlockCount");
13177  json.BeginObject(true);
13178  if(m_MinBlockCount > 0)
13179  {
13180  json.WriteString("Min");
13181  json.WriteNumber((uint64_t)m_MinBlockCount);
13182  }
13183  if(m_MaxBlockCount < SIZE_MAX)
13184  {
13185  json.WriteString("Max");
13186  json.WriteNumber((uint64_t)m_MaxBlockCount);
13187  }
13188  json.WriteString("Cur");
13189  json.WriteNumber((uint64_t)m_Blocks.size());
13190  json.EndObject();
13191 
13192  if(m_FrameInUseCount > 0)
13193  {
13194  json.WriteString("FrameInUseCount");
13195  json.WriteNumber(m_FrameInUseCount);
13196  }
13197 
13198  if(m_Algorithm != 0)
13199  {
13200  json.WriteString("Algorithm");
13201  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13202  }
13203  }
13204  else
13205  {
13206  json.WriteString("PreferredBlockSize");
13207  json.WriteNumber(m_PreferredBlockSize);
13208  }
13209 
13210  json.WriteString("Blocks");
13211  json.BeginObject();
13212  for(size_t i = 0; i < m_Blocks.size(); ++i)
13213  {
13214  json.BeginString();
13215  json.ContinueString(m_Blocks[i]->GetId());
13216  json.EndString();
13217 
13218  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13219  }
13220  json.EndObject();
13221 
13222  json.EndObject();
13223 }
13224 
13225 #endif // #if VMA_STATS_STRING_ENABLED
13226 
13227 void VmaBlockVector::Defragment(
13228  class VmaBlockVectorDefragmentationContext* pCtx,
13230  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13231  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13232  VkCommandBuffer commandBuffer)
13233 {
13234  pCtx->res = VK_SUCCESS;
13235 
13236  const VkMemoryPropertyFlags memPropFlags =
13237  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13238  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13239 
13240  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13241  isHostVisible;
13242  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13243  !IsCorruptionDetectionEnabled() &&
13244  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13245 
13246  // There are options to defragment this memory type.
13247  if(canDefragmentOnCpu || canDefragmentOnGpu)
13248  {
13249  bool defragmentOnGpu;
13250  // There is only one option to defragment this memory type.
13251  if(canDefragmentOnGpu != canDefragmentOnCpu)
13252  {
13253  defragmentOnGpu = canDefragmentOnGpu;
13254  }
13255  // Both options are available: Heuristics to choose the best one.
13256  else
13257  {
13258  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13259  m_hAllocator->IsIntegratedGpu();
13260  }
13261 
13262  bool overlappingMoveSupported = !defragmentOnGpu;
13263 
13264  if(m_hAllocator->m_UseMutex)
13265  {
13267  {
13268  if(!m_Mutex.TryLockWrite())
13269  {
13270  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13271  return;
13272  }
13273  }
13274  else
13275  {
13276  m_Mutex.LockWrite();
13277  pCtx->mutexLocked = true;
13278  }
13279  }
13280 
13281  pCtx->Begin(overlappingMoveSupported, flags);
13282 
13283  // Defragment.
13284 
13285  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13286  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13287  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13288 
13289  // Accumulate statistics.
13290  if(pStats != VMA_NULL)
13291  {
13292  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13293  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13294  pStats->bytesMoved += bytesMoved;
13295  pStats->allocationsMoved += allocationsMoved;
13296  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13297  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13298  if(defragmentOnGpu)
13299  {
13300  maxGpuBytesToMove -= bytesMoved;
13301  maxGpuAllocationsToMove -= allocationsMoved;
13302  }
13303  else
13304  {
13305  maxCpuBytesToMove -= bytesMoved;
13306  maxCpuAllocationsToMove -= allocationsMoved;
13307  }
13308  }
13309 
13311  {
13312  if(m_hAllocator->m_UseMutex)
13313  m_Mutex.UnlockWrite();
13314 
13315  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13316  pCtx->res = VK_NOT_READY;
13317 
13318  return;
13319  }
13320 
13321  if(pCtx->res >= VK_SUCCESS)
13322  {
13323  if(defragmentOnGpu)
13324  {
13325  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13326  }
13327  else
13328  {
13329  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13330  }
13331  }
13332  }
13333 }
13334 
// Finishes a defragmentation operation on this block vector: destroys the
// temporary VkBuffers created for GPU copies, frees blocks that became empty
// (on success), and releases the mutex if this context holds it.
// In incremental mode Defragment() released the lock after computing moves,
// so it is re-acquired here before mutating state.
void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    uint32_t flags,
    VmaDefragmentationStats* pStats)
{
    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
    {
        VMA_ASSERT(pCtx->mutexLocked == false);

        // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
        // lock protecting us. Since we mutate state here, we have to take the lock out now
        m_Mutex.LockWrite();
        pCtx->mutexLocked = true;
    }

    // If the mutex isn't locked we didn't do any work and there is nothing to delete.
    if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
    {
        // Destroy buffers.
        for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
        {
            VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
            if(blockCtx.hBuffer)
            {
                (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            // Only reclaim empty blocks if the defragmentation did not fail.
            FreeEmptyBlocks(pStats);
        }
    }

    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}
13375 
13376 uint32_t VmaBlockVector::ProcessDefragmentations(
13377  class VmaBlockVectorDefragmentationContext *pCtx,
13378  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13379 {
13380  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13381 
13382  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13383 
13384  for(uint32_t i = 0; i < moveCount; ++ i)
13385  {
13386  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13387 
13388  pMove->allocation = move.hAllocation;
13389  pMove->memory = move.pDstBlock->GetDeviceMemory();
13390  pMove->offset = move.dstOffset;
13391 
13392  ++ pMove;
13393  }
13394 
13395  pCtx->defragmentationMovesProcessed += moveCount;
13396 
13397  return moveCount;
13398 }
13399 
13400 void VmaBlockVector::CommitDefragmentations(
13401  class VmaBlockVectorDefragmentationContext *pCtx,
13402  VmaDefragmentationStats* pStats)
13403 {
13404  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13405 
13406  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13407  {
13408  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13409 
13410  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13411  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13412  }
13413 
13414  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13415  FreeEmptyBlocks(pStats);
13416 }
13417 
13418 size_t VmaBlockVector::CalcAllocationCount() const
13419 {
13420  size_t result = 0;
13421  for(size_t i = 0; i < m_Blocks.size(); ++i)
13422  {
13423  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13424  }
13425  return result;
13426 }
13427 
13428 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13429 {
13430  if(m_BufferImageGranularity == 1)
13431  {
13432  return false;
13433  }
13434  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13435  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13436  {
13437  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13438  VMA_ASSERT(m_Algorithm == 0);
13439  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13440  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13441  {
13442  return true;
13443  }
13444  }
13445  return false;
13446 }
13447 
13448 void VmaBlockVector::MakePoolAllocationsLost(
13449  uint32_t currentFrameIndex,
13450  size_t* pLostAllocationCount)
13451 {
13452  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13453  size_t lostAllocationCount = 0;
13454  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13455  {
13456  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13457  VMA_ASSERT(pBlock);
13458  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13459  }
13460  if(pLostAllocationCount != VMA_NULL)
13461  {
13462  *pLostAllocationCount = lostAllocationCount;
13463  }
13464 }
13465 
13466 VkResult VmaBlockVector::CheckCorruption()
13467 {
13468  if(!IsCorruptionDetectionEnabled())
13469  {
13470  return VK_ERROR_FEATURE_NOT_PRESENT;
13471  }
13472 
13473  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13474  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13475  {
13476  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13477  VMA_ASSERT(pBlock);
13478  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13479  if(res != VK_SUCCESS)
13480  {
13481  return res;
13482  }
13483  }
13484  return VK_SUCCESS;
13485 }
13486 
13487 void VmaBlockVector::AddStats(VmaStats* pStats)
13488 {
13489  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13490  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13491 
13492  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13493 
13494  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13495  {
13496  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13497  VMA_ASSERT(pBlock);
13498  VMA_HEAVY_ASSERT(pBlock->Validate());
13499  VmaStatInfo allocationStatInfo;
13500  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13501  VmaAddStatInfo(pStats->total, allocationStatInfo);
13502  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13503  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13504  }
13505 }
13506 
13508 // VmaDefragmentationAlgorithm_Generic members definition
13509 
13510 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13511  VmaAllocator hAllocator,
13512  VmaBlockVector* pBlockVector,
13513  uint32_t currentFrameIndex,
13514  bool overlappingMoveSupported) :
13515  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13516  m_AllocationCount(0),
13517  m_AllAllocations(false),
13518  m_BytesMoved(0),
13519  m_AllocationsMoved(0),
13520  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13521 {
13522  // Create block info for each block.
13523  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13524  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13525  {
13526  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13527  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13528  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13529  m_Blocks.push_back(pBlockInfo);
13530  }
13531 
13532  // Sort them by m_pBlock pointer value.
13533  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13534 }
13535 
13536 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13537 {
13538  for(size_t i = m_Blocks.size(); i--; )
13539  {
13540  vma_delete(m_hAllocator, m_Blocks[i]);
13541  }
13542 }
13543 
13544 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13545 {
13546  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
13547  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13548  {
13549  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13550  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13551  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13552  {
13553  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13554  (*it)->m_Allocations.push_back(allocInfo);
13555  }
13556  else
13557  {
13558  VMA_ASSERT(0);
13559  }
13560 
13561  ++m_AllocationCount;
13562  }
13563 }
13564 
// One round of the generic defragmentation algorithm.
// Iterates allocations from the most "source" block (end of m_Blocks) towards
// the front, trying to re-place each allocation into an earlier block or an
// earlier offset in the same block. Successful placements are appended to
// `moves` and reserved in destination metadata. When freeOldAllocations is
// true the source suballocation is freed and the allocation repointed
// immediately; otherwise that is deferred (incremental mode).
// Stops with VK_SUCCESS when all candidates are processed or when the
// maxBytesToMove / maxAllocationsToMove budget would be exceeded.
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove,
    bool freeOldAllocations)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX acts as "not yet chosen"; the loop below resets it to the last
    // allocation of the current block.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
                MoveMakesSense(
                    dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                VmaDefragmentationMove move = {};
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                move.hAllocation = allocInfo.m_hAllocation;
                move.pSrcBlock = pSrcBlockInfo->m_pBlock;
                move.pDstBlock = pDstBlockInfo->m_pBlock;

                moves.push_back(move);

                // Reserve the destination range in metadata right away so
                // subsequent requests in this round cannot reuse it.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);

                if(freeOldAllocations)
                {
                    pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
                    allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
                }

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
13719 
13720 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13721 {
13722  size_t result = 0;
13723  for(size_t i = 0; i < m_Blocks.size(); ++i)
13724  {
13725  if(m_Blocks[i]->m_HasNonMovableAllocations)
13726  {
13727  ++result;
13728  }
13729  }
13730  return result;
13731 }
13732 
13733 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13734  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13735  VkDeviceSize maxBytesToMove,
13736  uint32_t maxAllocationsToMove,
13738 {
13739  if(!m_AllAllocations && m_AllocationCount == 0)
13740  {
13741  return VK_SUCCESS;
13742  }
13743 
13744  const size_t blockCount = m_Blocks.size();
13745  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13746  {
13747  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13748 
13749  if(m_AllAllocations)
13750  {
13751  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13752  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13753  it != pMetadata->m_Suballocations.end();
13754  ++it)
13755  {
13756  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13757  {
13758  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13759  pBlockInfo->m_Allocations.push_back(allocInfo);
13760  }
13761  }
13762  }
13763 
13764  pBlockInfo->CalcHasNonMovableAllocations();
13765 
13766  // This is a choice based on research.
13767  // Option 1:
13768  pBlockInfo->SortAllocationsByOffsetDescending();
13769  // Option 2:
13770  //pBlockInfo->SortAllocationsBySizeDescending();
13771  }
13772 
13773  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
13774  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13775 
13776  // This is a choice based on research.
13777  const uint32_t roundCount = 2;
13778 
13779  // Execute defragmentation rounds (the main part).
13780  VkResult result = VK_SUCCESS;
13781  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13782  {
13783  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
13784  }
13785 
13786  return result;
13787 }
13788 
13789 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13790  size_t dstBlockIndex, VkDeviceSize dstOffset,
13791  size_t srcBlockIndex, VkDeviceSize srcOffset)
13792 {
13793  if(dstBlockIndex < srcBlockIndex)
13794  {
13795  return true;
13796  }
13797  if(dstBlockIndex > srcBlockIndex)
13798  {
13799  return false;
13800  }
13801  if(dstOffset < srcOffset)
13802  {
13803  return true;
13804  }
13805  return false;
13806 }
13807 
13809 // VmaDefragmentationAlgorithm_Fast
13810 
// Constructs the fast defragmentation algorithm, which compacts allocations
// by streaming them towards the front of the block list (see Defragment).
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // This algorithm assumes no debug margins around allocations.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
13827 
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
    // Nothing to release explicitly; m_BlockInfos cleans up in its own destructor.
}
13831 
13832 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13833  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13834  VkDeviceSize maxBytesToMove,
13835  uint32_t maxAllocationsToMove,
13837 {
13838  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13839 
13840  const size_t blockCount = m_pBlockVector->GetBlockCount();
13841  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13842  {
13843  return VK_SUCCESS;
13844  }
13845 
13846  PreprocessMetadata();
13847 
13848  // Sort blocks in order from most destination.
13849 
13850  m_BlockInfos.resize(blockCount);
13851  for(size_t i = 0; i < blockCount; ++i)
13852  {
13853  m_BlockInfos[i].origBlockIndex = i;
13854  }
13855 
13856  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13857  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13858  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13859  });
13860 
13861  // THE MAIN ALGORITHM
13862 
13863  FreeSpaceDatabase freeSpaceDb;
13864 
13865  size_t dstBlockInfoIndex = 0;
13866  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13867  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13868  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13869  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13870  VkDeviceSize dstOffset = 0;
13871 
13872  bool end = false;
13873  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13874  {
13875  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13876  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13877  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13878  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13879  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13880  {
13881  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13882  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13883  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13884  if(m_AllocationsMoved == maxAllocationsToMove ||
13885  m_BytesMoved + srcAllocSize > maxBytesToMove)
13886  {
13887  end = true;
13888  break;
13889  }
13890  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13891 
13892  VmaDefragmentationMove move = {};
13893  // Try to place it in one of free spaces from the database.
13894  size_t freeSpaceInfoIndex;
13895  VkDeviceSize dstAllocOffset;
13896  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13897  freeSpaceInfoIndex, dstAllocOffset))
13898  {
13899  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13900  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13901  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13902 
13903  // Same block
13904  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13905  {
13906  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13907 
13908  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13909 
13910  VmaSuballocation suballoc = *srcSuballocIt;
13911  suballoc.offset = dstAllocOffset;
13912  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13913  m_BytesMoved += srcAllocSize;
13914  ++m_AllocationsMoved;
13915 
13916  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13917  ++nextSuballocIt;
13918  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13919  srcSuballocIt = nextSuballocIt;
13920 
13921  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13922 
13923  move.srcBlockIndex = srcOrigBlockIndex;
13924  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13925  move.srcOffset = srcAllocOffset;
13926  move.dstOffset = dstAllocOffset;
13927  move.size = srcAllocSize;
13928 
13929  moves.push_back(move);
13930  }
13931  // Different block
13932  else
13933  {
13934  // MOVE OPTION 2: Move the allocation to a different block.
13935 
13936  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13937 
13938  VmaSuballocation suballoc = *srcSuballocIt;
13939  suballoc.offset = dstAllocOffset;
13940  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13941  m_BytesMoved += srcAllocSize;
13942  ++m_AllocationsMoved;
13943 
13944  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13945  ++nextSuballocIt;
13946  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13947  srcSuballocIt = nextSuballocIt;
13948 
13949  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13950 
13951  move.srcBlockIndex = srcOrigBlockIndex;
13952  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13953  move.srcOffset = srcAllocOffset;
13954  move.dstOffset = dstAllocOffset;
13955  move.size = srcAllocSize;
13956 
13957  moves.push_back(move);
13958  }
13959  }
13960  else
13961  {
13962  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13963 
13964  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
13965  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13966  dstAllocOffset + srcAllocSize > dstBlockSize)
13967  {
13968  // But before that, register remaining free space at the end of dst block.
13969  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13970 
13971  ++dstBlockInfoIndex;
13972  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13973  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13974  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13975  dstBlockSize = pDstMetadata->GetSize();
13976  dstOffset = 0;
13977  dstAllocOffset = 0;
13978  }
13979 
13980  // Same block
13981  if(dstBlockInfoIndex == srcBlockInfoIndex)
13982  {
13983  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13984 
13985  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13986 
13987  bool skipOver = overlap;
13988  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13989  {
13990  // If destination and source place overlap, skip if it would move it
13991  // by only < 1/64 of its size.
13992  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13993  }
13994 
13995  if(skipOver)
13996  {
13997  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13998 
13999  dstOffset = srcAllocOffset + srcAllocSize;
14000  ++srcSuballocIt;
14001  }
14002  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14003  else
14004  {
14005  srcSuballocIt->offset = dstAllocOffset;
14006  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14007  dstOffset = dstAllocOffset + srcAllocSize;
14008  m_BytesMoved += srcAllocSize;
14009  ++m_AllocationsMoved;
14010  ++srcSuballocIt;
14011 
14012  move.srcBlockIndex = srcOrigBlockIndex;
14013  move.dstBlockIndex = dstOrigBlockIndex;
14014  move.srcOffset = srcAllocOffset;
14015  move.dstOffset = dstAllocOffset;
14016  move.size = srcAllocSize;
14017 
14018  moves.push_back(move);
14019  }
14020  }
14021  // Different block
14022  else
14023  {
14024  // MOVE OPTION 2: Move the allocation to a different block.
14025 
14026  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14027  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14028 
14029  VmaSuballocation suballoc = *srcSuballocIt;
14030  suballoc.offset = dstAllocOffset;
14031  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14032  dstOffset = dstAllocOffset + srcAllocSize;
14033  m_BytesMoved += srcAllocSize;
14034  ++m_AllocationsMoved;
14035 
14036  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14037  ++nextSuballocIt;
14038  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14039  srcSuballocIt = nextSuballocIt;
14040 
14041  pDstMetadata->m_Suballocations.push_back(suballoc);
14042 
14043  move.srcBlockIndex = srcOrigBlockIndex;
14044  move.dstBlockIndex = dstOrigBlockIndex;
14045  move.srcOffset = srcAllocOffset;
14046  move.dstOffset = dstAllocOffset;
14047  move.size = srcAllocSize;
14048 
14049  moves.push_back(move);
14050  }
14051  }
14052  }
14053  }
14054 
14055  m_BlockInfos.clear();
14056 
14057  PostprocessMetadata();
14058 
14059  return VK_SUCCESS;
14060 }
14061 
14062 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14063 {
14064  const size_t blockCount = m_pBlockVector->GetBlockCount();
14065  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14066  {
14067  VmaBlockMetadata_Generic* const pMetadata =
14068  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14069  pMetadata->m_FreeCount = 0;
14070  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14071  pMetadata->m_FreeSuballocationsBySize.clear();
14072  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14073  it != pMetadata->m_Suballocations.end(); )
14074  {
14075  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14076  {
14077  VmaSuballocationList::iterator nextIt = it;
14078  ++nextIt;
14079  pMetadata->m_Suballocations.erase(it);
14080  it = nextIt;
14081  }
14082  else
14083  {
14084  ++it;
14085  }
14086  }
14087  }
14088 }
14089 
// Rebuilds free-space bookkeeping after the fast defragmentation pass:
// re-inserts FREE suballocations in the gaps between (and after) the now
// compacted used suballocations, recomputes m_FreeCount / m_SumFreeSize,
// and re-sorts the by-size free list. Counterpart of PreprocessMetadata().
void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
{
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        const VkDeviceSize blockSize = pMetadata->GetSize();

        // No allocations in this block - entire area is free.
        if(pMetadata->m_Suballocations.empty())
        {
            pMetadata->m_FreeCount = 1;
            //pMetadata->m_SumFreeSize is already set to blockSize.
            VmaSuballocation suballoc = {
                0, // offset
                blockSize, // size
                VMA_NULL, // hAllocation
                VMA_SUBALLOCATION_TYPE_FREE };
            pMetadata->m_Suballocations.push_back(suballoc);
            pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
        }
        // There are some allocations in this block.
        else
        {
            VkDeviceSize offset = 0; // Running end-offset of the previous used suballocation.
            VmaSuballocationList::iterator it;
            for(it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                // Preprocess removed all FREE entries; the list must be sorted by offset.
                VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
                VMA_ASSERT(it->offset >= offset);

                // Need to insert preceding free space.
                if(it->offset > offset)
                {
                    ++pMetadata->m_FreeCount;
                    const VkDeviceSize freeSize = it->offset - offset;
                    VmaSuballocation suballoc = {
                        offset, // offset
                        freeSize, // size
                        VMA_NULL, // hAllocation
                        VMA_SUBALLOCATION_TYPE_FREE };
                    VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                    if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                    {
                        pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
                    }
                }

                pMetadata->m_SumFreeSize -= it->size;
                offset = it->offset + it->size;
            }

            // Need to insert trailing free space.
            if(offset < blockSize)
            {
                ++pMetadata->m_FreeCount;
                const VkDeviceSize freeSize = blockSize - offset;
                VmaSuballocation suballoc = {
                    offset, // offset
                    freeSize, // size
                    VMA_NULL, // hAllocation
                    VMA_SUBALLOCATION_TYPE_FREE };
                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
                // NOTE(review): strict '>' here vs '>=' in the preceding-free
                // case above - confirm the asymmetry is intentional.
                if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
                }
            }

            VMA_SORT(
                pMetadata->m_FreeSuballocationsBySize.begin(),
                pMetadata->m_FreeSuballocationsBySize.end(),
                VmaSuballocationItemSizeLess());
        }

        VMA_HEAVY_ASSERT(pMetadata->Validate());
    }
}
14172 
14173 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14174 {
14175  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14176  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14177  while(it != pMetadata->m_Suballocations.end())
14178  {
14179  if(it->offset < suballoc.offset)
14180  {
14181  ++it;
14182  }
14183  }
14184  pMetadata->m_Suballocations.insert(it, suballoc);
14185 }
14186 
14188 // VmaBlockVectorDefragmentationContext
14189 
// Constructs the per-block-vector defragmentation context.
//
// hAllocator    - owning allocator (used for callbacks and later algorithm allocation).
// hCustomPool   - the custom pool this context belongs to, or VMA_NULL for a
//                 default memory-type pool.
// pBlockVector  - the block vector being defragmented.
// currFrameIndex - current frame index, forwarded to the chosen algorithm.
//
// The actual algorithm object (m_pAlgorithm) is created later in Begin().
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
    defragmentationMovesProcessed(0),
    defragmentationMovesCommitted(0),
    hasDefragmentationPlan(0),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
14211 
// Destroys the algorithm object created in Begin().
// vma_delete is safe to call with VMA_NULL if Begin() was never invoked.
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    vma_delete(m_hAllocator, m_pAlgorithm);
}
14216 
14217 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14218 {
14219  AllocInfo info = { hAlloc, pChanged };
14220  m_Allocations.push_back(info);
14221 }
14222 
14223 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14224 {
14225  const bool allAllocations = m_AllAllocations ||
14226  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14227 
14228  /********************************
14229  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14230  ********************************/
14231 
14232  /*
14233  Fast algorithm is supported only when certain criteria are met:
14234  - VMA_DEBUG_MARGIN is 0.
14235  - All allocations in this block vector are moveable.
14236  - There is no possibility of image/buffer granularity conflict.
14237  - The defragmentation is not incremental
14238  */
14239  if(VMA_DEBUG_MARGIN == 0 &&
14240  allAllocations &&
14241  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14243  {
14244  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14245  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14246  }
14247  else
14248  {
14249  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14250  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14251  }
14252 
14253  if(allAllocations)
14254  {
14255  m_pAlgorithm->AddAll();
14256  }
14257  else
14258  {
14259  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14260  {
14261  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14262  }
14263  }
14264 }
14265 
14267 // VmaDefragmentationContext
14268 
// Constructs the top-level defragmentation context.
//
// hAllocator    - owning allocator.
// currFrameIndex - current frame index, propagated to per-pool contexts.
// flags         - VmaDefragmentationFlags (e.g. incremental mode).
// pStats        - optional output statistics, may be null.
//
// m_DefaultPoolContexts is a fixed-size array indexed by memory type; it is
// zeroed here and filled lazily in AddAllocations().
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
14282 
14283 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14284 {
14285  for(size_t i = m_CustomPoolContexts.size(); i--; )
14286  {
14287  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14288  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14289  vma_delete(m_hAllocator, pBlockVectorCtx);
14290  }
14291  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14292  {
14293  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14294  if(pBlockVectorCtx)
14295  {
14296  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14297  vma_delete(m_hAllocator, pBlockVectorCtx);
14298  }
14299  }
14300 }
14301 
14302 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
14303 {
14304  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14305  {
14306  VmaPool pool = pPools[poolIndex];
14307  VMA_ASSERT(pool);
14308  // Pools with algorithm other than default are not defragmented.
14309  if(pool->m_BlockVector.GetAlgorithm() == 0)
14310  {
14311  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14312 
14313  for(size_t i = m_CustomPoolContexts.size(); i--; )
14314  {
14315  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14316  {
14317  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14318  break;
14319  }
14320  }
14321 
14322  if(!pBlockVectorDefragCtx)
14323  {
14324  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14325  m_hAllocator,
14326  pool,
14327  &pool->m_BlockVector,
14328  m_CurrFrameIndex);
14329  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14330  }
14331 
14332  pBlockVectorDefragCtx->AddAll();
14333  }
14334  }
14335 }
14336 
// Dispatches individual allocations to per-block-vector defragmentation
// contexts, creating contexts lazily as needed. Dedicated and lost
// allocations, and allocations from custom pools with a non-default
// algorithm, are skipped (their pAllocationsChanged entry is then never set).
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    const VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Linear search for an existing context for this pool.
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                // Default pools are indexed directly by memory type.
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // pBlockVectorDefragCtx stays VMA_NULL if the allocation's pool
            // was skipped above; in that case nothing is registered.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
14405 
14406 VkResult VmaDefragmentationContext_T::Defragment(
14407  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14408  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14409  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14410 {
14411  if(pStats)
14412  {
14413  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14414  }
14415 
14417  {
14418  // For incremental defragmetnations, we just earmark how much we can move
14419  // The real meat is in the defragmentation steps
14420  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14421  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14422 
14423  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14424  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14425 
14426  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14427  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14428  return VK_SUCCESS;
14429 
14430  return VK_NOT_READY;
14431  }
14432 
14433  if(commandBuffer == VK_NULL_HANDLE)
14434  {
14435  maxGpuBytesToMove = 0;
14436  maxGpuAllocationsToMove = 0;
14437  }
14438 
14439  VkResult res = VK_SUCCESS;
14440 
14441  // Process default pools.
14442  for(uint32_t memTypeIndex = 0;
14443  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14444  ++memTypeIndex)
14445  {
14446  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14447  if(pBlockVectorCtx)
14448  {
14449  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14450  pBlockVectorCtx->GetBlockVector()->Defragment(
14451  pBlockVectorCtx,
14452  pStats, flags,
14453  maxCpuBytesToMove, maxCpuAllocationsToMove,
14454  maxGpuBytesToMove, maxGpuAllocationsToMove,
14455  commandBuffer);
14456  if(pBlockVectorCtx->res != VK_SUCCESS)
14457  {
14458  res = pBlockVectorCtx->res;
14459  }
14460  }
14461  }
14462 
14463  // Process custom pools.
14464  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14465  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14466  ++customCtxIndex)
14467  {
14468  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14469  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14470  pBlockVectorCtx->GetBlockVector()->Defragment(
14471  pBlockVectorCtx,
14472  pStats, flags,
14473  maxCpuBytesToMove, maxCpuAllocationsToMove,
14474  maxGpuBytesToMove, maxGpuAllocationsToMove,
14475  commandBuffer);
14476  if(pBlockVectorCtx->res != VK_SUCCESS)
14477  {
14478  res = pBlockVectorCtx->res;
14479  }
14480  }
14481 
14482  return res;
14483 }
14484 
// Begins one incremental defragmentation pass: ensures every block-vector
// context has a defragmentation plan (building one on first use, within the
// budgets recorded by Defragment()), then fills pInfo->pMoves with up to
// pInfo->moveCount pending moves. On return, pInfo->moveCount is reduced to
// the number of moves actually written. Always returns VK_SUCCESS;
// per-context failures are recorded in each context's `res` and simply skip
// that context here.
VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
{
    VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
    uint32_t movesLeft = pInfo->moveCount;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount();
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());

            // Lazily build the plan once; later passes reuse it.
            if(!pBlockVectorCtx->hasDefragmentationPlan)
            {
                pBlockVectorCtx->GetBlockVector()->Defragment(
                    pBlockVectorCtx,
                    m_pStats, m_Flags,
                    m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
                    m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
                    VK_NULL_HANDLE);

                // Planning failed for this context - skip it this pass.
                if(pBlockVectorCtx->res < VK_SUCCESS)
                    continue;

                pBlockVectorCtx->hasDefragmentationPlan = true;
            }

            const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
                pBlockVectorCtx,
                pCurrentMove, movesLeft);

            movesLeft -= processed;
            pCurrentMove += processed;
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());

        if(!pBlockVectorCtx->hasDefragmentationPlan)
        {
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                m_pStats, m_Flags,
                m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
                m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
                VK_NULL_HANDLE);

            if(pBlockVectorCtx->res < VK_SUCCESS)
                continue;

            pBlockVectorCtx->hasDefragmentationPlan = true;
        }

        const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
            pBlockVectorCtx,
            pCurrentMove, movesLeft);

        movesLeft -= processed;
        pCurrentMove += processed;
    }

    // Report how many moves were actually handed back to the caller.
    pInfo->moveCount = pInfo->moveCount - movesLeft;

    return VK_SUCCESS;
}
14559 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14560 {
14561  VkResult res = VK_SUCCESS;
14562 
14563  // Process default pools.
14564  for(uint32_t memTypeIndex = 0;
14565  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14566  ++memTypeIndex)
14567  {
14568  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14569  if(pBlockVectorCtx)
14570  {
14571  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14572 
14573  if(!pBlockVectorCtx->hasDefragmentationPlan)
14574  {
14575  res = VK_NOT_READY;
14576  continue;
14577  }
14578 
14579  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14580  pBlockVectorCtx, m_pStats);
14581 
14582  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14583  res = VK_NOT_READY;
14584  }
14585  }
14586 
14587  // Process custom pools.
14588  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14589  customCtxIndex < customCtxCount;
14590  ++customCtxIndex)
14591  {
14592  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14593  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14594 
14595  if(!pBlockVectorCtx->hasDefragmentationPlan)
14596  {
14597  res = VK_NOT_READY;
14598  continue;
14599  }
14600 
14601  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14602  pBlockVectorCtx, m_pStats);
14603 
14604  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14605  res = VK_NOT_READY;
14606  }
14607 
14608  return res;
14609 }
14610 
14612 // VmaRecorder
14613 
14614 #if VMA_RECORDING_ENABLED
14615 
// Constructs the recorder in an inert state; no file is opened until Init().
// m_Freq / m_StartCounter stay at INT64_MAX until Init() queries the
// performance counter.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
14624 
// Opens the recording file and writes the CSV header.
// Windows-specific implementation: uses fopen_s and the QueryPerformance*
// timer API. Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be
// opened, VK_SUCCESS otherwise.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and a start reference; presumably used by
    // GetBasicParams() to compute the per-call timestamps - confirm there.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,8"); // recording file format version

    return VK_SUCCESS;
}
14646 
// Closes the recording file if Init() succeeded. The guard is required:
// m_File stays VMA_NULL when Init() was never called or failed, and fclose
// must not be called with a null stream.
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
14654 
14655 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14656 {
14657  CallParams callParams;
14658  GetBasicParams(callParams);
14659 
14660  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14661  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14662  Flush();
14663 }
14664 
14665 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14666 {
14667  CallParams callParams;
14668  GetBasicParams(callParams);
14669 
14670  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14671  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14672  Flush();
14673 }
14674 
// Appends a vmaCreatePool entry (create-info fields plus the resulting pool
// handle) to the recording file.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
14691 
14692 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14693 {
14694  CallParams callParams;
14695  GetBasicParams(callParams);
14696 
14697  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14698  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14699  pool);
14700  Flush();
14701 }
14702 
// Appends a vmaAllocateMemory entry (memory requirements, create-info fields,
// resulting allocation handle and user-data string) to the recording file.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
14727 
// Appends a vmaAllocateMemoryPages entry: shared request parameters followed
// by the list of resulting allocation handles and the user-data string.
void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    // Note: no trailing '\n' here - the pointer list and user data complete the line.
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}
14753 
// Appends a vmaAllocateMemoryForBuffer entry, including the dedicated-
// allocation hints reported by the implementation for this buffer.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
14782 
14783 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14784  const VkMemoryRequirements& vkMemReq,
14785  bool requiresDedicatedAllocation,
14786  bool prefersDedicatedAllocation,
14787  const VmaAllocationCreateInfo& createInfo,
14788  VmaAllocation allocation)
14789 {
14790  CallParams callParams;
14791  GetBasicParams(callParams);
14792 
14793  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14794  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14795  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14796  vkMemReq.size,
14797  vkMemReq.alignment,
14798  vkMemReq.memoryTypeBits,
14799  requiresDedicatedAllocation ? 1 : 0,
14800  prefersDedicatedAllocation ? 1 : 0,
14801  createInfo.flags,
14802  createInfo.usage,
14803  createInfo.requiredFlags,
14804  createInfo.preferredFlags,
14805  createInfo.memoryTypeBits,
14806  createInfo.pool,
14807  allocation,
14808  userDataStr.GetString());
14809  Flush();
14810 }
14811 
14812 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14813  VmaAllocation allocation)
14814 {
14815  CallParams callParams;
14816  GetBasicParams(callParams);
14817 
14818  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14819  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14820  allocation);
14821  Flush();
14822 }
14823 
14824 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14825  uint64_t allocationCount,
14826  const VmaAllocation* pAllocations)
14827 {
14828  CallParams callParams;
14829  GetBasicParams(callParams);
14830 
14831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14832  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14833  PrintPointerList(allocationCount, pAllocations);
14834  fprintf(m_File, "\n");
14835  Flush();
14836 }
14837 
14838 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14839  VmaAllocation allocation,
14840  const void* pUserData)
14841 {
14842  CallParams callParams;
14843  GetBasicParams(callParams);
14844 
14845  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14846  UserDataString userDataStr(
14847  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14848  pUserData);
14849  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14850  allocation,
14851  userDataStr.GetString());
14852  Flush();
14853 }
14854 
14855 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14856  VmaAllocation allocation)
14857 {
14858  CallParams callParams;
14859  GetBasicParams(callParams);
14860 
14861  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14862  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14863  allocation);
14864  Flush();
14865 }
14866 
14867 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14868  VmaAllocation allocation)
14869 {
14870  CallParams callParams;
14871  GetBasicParams(callParams);
14872 
14873  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14874  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14875  allocation);
14876  Flush();
14877 }
14878 
14879 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14880  VmaAllocation allocation)
14881 {
14882  CallParams callParams;
14883  GetBasicParams(callParams);
14884 
14885  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14886  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14887  allocation);
14888  Flush();
14889 }
14890 
14891 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14892  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14893 {
14894  CallParams callParams;
14895  GetBasicParams(callParams);
14896 
14897  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14898  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14899  allocation,
14900  offset,
14901  size);
14902  Flush();
14903 }
14904 
14905 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14906  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14907 {
14908  CallParams callParams;
14909  GetBasicParams(callParams);
14910 
14911  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14912  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14913  allocation,
14914  offset,
14915  size);
14916  Flush();
14917 }
14918 
14919 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14920  const VkBufferCreateInfo& bufCreateInfo,
14921  const VmaAllocationCreateInfo& allocCreateInfo,
14922  VmaAllocation allocation)
14923 {
14924  CallParams callParams;
14925  GetBasicParams(callParams);
14926 
14927  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14928  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14929  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14930  bufCreateInfo.flags,
14931  bufCreateInfo.size,
14932  bufCreateInfo.usage,
14933  bufCreateInfo.sharingMode,
14934  allocCreateInfo.flags,
14935  allocCreateInfo.usage,
14936  allocCreateInfo.requiredFlags,
14937  allocCreateInfo.preferredFlags,
14938  allocCreateInfo.memoryTypeBits,
14939  allocCreateInfo.pool,
14940  allocation,
14941  userDataStr.GetString());
14942  Flush();
14943 }
14944 
14945 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14946  const VkImageCreateInfo& imageCreateInfo,
14947  const VmaAllocationCreateInfo& allocCreateInfo,
14948  VmaAllocation allocation)
14949 {
14950  CallParams callParams;
14951  GetBasicParams(callParams);
14952 
14953  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14954  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14955  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14956  imageCreateInfo.flags,
14957  imageCreateInfo.imageType,
14958  imageCreateInfo.format,
14959  imageCreateInfo.extent.width,
14960  imageCreateInfo.extent.height,
14961  imageCreateInfo.extent.depth,
14962  imageCreateInfo.mipLevels,
14963  imageCreateInfo.arrayLayers,
14964  imageCreateInfo.samples,
14965  imageCreateInfo.tiling,
14966  imageCreateInfo.usage,
14967  imageCreateInfo.sharingMode,
14968  imageCreateInfo.initialLayout,
14969  allocCreateInfo.flags,
14970  allocCreateInfo.usage,
14971  allocCreateInfo.requiredFlags,
14972  allocCreateInfo.preferredFlags,
14973  allocCreateInfo.memoryTypeBits,
14974  allocCreateInfo.pool,
14975  allocation,
14976  userDataStr.GetString());
14977  Flush();
14978 }
14979 
14980 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14981  VmaAllocation allocation)
14982 {
14983  CallParams callParams;
14984  GetBasicParams(callParams);
14985 
14986  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14987  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14988  allocation);
14989  Flush();
14990 }
14991 
14992 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14993  VmaAllocation allocation)
14994 {
14995  CallParams callParams;
14996  GetBasicParams(callParams);
14997 
14998  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14999  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15000  allocation);
15001  Flush();
15002 }
15003 
15004 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15005  VmaAllocation allocation)
15006 {
15007  CallParams callParams;
15008  GetBasicParams(callParams);
15009 
15010  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15011  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15012  allocation);
15013  Flush();
15014 }
15015 
15016 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15017  VmaAllocation allocation)
15018 {
15019  CallParams callParams;
15020  GetBasicParams(callParams);
15021 
15022  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15023  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15024  allocation);
15025  Flush();
15026 }
15027 
15028 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15029  VmaPool pool)
15030 {
15031  CallParams callParams;
15032  GetBasicParams(callParams);
15033 
15034  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15035  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15036  pool);
15037  Flush();
15038 }
15039 
15040 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15041  const VmaDefragmentationInfo2& info,
15043 {
15044  CallParams callParams;
15045  GetBasicParams(callParams);
15046 
15047  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15048  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15049  info.flags);
15050  PrintPointerList(info.allocationCount, info.pAllocations);
15051  fprintf(m_File, ",");
15052  PrintPointerList(info.poolCount, info.pPools);
15053  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15054  info.maxCpuBytesToMove,
15056  info.maxGpuBytesToMove,
15058  info.commandBuffer,
15059  ctx);
15060  Flush();
15061 }
15062 
15063 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15065 {
15066  CallParams callParams;
15067  GetBasicParams(callParams);
15068 
15069  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15070  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15071  ctx);
15072  Flush();
15073 }
15074 
15075 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15076  VmaPool pool,
15077  const char* name)
15078 {
15079  CallParams callParams;
15080  GetBasicParams(callParams);
15081 
15082  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15083  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15084  pool, name != VMA_NULL ? name : "");
15085  Flush();
15086 }
15087 
15088 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15089 {
15090  if(pUserData != VMA_NULL)
15091  {
15092  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15093  {
15094  m_Str = (const char*)pUserData;
15095  }
15096  else
15097  {
15098  sprintf_s(m_PtrStr, "%p", pUserData);
15099  m_Str = m_PtrStr;
15100  }
15101  }
15102  else
15103  {
15104  m_Str = "";
15105  }
15106 }
15107 
// Writes the "Config,Begin" .. "Config,End" header section of the recording file:
// Vulkan API version, physical-device properties and limits, the memory heap/type
// layout, which extensions were enabled, and the values of VMA debug macros.
// A replayer uses this section to validate that playback happens on a compatible
// device/configuration. Called once during allocator Init(); no file lock is
// taken here (recording has not been handed out to other threads yet).
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    uint32_t vulkanApiVersion,
    bool dedicatedAllocationExtensionEnabled,
    bool bindMemory2ExtensionEnabled,
    bool memoryBudgetExtensionEnabled,
    bool deviceCoherentMemoryExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Only the limits VMA's suballocation logic depends on.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Full memory heap and memory type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    // Which optional extensions were active when recording (as 0/1 flags).
    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
    fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
    fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);

    // Compile-time VMA configuration macros in effect for this recording.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
15162 
15163 void VmaRecorder::GetBasicParams(CallParams& outParams)
15164 {
15165  outParams.threadId = GetCurrentThreadId();
15166 
15167  LARGE_INTEGER counter;
15168  QueryPerformanceCounter(&counter);
15169  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
15170 }
15171 
15172 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15173 {
15174  if(count)
15175  {
15176  fprintf(m_File, "%p", pItems[0]);
15177  for(uint64_t i = 1; i < count; ++i)
15178  {
15179  fprintf(m_File, " %p", pItems[i]);
15180  }
15181  }
15182 }
15183 
15184 void VmaRecorder::Flush()
15185 {
15186  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15187  {
15188  fflush(m_File);
15189  }
15190 }
15191 
15192 #endif // #if VMA_RECORDING_ENABLED
15193 
15195 // VmaAllocationObjectAllocator
15196 
// Constructs the pooled allocator for VmaAllocation_T objects.
// 1024 is the number of allocation objects per internal pool block.
VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_Allocator(pAllocationCallbacks, 1024)
{
}
15201 
// Thread-safe construction of a VmaAllocation_T object from the internal pool,
// forwarding all constructor arguments.
template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
{
    // Serialize access to the shared pool allocator.
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
}
15207 
15208 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15209 {
15210  VmaMutexLock mutexLock(m_Mutex);
15211  m_Allocator.Free(hAlloc);
15212 }
15213 
15215 // VmaAllocator_T
15216 
15217 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15218  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15219  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15220  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15221  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15222  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15223  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15224  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15225  m_hDevice(pCreateInfo->device),
15226  m_hInstance(pCreateInfo->instance),
15227  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15228  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15229  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15230  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15231  m_HeapSizeLimitMask(0),
15232  m_PreferredLargeHeapBlockSize(0),
15233  m_PhysicalDevice(pCreateInfo->physicalDevice),
15234  m_CurrentFrameIndex(0),
15235  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15236  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15237  m_NextPoolId(0),
15238  m_GlobalMemoryTypeBits(UINT32_MAX)
15240  ,m_pRecorder(VMA_NULL)
15241 #endif
15242 {
15243  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15244  {
15245  m_UseKhrDedicatedAllocation = false;
15246  m_UseKhrBindMemory2 = false;
15247  }
15248 
15249  if(VMA_DEBUG_DETECT_CORRUPTION)
15250  {
15251  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15252  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15253  }
15254 
15255  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15256 
15257  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15258  {
15259 #if !(VMA_DEDICATED_ALLOCATION)
15261  {
15262  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15263  }
15264 #endif
15265 #if !(VMA_BIND_MEMORY2)
15266  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15267  {
15268  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15269  }
15270 #endif
15271  }
15272 #if !(VMA_MEMORY_BUDGET)
15273  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15274  {
15275  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15276  }
15277 #endif
15278 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15279  if(m_UseKhrBufferDeviceAddress)
15280  {
15281  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15282  }
15283 #endif
15284 #if VMA_VULKAN_VERSION < 1002000
15285  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15286  {
15287  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15288  }
15289 #endif
15290 #if VMA_VULKAN_VERSION < 1001000
15291  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15292  {
15293  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15294  }
15295 #endif
15296 
15297  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
15298  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15299  memset(&m_MemProps, 0, sizeof(m_MemProps));
15300 
15301  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15302  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15303  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15304 
15305  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15306  {
15307  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15308  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15309  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15310  }
15311 
15312  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15313 
15314  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15315  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15316 
15317  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15318  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15319  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15320  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15321 
15322  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15323  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15324 
15325  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15326 
15327  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15328  {
15329  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15330  {
15331  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15332  if(limit != VK_WHOLE_SIZE)
15333  {
15334  m_HeapSizeLimitMask |= 1u << heapIndex;
15335  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15336  {
15337  m_MemProps.memoryHeaps[heapIndex].size = limit;
15338  }
15339  }
15340  }
15341  }
15342 
15343  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15344  {
15345  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15346 
15347  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15348  this,
15349  VK_NULL_HANDLE, // hParentPool
15350  memTypeIndex,
15351  preferredBlockSize,
15352  0,
15353  SIZE_MAX,
15354  GetBufferImageGranularity(),
15355  pCreateInfo->frameInUseCount,
15356  false, // explicitBlockSize
15357  false); // linearAlgorithm
15358  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
15359  // becase minBlockCount is 0.
15360  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15361 
15362  }
15363 }
15364 
// Second-phase initialization, separate from the constructor so it can fail
// with a VkResult: sets up the optional call recorder (when pRecordSettings
// names a file) and takes the initial VK_EXT_memory_budget snapshot.
// Returns VK_SUCCESS, a recorder Init() error, or VK_ERROR_FEATURE_NOT_PRESENT
// when recording is requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is requested iff a non-empty file path was provided.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the device/config header first, then the creation event itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_VulkanApiVersion,
            m_UseKhrDedicatedAllocation,
            m_UseKhrBindMemory2,
            m_UseExtMemoryBudget,
            m_UseAmdDeviceCoherentMemory);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        // Prime the budget data so queries work before the first frame.
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET

    return res;
}
15403 
// Destroys the allocator: finalizes the recorder (if any), verifies all custom
// pools and dedicated allocations were released by the user, and frees the
// per-memory-type block vectors and dedicated-allocation lists.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        // Log the destruction event before tearing the recorder down.
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    // All custom pools must have been destroyed by the user.
    VMA_ASSERT(m_Pools.empty());

    // Iterate memory types in reverse order of creation.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
        {
            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
        }

        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
15427 
// Populates m_VulkanFunctions from up to three sources, later sources filling
// only the slots earlier ones left null: statically linked prototypes (when
// VMA_STATIC_VULKAN_FUNCTIONS), the user-supplied table, then dynamic fetch
// via vkGet*ProcAddr (when VMA_DYNAMIC_VULKAN_FUNCTIONS). Asserts at the end
// that every required pointer is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    ImportVulkanFunctions_Static();
#endif

    // User-provided pointers override the statically linked ones.
    if(pVulkanFunctions != VMA_NULL)
    {
        ImportVulkanFunctions_Custom(pVulkanFunctions);
    }

#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
    ImportVulkanFunctions_Dynamic();
#endif

    ValidateVulkanFunctions();
}
15445 
15446 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15447 
// Fills the function table from the statically linked Vulkan loader symbols.
// The Vulkan 1.1 entry points are assigned to the *KHR-named slots, since core
// 1.1 subsumes the corresponding KHR extensions.
void VmaAllocator_T::ImportVulkanFunctions_Static()
{
    // Vulkan 1.0
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;

    // Vulkan 1.1
#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        // Core 1.1 functions stored in the KHR-suffixed table slots.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
        m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
        m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
    }
#endif
}
15481 
15482 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15483 
// Copies user-supplied function pointers into m_VulkanFunctions, taking each
// one only if the user provided a non-null value (so it layers over whatever
// ImportVulkanFunctions_Static may already have filled in).
void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
{
    VMA_ASSERT(pVulkanFunctions != VMA_NULL);

// Copy a single member, skipping null entries in the source table.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    VMA_COPY_IF_NOT_NULL(vkMapMemory);
    VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    VMA_COPY_IF_NOT_NULL(vkCreateImage);
    VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);

    // Extension / core-1.1 entry points, present only when compiled in.
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif

#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif

#if VMA_MEMORY_BUDGET
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif

#undef VMA_COPY_IF_NOT_NULL
}
15525 
15526 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15527 
// Fills any still-null slots of m_VulkanFunctions by fetching them at runtime
// with vkGetInstanceProcAddr / vkGetDeviceProcAddr. Entries already set by the
// static or custom import are left untouched.
void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
{
// Fetch an instance-level function only if its slot is still null.
#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
// Fetch a device-level function only if its slot is still null.
#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);

    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
    VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
    VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
    VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
    VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
    VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
    VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
    VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
    VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
    VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
    VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
    VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");

    // Extension entry points, fetched only when the extension is in use.
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
    }
#endif

#if VMA_BIND_MEMORY2
    if(m_UseKhrBindMemory2)
    {
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2

#if VMA_MEMORY_BUDGET
    // On Vulkan >= 1.1 the core entry point was already imported statically.
    if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
#endif // #if VMA_MEMORY_BUDGET

#undef VMA_FETCH_DEVICE_FUNC
#undef VMA_FETCH_INSTANCE_FUNC
}
15583 
15584 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15585 
15586 void VmaAllocator_T::ValidateVulkanFunctions()
15587 {
15588  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15589  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15590  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15591  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15592  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15593  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15594  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15595  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15596  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15597  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15598  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15599  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15600  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15601  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15602  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15603  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15604  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15605 
15606 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15607  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15608  {
15609  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15610  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15611  }
15612 #endif
15613 
15614 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15615  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15616  {
15617  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15618  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15619  }
15620 #endif
15621 
15622 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15623  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15624  {
15625  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15626  }
15627 #endif
15628 }
15629 
15630 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15631 {
15632  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15633  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15634  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15635  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15636 }
15637 
// Allocates `allocationCount` allocations from one specific Vulkan memory type.
// Strategy: first try sub-allocating from the memory type's default block vector;
// if that fails (and NEVER_ALLOCATE is not set), fall back to dedicated
// VkDeviceMemory allocations. When DEDICATED_MEMORY_BIT is set (or forced by the
// heuristics below), the block vector is skipped entirely.
// On success all `pAllocations` entries are filled; returns a VkResult error otherwise.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkBufferUsageFlags dedicatedBufferUsage,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations != VMA_NULL);
    VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);

    // Work on a local copy so the caller's createInfo is never mutated.
    VmaAllocationCreateInfo finalCreateInfo = createInfo;

    // If memory type is not HOST_VISIBLE, disable MAPPED.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    {
        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }
    // If memory is lazily allocated, it should be always dedicated.
    if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
    {
        // NOTE(review): the body of this branch is missing from this listing -
        // presumably it sets VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT on
        // finalCreateInfo.flags; confirm against the original vk_mem_alloc.h.
    }

    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    bool preferDedicatedMemory =
        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
        dedicatedAllocation ||
        // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
        size > preferredBlockSize / 2;

    if(preferDedicatedMemory &&
        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
        finalCreateInfo.pool == VK_NULL_HANDLE)
    {
        // NOTE(review): the body of this branch is missing from this listing -
        // presumably it sets VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT on
        // finalCreateInfo.flags; confirm against the original vk_mem_alloc.h.
    }

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        // Dedicated requested but allocation of new memory forbidden: impossible.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedBufferUsage,
                dedicatedImage,
                allocationCount,
                pAllocations);
        }
    }
    else
    {
        // Primary path: sub-allocate from the memory type's block vector.
        VkResult res = blockVector->Allocate(
            m_CurrentFrameIndex.load(),
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            allocationCount,
            pAllocations);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // 5. Try dedicated memory.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            res = AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedBufferUsage,
                dedicatedImage,
                allocationCount,
                pAllocations);
            if(res == VK_SUCCESS)
            {
                // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
                VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
            else
            {
                // Everything failed: Return error code.
                VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
                return res;
            }
        }
    }
}
15758 
// Allocates `allocationCount` separate dedicated VkDeviceMemory blocks of `size`
// bytes each, one per VmaAllocation, optionally persistently mapped.
// All-or-nothing: if any page fails, every page already created is rolled back
// (freed, unregistered from budget, destroyed) and pAllocations is zeroed.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool withinBudget,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkBufferUsageFlags dedicatedBufferUsage,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    // Optional soft check: refuse up front if the whole batch would exceed the
    // heap's current budget.
    if(withinBudget)
    {
        const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaBudget heapBudget = {};
        GetBudget(&heapBudget, heapIndex, 1);
        if(heapBudget.usage + size * allocationCount > heapBudget.budget)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Chain VkMemoryDedicatedAllocateInfoKHR when the allocation is tied to a
    // specific buffer or image and the extension / Vulkan 1.1 is available.
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000

    // Chain VkMemoryAllocateFlagsInfoKHR with DEVICE_ADDRESS when the memory may
    // back a buffer created with SHADER_DEVICE_ADDRESS usage.
#if VMA_BUFFER_DEVICE_ADDRESS
    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
    if(m_UseKhrBufferDeviceAddress)
    {
        bool canContainBufferWithDeviceAddress = true;
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
                (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            // Memory dedicated to an image can never contain a buffer.
            canContainBufferWithDeviceAddress = false;
        }
        if(canContainBufferWithDeviceAddress)
        {
            allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
            VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
        }
    }
#endif // #if VMA_BUFFER_DEVICE_ADDRESS

    // Allocate the pages one by one; stop at the first failure.
    // allocIndex is intentionally declared outside the loop - the rollback path
    // below reuses it to know how many pages succeeded.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations, in reverse order.
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
            currAlloc->SetUserData(this, VMA_NULL);
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        // Leave no dangling handles in the output array.
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
15893 
15894 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15895  VkDeviceSize size,
15896  VmaSuballocationType suballocType,
15897  uint32_t memTypeIndex,
15898  const VkMemoryAllocateInfo& allocInfo,
15899  bool map,
15900  bool isUserDataString,
15901  void* pUserData,
15902  VmaAllocation* pAllocation)
15903 {
15904  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15905  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15906  if(res < 0)
15907  {
15908  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15909  return res;
15910  }
15911 
15912  void* pMappedData = VMA_NULL;
15913  if(map)
15914  {
15915  res = (*m_VulkanFunctions.vkMapMemory)(
15916  m_hDevice,
15917  hMemory,
15918  0,
15919  VK_WHOLE_SIZE,
15920  0,
15921  &pMappedData);
15922  if(res < 0)
15923  {
15924  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15925  FreeVulkanMemory(memTypeIndex, size, hMemory);
15926  return res;
15927  }
15928  }
15929 
15930  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
15931  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15932  (*pAllocation)->SetUserData(this, pUserData);
15933  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15934  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15935  {
15936  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15937  }
15938 
15939  return VK_SUCCESS;
15940 }
15941 
15942 void VmaAllocator_T::GetBufferMemoryRequirements(
15943  VkBuffer hBuffer,
15944  VkMemoryRequirements& memReq,
15945  bool& requiresDedicatedAllocation,
15946  bool& prefersDedicatedAllocation) const
15947 {
15948 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15949  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15950  {
15951  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15952  memReqInfo.buffer = hBuffer;
15953 
15954  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15955 
15956  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15957  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15958 
15959  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15960 
15961  memReq = memReq2.memoryRequirements;
15962  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15963  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15964  }
15965  else
15966 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15967  {
15968  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15969  requiresDedicatedAllocation = false;
15970  prefersDedicatedAllocation = false;
15971  }
15972 }
15973 
15974 void VmaAllocator_T::GetImageMemoryRequirements(
15975  VkImage hImage,
15976  VkMemoryRequirements& memReq,
15977  bool& requiresDedicatedAllocation,
15978  bool& prefersDedicatedAllocation) const
15979 {
15980 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15981  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15982  {
15983  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15984  memReqInfo.image = hImage;
15985 
15986  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15987 
15988  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15989  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15990 
15991  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15992 
15993  memReq = memReq2.memoryRequirements;
15994  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15995  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15996  }
15997  else
15998 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15999  {
16000  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16001  requiresDedicatedAllocation = false;
16002  prefersDedicatedAllocation = false;
16003  }
16004 }
16005 
// Top-level allocation entry point. Validates the flag combinations, then either
// allocates from the caller-specified custom pool, or finds a suitable memory
// type via vmaFindMemoryTypeIndex and delegates to AllocateMemoryOfType,
// retrying with progressively fewer candidate memory types on failure.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkBufferUsageFlags dedicatedBufferUsage,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    // Start from a clean output array so partially-failed paths never leave
    // stale handles behind.
    memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);

    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));

    if(vkMemReq.size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // Validate mutually exclusive / nonsensical flag combinations.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    // NOTE(review): the second operand of the condition below is missing from
    // this listing - per the assert message it is presumably
    // (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0;
    // confirm against the original vk_mem_alloc.h.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if(requiresDedicatedAllocation)
    {
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        // Custom pool path: allocate directly from the pool's block vector,
        // honoring the memory type's minimum alignment.
        const VkDeviceSize alignmentForPool = VMA_MAX(
            vkMemReq.alignment,
            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));

        VmaAllocationCreateInfo createInfoForPool = createInfo;
        // If memory type is not HOST_VISIBLE, disable MAPPED.
        if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
            (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
        }

        return createInfo.pool->m_BlockVector.Allocate(
            m_CurrentFrameIndex.load(),
            vkMemReq.size,
            alignmentForPool,
            createInfoForPool,
            suballocType,
            allocationCount,
            pAllocations);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            VkDeviceSize alignmentForMemType = VMA_MAX(
                vkMemReq.alignment,
                GetMemoryTypeMinAlignment(memTypeIndex));

            res = AllocateMemoryOfType(
                vkMemReq.size,
                alignmentForMemType,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedBufferUsage,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                allocationCount,
                pAllocations);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        alignmentForMemType = VMA_MAX(
                            vkMemReq.alignment,
                            GetMemoryTypeMinAlignment(memTypeIndex));

                        res = AllocateMemoryOfType(
                            vkMemReq.size,
                            alignmentForMemType,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedBufferUsage,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            allocationCount,
                            pAllocations);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}
16158 
16159 void VmaAllocator_T::FreeMemory(
16160  size_t allocationCount,
16161  const VmaAllocation* pAllocations)
16162 {
16163  VMA_ASSERT(pAllocations);
16164 
16165  for(size_t allocIndex = allocationCount; allocIndex--; )
16166  {
16167  VmaAllocation allocation = pAllocations[allocIndex];
16168 
16169  if(allocation != VK_NULL_HANDLE)
16170  {
16171  if(TouchAllocation(allocation))
16172  {
16173  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16174  {
16175  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16176  }
16177 
16178  switch(allocation->GetType())
16179  {
16180  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16181  {
16182  VmaBlockVector* pBlockVector = VMA_NULL;
16183  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16184  if(hPool != VK_NULL_HANDLE)
16185  {
16186  pBlockVector = &hPool->m_BlockVector;
16187  }
16188  else
16189  {
16190  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16191  pBlockVector = m_pBlockVectors[memTypeIndex];
16192  }
16193  pBlockVector->Free(allocation);
16194  }
16195  break;
16196  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16197  FreeDedicatedMemory(allocation);
16198  break;
16199  default:
16200  VMA_ASSERT(0);
16201  }
16202  }
16203 
16204  // Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes.
16205  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16206  allocation->SetUserData(this, VMA_NULL);
16207  m_AllocationObjectAllocator.Free(allocation);
16208  }
16209  }
16210 }
16211 
16212 VkResult VmaAllocator_T::ResizeAllocation(
16213  const VmaAllocation alloc,
16214  VkDeviceSize newSize)
16215 {
16216  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16217  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16218  {
16219  return VK_ERROR_VALIDATION_FAILED_EXT;
16220  }
16221  if(newSize == alloc->GetSize())
16222  {
16223  return VK_SUCCESS;
16224  }
16225  return VK_ERROR_OUT_OF_POOL_MEMORY;
16226 }
16227 
16228 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16229 {
16230  // Initialize.
16231  InitStatInfo(pStats->total);
16232  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16233  InitStatInfo(pStats->memoryType[i]);
16234  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16235  InitStatInfo(pStats->memoryHeap[i]);
16236 
16237  // Process default pools.
16238  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16239  {
16240  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16241  VMA_ASSERT(pBlockVector);
16242  pBlockVector->AddStats(pStats);
16243  }
16244 
16245  // Process custom pools.
16246  {
16247  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16248  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16249  {
16250  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16251  }
16252  }
16253 
16254  // Process dedicated allocations.
16255  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16256  {
16257  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16258  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16259  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16260  VMA_ASSERT(pDedicatedAllocVector);
16261  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16262  {
16263  VmaStatInfo allocationStatInfo;
16264  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16265  VmaAddStatInfo(pStats->total, allocationStatInfo);
16266  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16267  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16268  }
16269  }
16270 
16271  // Postprocess.
16272  VmaPostprocessCalcStatInfo(pStats->total);
16273  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16274  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16275  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16276  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16277 }
16278 
// Fills `heapCount` VmaBudget entries starting at heap `firstHeap`.
// When VK_EXT_memory_budget is in use, combines the driver-reported usage/budget
// (cached in m_Budget) with the allocator's own block-byte deltas since the last
// fetch; the cache is refreshed after 30 or more budget-affecting operations.
// Without the extension, usage is estimated as this allocator's block bytes and
// budget as 80% of the heap size.
void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
{
#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        // Heuristic: the cached driver numbers are considered fresh for up to
        // 30 operations since the last fetch.
        if(m_Budget.m_OperationsSinceBudgetFetch < 30)
        {
            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
            for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
            {
                const uint32_t heapIndex = firstHeap + i;

                outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
                outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

                // Adjust the driver-reported usage by how much this allocator's
                // block bytes changed since the budget was fetched; clamp at 0.
                if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
                {
                    outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
                        outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
                }
                else
                {
                    outBudget->usage = 0;
                }

                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
                outBudget->budget = VMA_MIN(
                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
            }
        }
        else
        {
            UpdateVulkanBudget(); // Outside of mutex lock
            GetBudget(outBudget, firstHeap, heapCount); // Recursion
        }
    }
    else
#endif
    {
        // Extension not available: report this allocator's own block bytes as
        // usage and estimate the budget.
        for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
        {
            const uint32_t heapIndex = firstHeap + i;

            outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
            outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

            outBudget->usage = outBudget->blockBytes;
            outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
        }
    }
}
16330 
// 4098 == 0x1002, AMD's PCI vendor ID as reported in VkPhysicalDeviceProperties::vendorID.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16332 
// Starts a defragmentation operation: creates a VmaDefragmentationContext_T,
// registers the pools and allocations to process, and runs the first Defragment
// step. If the result is anything other than VK_NOT_READY the context is
// destroyed immediately and *pContext is reset to null.
VkResult VmaAllocator_T::DefragmentationBegin(
    const VmaDefragmentationInfo2& info,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext* pContext)
{
    // Clear the caller's per-allocation "changed" flags up front.
    if(info.pAllocationsChanged != VMA_NULL)
    {
        memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    }

    *pContext = vma_new(this, VmaDefragmentationContext_T)(
        this, m_CurrentFrameIndex.load(), info.flags, pStats);

    (*pContext)->AddPools(info.poolCount, info.pPools);
    // NOTE(review): the argument list of AddAllocations is truncated in this
    // listing - presumably info.allocationCount, info.pAllocations and
    // info.pAllocationsChanged; confirm against the original vk_mem_alloc.h.
    (*pContext)->AddAllocations(

    // NOTE(review): the first arguments of Defragment are also truncated here -
    // presumably the max CPU/GPU bytes-to-move and allocations-to-move limits
    // from `info`; confirm against the original vk_mem_alloc.h.
    VkResult res = (*pContext)->Defragment(
        info.commandBuffer, pStats, info.flags);

    // VK_NOT_READY means the operation continues asynchronously and the context
    // must stay alive until DefragmentationEnd.
    if(res != VK_NOT_READY)
    {
        vma_delete(this, *pContext);
        *pContext = VMA_NULL;
    }

    return res;
}
16363 
16364 VkResult VmaAllocator_T::DefragmentationEnd(
16365  VmaDefragmentationContext context)
16366 {
16367  vma_delete(this, context);
16368  return VK_SUCCESS;
16369 }
16370 
// Thin forwarder to VmaDefragmentationContext_T::DefragmentPassBegin.
// NOTE(review): a parameter line is missing from this listing - `pInfo` below is
// otherwise undeclared; presumably the signature also takes a
// VmaDefragmentationPassInfo* pInfo. Confirm against the original vk_mem_alloc.h.
VkResult VmaAllocator_T::DefragmentationPassBegin(
    VmaDefragmentationContext context)
{
    return context->DefragmentPassBegin(pInfo);
}
16377 VkResult VmaAllocator_T::DefragmentationPassEnd(
16378  VmaDefragmentationContext context)
16379 {
16380  return context->DefragmentPassEnd();
16381 
16382 }
16383 
// Fills *pAllocationInfo with the allocation's current state and, as a side
// effect, marks the allocation as used in the current frame (lock-free
// compare-exchange on the last-use frame index). For allocations that can
// become lost, a lost allocation is reported with null memory/mapped data and
// memoryType == UINT32_MAX.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: retry until the last-use frame index is observed as either
        // LOST or equal to the current frame (after our own update succeeded).
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost: report size and user data only.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the last-use frame to the current frame; on CAS
                // failure localLastUseFrameIndex was reloaded - loop again.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Non-lost-capable allocation: the frame-index update below exists only
        // so statistics report accurate last-use frames.
#if VMA_STATS_STRING_ENABLED
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
16455 
/*
Marks the allocation as used in the current frame and reports whether it is
still valid. Returns false if the allocation has already been lost.
This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free loop: advance the allocation's last-use frame index to the
        // current frame via compare-exchange, retrying on contention.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was already lost - cannot be touched.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already stamped with the current frame - done.
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
                // On CAS failure localLastUseFrameIndex was reloaded; loop again.
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Allocation cannot become lost: still stamp the frame index so that
        // statistics report accurate last-use information.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // A non-lost-capable allocation is always valid.
        return true;
    }
}
16507 
16508 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16509 {
16510  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16511 
16512  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16513 
16514  if(newCreateInfo.maxBlockCount == 0)
16515  {
16516  newCreateInfo.maxBlockCount = SIZE_MAX;
16517  }
16518  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16519  {
16520  return VK_ERROR_INITIALIZATION_FAILED;
16521  }
16522  // Memory type index out of range or forbidden.
16523  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16524  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16525  {
16526  return VK_ERROR_FEATURE_NOT_PRESENT;
16527  }
16528 
16529  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16530 
16531  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16532 
16533  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16534  if(res != VK_SUCCESS)
16535  {
16536  vma_delete(this, *pPool);
16537  *pPool = VMA_NULL;
16538  return res;
16539  }
16540 
16541  // Add to m_Pools.
16542  {
16543  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16544  (*pPool)->SetId(m_NextPoolId++);
16545  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16546  }
16547 
16548  return VK_SUCCESS;
16549 }
16550 
16551 void VmaAllocator_T::DestroyPool(VmaPool pool)
16552 {
16553  // Remove from m_Pools.
16554  {
16555  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16556  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16557  VMA_ASSERT(success && "Pool not found in Allocator.");
16558  }
16559 
16560  vma_delete(this, pool);
16561 }
16562 
// Fills *pPoolStats with statistics of the given custom pool.
// Delegates to the pool's block vector, which owns all of its memory blocks.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
16567 
// Stores the new frame index (used by lost-allocation tracking) and, when the
// VK_EXT_memory_budget extension is in use, refreshes cached budget data -
// a new frame is the natural point to re-query it.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET
}
16579 
// Marks all allocations in the given pool as lost as of the current frame.
// If pLostAllocationCount is not null, it receives the number of allocations
// that were actually marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
16588 
// Validates margin bytes of all allocations in the given custom pool.
// Forwards to the pool's block vector; see CheckCorruption for the meaning
// of the returned VkResult values.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
16593 
16594 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16595 {
16596  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16597 
16598  // Process default pools.
16599  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16600  {
16601  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16602  {
16603  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16604  VMA_ASSERT(pBlockVector);
16605  VkResult localRes = pBlockVector->CheckCorruption();
16606  switch(localRes)
16607  {
16608  case VK_ERROR_FEATURE_NOT_PRESENT:
16609  break;
16610  case VK_SUCCESS:
16611  finalRes = VK_SUCCESS;
16612  break;
16613  default:
16614  return localRes;
16615  }
16616  }
16617  }
16618 
16619  // Process custom pools.
16620  {
16621  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16622  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16623  {
16624  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16625  {
16626  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16627  switch(localRes)
16628  {
16629  case VK_ERROR_FEATURE_NOT_PRESENT:
16630  break;
16631  case VK_SUCCESS:
16632  finalRes = VK_SUCCESS;
16633  break;
16634  default:
16635  return localRes;
16636  }
16637  }
16638  }
16639  }
16640 
16641  return finalRes;
16642 }
16643 
// Creates a dummy allocation that is permanently in the "lost" state,
// usable as a placeholder wherever a VmaAllocation handle is required.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
16649 
/*
Central wrapper around vkAllocateMemory: enforces the optional per-heap size
limit, keeps m_Budget.m_BlockBytes accounting in sync, and invokes the user's
informative pfnAllocate callback on success.
*/
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    // HeapSizeLimit is in effect for this heap.
    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
    {
        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
        // Lock-free reservation: commit the new byte count atomically, or fail
        // if the allocation would exceed the heap size limit.
        for(;;)
        {
            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
            if(blockBytesAfterAllocation > heapSize)
            {
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            // On failure, compare_exchange_strong reloads blockBytes with the
            // current value and the loop retries.
            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
            {
                break;
            }
        }
    }
    else
    {
        // No limit configured for this heap - just account for the new block.
        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
    }

    // VULKAN CALL vkAllocateMemory.
    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);

    if(res == VK_SUCCESS)
    {
#if VMA_MEMORY_BUDGET
        ++m_Budget.m_OperationsSinceBudgetFetch;
#endif

        // Informative callback.
        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
        {
            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
        }
    }
    else
    {
        // Allocation failed - roll back the bytes reserved above.
        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
    }

    return res;
}
16699 
/*
Central wrapper around vkFreeMemory: invokes the user's informative pfnFree
callback, frees the device memory, and reverses the per-heap byte accounting
done in AllocateVulkanMemory.
*/
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Informative callback.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
    }

    // VULKAN CALL vkFreeMemory.
    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Mirror of the accounting done in AllocateVulkanMemory.
    m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
}
16713 
/*
Binds a buffer to device memory at the given offset.
A non-null pNext chain requires vkBindBufferMemory2 (Vulkan 1.1 core or the
VK_KHR_bind_memory2 extension); without it, VK_ERROR_EXTENSION_NOT_PRESENT
is returned. With pNext == null, plain vkBindBufferMemory is used.
*/
VkResult VmaAllocator_T::BindVulkanBuffer(
    VkDeviceMemory memory,
    VkDeviceSize memoryOffset,
    VkBuffer buffer,
    const void* pNext)
{
    if(pNext != VMA_NULL)
    {
#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
        {
            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
            bindBufferMemoryInfo.pNext = pNext;
            bindBufferMemoryInfo.buffer = buffer;
            bindBufferMemoryInfo.memory = memory;
            bindBufferMemoryInfo.memoryOffset = memoryOffset;
            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
        }
        else
#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
        {
            // pNext chain requested but bind_memory2 is unavailable.
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }
    else
    {
        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
    }
}
16744 
16745 VkResult VmaAllocator_T::BindVulkanImage(
16746  VkDeviceMemory memory,
16747  VkDeviceSize memoryOffset,
16748  VkImage image,
16749  const void* pNext)
16750 {
16751  if(pNext != VMA_NULL)
16752  {
16753 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16754  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16755  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
16756  {
16757  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
16758  bindBufferMemoryInfo.pNext = pNext;
16759  bindBufferMemoryInfo.image = image;
16760  bindBufferMemoryInfo.memory = memory;
16761  bindBufferMemoryInfo.memoryOffset = memoryOffset;
16762  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
16763  }
16764  else
16765 #endif // #if VMA_BIND_MEMORY2
16766  {
16767  return VK_ERROR_EXTENSION_NOT_PRESENT;
16768  }
16769  }
16770  else
16771  {
16772  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
16773  }
16774 }
16775 
/*
Maps the allocation's memory and returns a CPU pointer in *ppData.
Allocations that can become lost are never mappable and yield
VK_ERROR_MEMORY_MAP_FAILED. Mapping is reference-counted per block and
per allocation; every successful Map must be balanced by Unmap.
*/
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            // Map (or reuse the existing mapping of) the whole block...
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                // ...and return a pointer advanced to this allocation's offset,
                // bumping the allocation's own map reference count.
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
16804 
16805 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16806 {
16807  switch(hAllocation->GetType())
16808  {
16809  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16810  {
16811  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16812  hAllocation->BlockAllocUnmap();
16813  pBlock->Unmap(this, 1);
16814  }
16815  break;
16816  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16817  hAllocation->DedicatedAllocUnmap(this);
16818  break;
16819  default:
16820  VMA_ASSERT(0);
16821  }
16822 }
16823 
16824 VkResult VmaAllocator_T::BindBufferMemory(
16825  VmaAllocation hAllocation,
16826  VkDeviceSize allocationLocalOffset,
16827  VkBuffer hBuffer,
16828  const void* pNext)
16829 {
16830  VkResult res = VK_SUCCESS;
16831  switch(hAllocation->GetType())
16832  {
16833  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16834  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16835  break;
16836  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16837  {
16838  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16839  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16840  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16841  break;
16842  }
16843  default:
16844  VMA_ASSERT(0);
16845  }
16846  return res;
16847 }
16848 
/*
Binds an image to the memory backing the given allocation.
allocationLocalOffset is relative to the allocation's start, not to the
VkDeviceMemory block.
*/
VkResult VmaAllocator_T::BindImageMemory(
    VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // The allocation owns the whole VkDeviceMemory: bind directly.
        res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        // Suballocated from a bigger block: delegate so the block can add
        // the allocation's offset and serialize against concurrent binds.
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
        break;
    }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
16873 
/*
Flushes or invalidates the CPU cache for a sub-range of the allocation.
offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
Does nothing for coherent memory types or when size == 0, since flushing
is only required for non-coherent memory.
The resulting VkMappedMemoryRange is expanded to multiples of
nonCoherentAtomSize as required by the Vulkan spec, then clamped so it never
reaches past the end of the allocation (dedicated) or the block (block).
*/
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation: offsets are already relative to the
            // VkDeviceMemory object. Align the start down to the atom size.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            // Clamp so the range never reaches past the end of the block.
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
16949 
/*
Frees a dedicated allocation: unregisters it from the per-memory-type list of
dedicated allocations (under its mutex) and releases the underlying
VkDeviceMemory. The VmaAllocation object itself is destroyed by the caller.
*/
void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
16979 
16980 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16981 {
16982  VkBufferCreateInfo dummyBufCreateInfo;
16983  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16984 
16985  uint32_t memoryTypeBits = 0;
16986 
16987  // Create buffer.
16988  VkBuffer buf = VK_NULL_HANDLE;
16989  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16990  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16991  if(res == VK_SUCCESS)
16992  {
16993  // Query for supported memory types.
16994  VkMemoryRequirements memReq;
16995  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16996  memoryTypeBits = memReq.memoryTypeBits;
16997 
16998  // Destroy buffer.
16999  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17000  }
17001 
17002  return memoryTypeBits;
17003 }
17004 
17005 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17006 {
17007  // Make sure memory information is already fetched.
17008  VMA_ASSERT(GetMemoryTypeCount() > 0);
17009 
17010  uint32_t memoryTypeBits = UINT32_MAX;
17011 
17012  if(!m_UseAmdDeviceCoherentMemory)
17013  {
17014  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17015  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17016  {
17017  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17018  {
17019  memoryTypeBits &= ~(1u << memTypeIndex);
17020  }
17021  }
17022  }
17023 
17024  return memoryTypeBits;
17025 }
17026 
17027 #if VMA_MEMORY_BUDGET
17028 
/*
Re-queries per-heap usage/budget from VK_EXT_memory_budget and stores it in
m_Budget (under the budget mutex), sanitizing values reported incorrectly by
buggy drivers. Requires m_UseExtMemoryBudget.
*/
void VmaAllocator_T::UpdateVulkanBudget()
{
    VMA_ASSERT(m_UseExtMemoryBudget);

    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };

    // Chain the budget query struct into the memory properties query.
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    VmaPnextChainPushFront(&memProps, &budgetProps);

    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);

    {
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);

        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            // Snapshot our own block-byte counter at fetch time so later deltas
            // can be applied on top of the driver-reported usage.
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();

            // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
            if(m_Budget.m_VulkanBudget[heapIndex] == 0)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
            }
            else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
            }
            // Driver reported zero usage although we hold allocated blocks -
            // fall back to our own accounting.
            if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
            {
                m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
            }
        }
        m_Budget.m_OperationsSinceBudgetFetch = 0;
    }
}
17066 
17067 #endif // #if VMA_MEMORY_BUDGET
17068 
/*
Debug helper: fills the allocation's memory with the given byte pattern when
VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled. Only possible for host-visible,
non-lost-capable allocations; the fill is flushed for non-coherent memory.
*/
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            // Make the pattern visible to the device even on non-coherent memory.
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
17089 
17090 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
17091 {
17092  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
17093  if(memoryTypeBits == UINT32_MAX)
17094  {
17095  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
17096  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
17097  }
17098  return memoryTypeBits;
17099 }
17100 
17101 #if VMA_STATS_STRING_ENABLED
17102 
/*
Writes the detailed memory map as JSON: dedicated allocations per memory type,
default pools per memory type, and custom pools. Used by vmaBuildStatsString
when detailedMap is requested.
*/
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" object is opened lazily, only if at least one
    // memory type has dedicated allocations.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools" object, also opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
17188 
17189 #endif // #if VMA_STATS_STRING_ENABLED
17190 
17192 // Public interface
17193 
// Public entry point: creates the VmaAllocator object.
// Only Vulkan API versions 1.0 - 1.2 are supported by this implementation
// (0 means "use the default"), hence the assert below.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
    VMA_DEBUG_LOG("vmaCreateAllocator");
    // Construction is split from Init so Init can return a VkResult.
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
17205 
17206 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17207  VmaAllocator allocator)
17208 {
17209  if(allocator != VK_NULL_HANDLE)
17210  {
17211  VMA_DEBUG_LOG("vmaDestroyAllocator");
17212  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17213  vma_delete(&allocationCallbacks, allocator);
17214  }
17215 }
17216 
// Public entry point: returns the instance, physical device, and device
// handles the allocator was created with.
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
{
    VMA_ASSERT(allocator && pAllocatorInfo);
    pAllocatorInfo->instance = allocator->m_hInstance;
    pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
    pAllocatorInfo->device = allocator->m_hDevice;
}
17224 
// Public entry point: returns a pointer to the physical device properties
// cached inside the allocator. The pointer stays valid for the allocator's
// lifetime; no copy is made.
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}
17232 
// Public entry point: returns a pointer to the physical device memory
// properties cached inside the allocator. The pointer stays valid for the
// allocator's lifetime; no copy is made.
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}
17240 
// Public entry point: returns the property flags of a single memory type.
// memoryTypeIndex must be less than the number of memory types reported by
// the physical device.
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}
17250 
// Public entry point: advances the allocator's notion of the current frame,
// used for lost-allocation tracking. VMA_FRAME_INDEX_LOST is reserved and
// must not be passed in.
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);
    VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}
17262 
// Public entry point: computes allocation statistics over all pools and
// dedicated allocations into *pStats.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
17271 
// Public entry point: fills pBudget (an array with one entry per memory heap)
// with current usage and budget numbers for every heap.
VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget)
{
    VMA_ASSERT(allocator && pBudget);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
}
17280 
17281 #if VMA_STATS_STRING_ENABLED
17282 
17283 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17284  VmaAllocator allocator,
17285  char** ppStatsString,
17286  VkBool32 detailedMap)
17287 {
17288  VMA_ASSERT(allocator && ppStatsString);
17289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17290 
17291  VmaStringBuilder sb(allocator);
17292  {
17293  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17294  json.BeginObject();
17295 
17296  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17297  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17298 
17299  VmaStats stats;
17300  allocator->CalculateStats(&stats);
17301 
17302  json.WriteString("Total");
17303  VmaPrintStatInfo(json, stats.total);
17304 
17305  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17306  {
17307  json.BeginString("Heap ");
17308  json.ContinueString(heapIndex);
17309  json.EndString();
17310  json.BeginObject();
17311 
17312  json.WriteString("Size");
17313  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17314 
17315  json.WriteString("Flags");
17316  json.BeginArray(true);
17317  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17318  {
17319  json.WriteString("DEVICE_LOCAL");
17320  }
17321  json.EndArray();
17322 
17323  json.WriteString("Budget");
17324  json.BeginObject();
17325  {
17326  json.WriteString("BlockBytes");
17327  json.WriteNumber(budget[heapIndex].blockBytes);
17328  json.WriteString("AllocationBytes");
17329  json.WriteNumber(budget[heapIndex].allocationBytes);
17330  json.WriteString("Usage");
17331  json.WriteNumber(budget[heapIndex].usage);
17332  json.WriteString("Budget");
17333  json.WriteNumber(budget[heapIndex].budget);
17334  }
17335  json.EndObject();
17336 
17337  if(stats.memoryHeap[heapIndex].blockCount > 0)
17338  {
17339  json.WriteString("Stats");
17340  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17341  }
17342 
17343  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17344  {
17345  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17346  {
17347  json.BeginString("Type ");
17348  json.ContinueString(typeIndex);
17349  json.EndString();
17350 
17351  json.BeginObject();
17352 
17353  json.WriteString("Flags");
17354  json.BeginArray(true);
17355  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17356  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17357  {
17358  json.WriteString("DEVICE_LOCAL");
17359  }
17360  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17361  {
17362  json.WriteString("HOST_VISIBLE");
17363  }
17364  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17365  {
17366  json.WriteString("HOST_COHERENT");
17367  }
17368  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17369  {
17370  json.WriteString("HOST_CACHED");
17371  }
17372  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17373  {
17374  json.WriteString("LAZILY_ALLOCATED");
17375  }
17376  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17377  {
17378  json.WriteString(" PROTECTED");
17379  }
17380  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17381  {
17382  json.WriteString(" DEVICE_COHERENT");
17383  }
17384  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17385  {
17386  json.WriteString(" DEVICE_UNCACHED");
17387  }
17388  json.EndArray();
17389 
17390  if(stats.memoryType[typeIndex].blockCount > 0)
17391  {
17392  json.WriteString("Stats");
17393  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17394  }
17395 
17396  json.EndObject();
17397  }
17398  }
17399 
17400  json.EndObject();
17401  }
17402  if(detailedMap == VK_TRUE)
17403  {
17404  allocator->PrintDetailedMap(json);
17405  }
17406 
17407  json.EndObject();
17408  }
17409 
17410  const size_t len = sb.GetLength();
17411  char* const pChars = vma_new_array(allocator, char, len + 1);
17412  if(len > 0)
17413  {
17414  memcpy(pChars, sb.GetData(), len);
17415  }
17416  pChars[len] = '\0';
17417  *ppStatsString = pChars;
17418 }
17419 
17420 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17421  VmaAllocator allocator,
17422  char* pStatsString)
17423 {
17424  if(pStatsString != VMA_NULL)
17425  {
17426  VMA_ASSERT(allocator);
17427  size_t len = strlen(pStatsString);
17428  vma_delete_array(allocator, pStatsString, len + 1);
17429  }
17430 }
17431 
17432 #endif // #if VMA_STATS_STRING_ENABLED
17433 
17434 /*
17435 This function is not protected by any mutex because it just reads immutable data.
17436 */
17437 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17438  VmaAllocator allocator,
17439  uint32_t memoryTypeBits,
17440  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17441  uint32_t* pMemoryTypeIndex)
17442 {
17443  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17444  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17445  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17446 
17447  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17448 
17449  if(pAllocationCreateInfo->memoryTypeBits != 0)
17450  {
17451  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17452  }
17453 
17454  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17455  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17456  uint32_t notPreferredFlags = 0;
17457 
17458  // Convert usage to requiredFlags and preferredFlags.
17459  switch(pAllocationCreateInfo->usage)
17460  {
17462  break;
17464  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17465  {
17466  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17467  }
17468  break;
17470  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17471  break;
17473  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17474  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17475  {
17476  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17477  }
17478  break;
17480  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17481  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17482  break;
17484  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17485  break;
17487  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17488  break;
17489  default:
17490  VMA_ASSERT(0);
17491  break;
17492  }
17493 
17494  // Avoid DEVICE_COHERENT unless explicitly requested.
17495  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17496  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17497  {
17498  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17499  }
17500 
17501  *pMemoryTypeIndex = UINT32_MAX;
17502  uint32_t minCost = UINT32_MAX;
17503  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17504  memTypeIndex < allocator->GetMemoryTypeCount();
17505  ++memTypeIndex, memTypeBit <<= 1)
17506  {
17507  // This memory type is acceptable according to memoryTypeBits bitmask.
17508  if((memTypeBit & memoryTypeBits) != 0)
17509  {
17510  const VkMemoryPropertyFlags currFlags =
17511  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17512  // This memory type contains requiredFlags.
17513  if((requiredFlags & ~currFlags) == 0)
17514  {
17515  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17516  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17517  VmaCountBitsSet(currFlags & notPreferredFlags);
17518  // Remember memory type with lowest cost.
17519  if(currCost < minCost)
17520  {
17521  *pMemoryTypeIndex = memTypeIndex;
17522  if(currCost == 0)
17523  {
17524  return VK_SUCCESS;
17525  }
17526  minCost = currCost;
17527  }
17528  }
17529  }
17530  }
17531  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17532 }
17533 
17534 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17535  VmaAllocator allocator,
17536  const VkBufferCreateInfo* pBufferCreateInfo,
17537  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17538  uint32_t* pMemoryTypeIndex)
17539 {
17540  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17541  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17542  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17543  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17544 
17545  const VkDevice hDev = allocator->m_hDevice;
17546  VkBuffer hBuffer = VK_NULL_HANDLE;
17547  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17548  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17549  if(res == VK_SUCCESS)
17550  {
17551  VkMemoryRequirements memReq = {};
17552  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17553  hDev, hBuffer, &memReq);
17554 
17555  res = vmaFindMemoryTypeIndex(
17556  allocator,
17557  memReq.memoryTypeBits,
17558  pAllocationCreateInfo,
17559  pMemoryTypeIndex);
17560 
17561  allocator->GetVulkanFunctions().vkDestroyBuffer(
17562  hDev, hBuffer, allocator->GetAllocationCallbacks());
17563  }
17564  return res;
17565 }
17566 
17567 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17568  VmaAllocator allocator,
17569  const VkImageCreateInfo* pImageCreateInfo,
17570  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17571  uint32_t* pMemoryTypeIndex)
17572 {
17573  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17574  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17575  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17576  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17577 
17578  const VkDevice hDev = allocator->m_hDevice;
17579  VkImage hImage = VK_NULL_HANDLE;
17580  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17581  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17582  if(res == VK_SUCCESS)
17583  {
17584  VkMemoryRequirements memReq = {};
17585  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17586  hDev, hImage, &memReq);
17587 
17588  res = vmaFindMemoryTypeIndex(
17589  allocator,
17590  memReq.memoryTypeBits,
17591  pAllocationCreateInfo,
17592  pMemoryTypeIndex);
17593 
17594  allocator->GetVulkanFunctions().vkDestroyImage(
17595  hDev, hImage, allocator->GetAllocationCallbacks());
17596  }
17597  return res;
17598 }
17599 
// Creates a custom memory pool described by *pCreateInfo and returns its
// handle in *pPool. Returns the VkResult from the internal pool creation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
17622 
// Destroys a custom memory pool. Passing VK_NULL_HANDLE is a no-op.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    // Null pool: nothing to destroy and nothing to record.
    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Record BEFORE destruction so the recorder still sees a valid handle.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
17647 
17648 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
17649  VmaAllocator allocator,
17650  VmaPool pool,
17651  VmaPoolStats* pPoolStats)
17652 {
17653  VMA_ASSERT(allocator && pool && pPoolStats);
17654 
17655  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17656 
17657  allocator->GetPoolStats(pool, pPoolStats);
17658 }
17659 
// Marks all eligible allocations in the pool as lost.
// pLostAllocationCount is optional; when non-null it receives the number
// of allocations that were marked lost.
VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
}
17678 
17679 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
17680 {
17681  VMA_ASSERT(allocator && pool);
17682 
17683  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17684 
17685  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
17686 
17687  return allocator->CheckPoolCorruption(pool);
17688 }
17689 
17690 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
17691  VmaAllocator allocator,
17692  VmaPool pool,
17693  const char** ppName)
17694 {
17695  VMA_ASSERT(allocator && pool && ppName);
17696 
17697  VMA_DEBUG_LOG("vmaGetPoolName");
17698 
17699  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17700 
17701  *ppName = pool->GetName();
17702 }
17703 
// Assigns a name string to the pool (pool makes its own copy internally —
// NOTE(review): assumed from SetName taking const char*; confirm ownership).
VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_LOG("vmaSetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    pool->SetName(pName);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
    }
#endif
}
17724 
// General-purpose memory allocation for given requirements.
// pAllocationInfo is optional; when non-null and the allocation succeeds,
// it is filled with information about the new allocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // No buffer/image is associated here, so no dedicated-allocation hints are passed.
    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        UINT32_MAX, // dedicatedBufferUsage
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
17768 
// Allocates `allocationCount` allocations with identical requirements in one call.
// pAllocationInfo is optional; when non-null it must point to an array of
// allocationCount elements, filled on success.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    // Zero allocations requested: trivially successful, skip all validation.
    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        UINT32_MAX, // dedicatedBufferUsage
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryPages(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}
17822 
// Allocates memory suitable for the given existing VkBuffer.
// Queries the buffer's memory requirements (including dedicated-allocation
// preference) before allocating. Does NOT bind the buffer to the memory.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        UINT32_MAX, // dedicatedBufferUsage
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        1, // allocationCount
        pAllocation);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
17875 
// Allocates memory suitable for the given existing VkImage.
// Queries the image's memory requirements (including dedicated-allocation
// preference) before allocating. Does NOT bind the image to the memory.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        UINT32_MAX, // dedicatedBufferUsage
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
17927 
// Frees a single allocation. Passing VK_NULL_HANDLE is a no-op.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Record BEFORE freeing so the recorder still sees a valid handle.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Implemented via the batch-free path with a single-element array.
    allocator->FreeMemory(
        1, // allocationCount
        &allocation);
}
17956 
// Frees `allocationCount` allocations in one call.
// A count of zero is a no-op and skips all validation.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Record BEFORE freeing so the recorder still sees valid handles.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemoryPages(
            allocator->GetCurrentFrameIndex(),
            (uint64_t)allocationCount,
            pAllocations);
    }
#endif

    allocator->FreeMemory(allocationCount, pAllocations);
}
17985 
17986 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17987  VmaAllocator allocator,
17988  VmaAllocation allocation,
17989  VkDeviceSize newSize)
17990 {
17991  VMA_ASSERT(allocator && allocation);
17992 
17993  VMA_DEBUG_LOG("vmaResizeAllocation");
17994 
17995  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17996 
17997  return allocator->ResizeAllocation(allocation, newSize);
17998 }
17999 
// Fills *pAllocationInfo with current information about the allocation.
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordGetAllocationInfo(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}
18020 
// Touches the allocation and returns whether it is still valid (not lost),
// as reported by the allocator's TouchAllocation.
VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
18040 
// Attaches an arbitrary user-data pointer to the allocation.
VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordSetAllocationUserData(
            allocator->GetCurrentFrameIndex(),
            allocation,
            pUserData);
    }
#endif
}
18062 
// Creates a special allocation that is already in the "lost" state,
// returned in *pAllocation.
VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(allocator && pAllocation);

    // NOTE: trailing ';' after the macro differs from sibling functions; harmless.
    VMA_DEBUG_GLOBAL_MUTEX_LOCK;

    allocator->CreateLostAllocation(pAllocation);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreateLostAllocation(
            allocator->GetCurrentFrameIndex(),
            *pAllocation);
    }
#endif
}
18082 
// Maps the allocation's memory and returns the host pointer in *ppData.
// Returns the VkResult of the internal map operation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

    // Recorded even on failure — NOTE(review): the hook does not check `res`.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
18105 
// Unmaps memory previously mapped with vmaMapMemory.
VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
18125 
// Flushes the host-visible memory range [offset, offset+size) of the allocation
// so device reads see host writes (needed for non-HOST_COHERENT memory).
VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
18145 
// Invalidates the host-visible memory range [offset, offset+size) of the
// allocation so host reads see device writes (needed for non-HOST_COHERENT memory).
VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
18165 
18166 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18167 {
18168  VMA_ASSERT(allocator);
18169 
18170  VMA_DEBUG_LOG("vmaCheckCorruption");
18171 
18172  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18173 
18174  return allocator->CheckCorruption(memoryTypeBits);
18175 }
18176 
18177 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18178  VmaAllocator allocator,
18179  const VmaAllocation* pAllocations,
18180  size_t allocationCount,
18181  VkBool32* pAllocationsChanged,
18182  const VmaDefragmentationInfo *pDefragmentationInfo,
18183  VmaDefragmentationStats* pDefragmentationStats)
18184 {
18185  // Deprecated interface, reimplemented using new one.
18186 
18187  VmaDefragmentationInfo2 info2 = {};
18188  info2.allocationCount = (uint32_t)allocationCount;
18189  info2.pAllocations = pAllocations;
18190  info2.pAllocationsChanged = pAllocationsChanged;
18191  if(pDefragmentationInfo != VMA_NULL)
18192  {
18193  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18194  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18195  }
18196  else
18197  {
18198  info2.maxCpuAllocationsToMove = UINT32_MAX;
18199  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18200  }
18201  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18202 
18204  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18205  if(res == VK_NOT_READY)
18206  {
18207  res = vmaDefragmentationEnd( allocator, ctx);
18208  }
18209  return res;
18210 }
18211 
// Begins a defragmentation operation described by *pInfo; returns a context
// handle in *pContext to be passed to vmaDefragmentationEnd.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext)
{
    VMA_ASSERT(allocator && pInfo && pContext);

    // Degenerate case: Nothing to defragment.
    if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));

    VMA_DEBUG_LOG("vmaDefragmentationBegin");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);

    // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDefragmentationBegin(
            allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    }
#endif

    return res;
}
18247 
// Ends a defragmentation operation started by vmaDefragmentationBegin.
// A VK_NULL_HANDLE context (nothing was defragmented) is trivially successful.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaDefragmentationEnd");

    if(context != VK_NULL_HANDLE)
    {
        VMA_DEBUG_GLOBAL_MUTEX_LOCK

        // Record BEFORE ending so the recorder still sees a valid context.
#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordDefragmentationEnd(
                allocator->GetCurrentFrameIndex(), context);
        }
#endif

        return allocator->DefragmentationEnd(context);
    }
    else
    {
        return VK_SUCCESS;
    }
}
18275 
18276 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18277  VmaAllocator allocator,
18278  VmaDefragmentationContext context,
18280  )
18281 {
18282  VMA_ASSERT(allocator);
18283  VMA_ASSERT(pInfo);
18284  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
18285 
18286  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18287 
18288  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18289 
18290  if(context == VK_NULL_HANDLE)
18291  {
18292  pInfo->moveCount = 0;
18293  return VK_SUCCESS;
18294  }
18295 
18296  return allocator->DefragmentationPassBegin(pInfo, context);
18297 }
18298 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18299  VmaAllocator allocator,
18300  VmaDefragmentationContext context)
18301 {
18302  VMA_ASSERT(allocator);
18303 
18304  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18306 
18307  if(context == VK_NULL_HANDLE)
18308  return VK_SUCCESS;
18309 
18310  return allocator->DefragmentationPassEnd(context);
18311 }
18312 
18313 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18314  VmaAllocator allocator,
18315  VmaAllocation allocation,
18316  VkBuffer buffer)
18317 {
18318  VMA_ASSERT(allocator && allocation && buffer);
18319 
18320  VMA_DEBUG_LOG("vmaBindBufferMemory");
18321 
18322  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18323 
18324  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18325 }
18326 
18327 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18328  VmaAllocator allocator,
18329  VmaAllocation allocation,
18330  VkDeviceSize allocationLocalOffset,
18331  VkBuffer buffer,
18332  const void* pNext)
18333 {
18334  VMA_ASSERT(allocator && allocation && buffer);
18335 
18336  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18337 
18338  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18339 
18340  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18341 }
18342 
18343 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18344  VmaAllocator allocator,
18345  VmaAllocation allocation,
18346  VkImage image)
18347 {
18348  VMA_ASSERT(allocator && allocation && image);
18349 
18350  VMA_DEBUG_LOG("vmaBindImageMemory");
18351 
18352  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18353 
18354  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18355 }
18356 
18357 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18358  VmaAllocator allocator,
18359  VmaAllocation allocation,
18360  VkDeviceSize allocationLocalOffset,
18361  VkImage image,
18362  const void* pNext)
18363 {
18364  VMA_ASSERT(allocator && allocation && image);
18365 
18366  VMA_DEBUG_LOG("vmaBindImageMemory2");
18367 
18368  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18369 
18370  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18371 }
18372 
// Creates a VkBuffer, allocates memory for it, and binds them together.
// On any failure after a step succeeds, previously created objects are
// destroyed and *pBuffer / *pAllocation are reset to VK_NULL_HANDLE.
// pAllocationInfo is optional.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Zero-sized buffers are invalid per the Vulkan spec; reject early.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // Device-address usage requires the corresponding allocator feature flag.
    if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
        !allocator->m_UseKhrBufferDeviceAddress)
    {
        VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            pBufferCreateInfo->usage, // dedicatedBufferUsage
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

        // Optional call-recording hook, compiled in only when VMA_RECORDING_ENABLED is nonzero.
#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory (unless the caller opted out).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
18474 
18475 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18476  VmaAllocator allocator,
18477  VkBuffer buffer,
18478  VmaAllocation allocation)
18479 {
18480  VMA_ASSERT(allocator);
18481 
18482  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18483  {
18484  return;
18485  }
18486 
18487  VMA_DEBUG_LOG("vmaDestroyBuffer");
18488 
18489  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18490 
18491 #if VMA_RECORDING_ENABLED
18492  if(allocator->GetRecorder() != VMA_NULL)
18493  {
18494  allocator->GetRecorder()->RecordDestroyBuffer(
18495  allocator->GetCurrentFrameIndex(),
18496  allocation);
18497  }
18498 #endif
18499 
18500  if(buffer != VK_NULL_HANDLE)
18501  {
18502  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18503  }
18504 
18505  if(allocation != VK_NULL_HANDLE)
18506  {
18507  allocator->FreeMemory(
18508  1, // allocationCount
18509  &allocation);
18510  }
18511 }
18512 
// Creates a VkImage, allocates device memory for it from the allocator, and
// binds the image to that memory, unwinding each completed step if a later
// step fails.
// On success: writes the image to *pImage, the allocation to *pAllocation,
// and, if pAllocationInfo != VMA_NULL, fills *pAllocationInfo; returns
// VK_SUCCESS. On failure: *pImage and *pAllocation are left as
// VK_NULL_HANDLE and the error code is returned.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate images up front: any zero dimension, mip count, or
    // layer count would be invalid usage of vkCreateImage.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Out-parameters start null so callers see null handles on any failure.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on image tiling, so linear and optimal
        // images can be tracked separately by the allocator.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            UINT32_MAX, // dedicatedBufferUsage
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory (skipped when the caller asked for an
            // unbound allocation via VMA_ALLOCATION_CREATE_DONT_BIND_BIT).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: undo step 2 (free memory) and step 1 (destroy image).
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: undo step 1 (destroy image).
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
18615 
18616 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
18617  VmaAllocator allocator,
18618  VkImage image,
18619  VmaAllocation allocation)
18620 {
18621  VMA_ASSERT(allocator);
18622 
18623  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18624  {
18625  return;
18626  }
18627 
18628  VMA_DEBUG_LOG("vmaDestroyImage");
18629 
18630  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18631 
18632 #if VMA_RECORDING_ENABLED
18633  if(allocator->GetRecorder() != VMA_NULL)
18634  {
18635  allocator->GetRecorder()->RecordDestroyImage(
18636  allocator->GetCurrentFrameIndex(),
18637  allocation);
18638  }
18639 #endif
18640 
18641  if(image != VK_NULL_HANDLE)
18642  {
18643  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
18644  }
18645  if(allocation != VK_NULL_HANDLE)
18646  {
18647  allocator->FreeMemory(
18648  1, // allocationCount
18649  &allocation);
18650  }
18651 }
18652 
18653 #endif // #ifdef VMA_IMPLEMENTATION
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2244
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2202
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:2090
VMA_RECORD_FLAG_BITS_MAX_ENUM
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2239
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2200
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2265
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2862
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3421
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2291
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2153
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2463
VmaDefragmentationPassMoveInfo::memory
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3489
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VmaDefragmentationInfo2::pPools
const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3455
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2608
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2934
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2691
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:2101
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2940
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2671
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2231
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:2086
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2658
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2271
VMA_RECORD_FLUSH_AFTER_CALL_BIT
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2237
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2205
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:3077
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2431
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2765
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2437
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2204
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1894
VmaDefragmentationPassMoveInfo::offset
VkDeviceSize offset
Definition: vk_mem_alloc.h:3490
VmaDefragmentationPassInfo::pMoves
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3499
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2702
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaAllocatorInfo::instance
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2363
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2488
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2216
VmaAllocatorCreateInfo
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:2259
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2632
VmaAllocatorInfo::device
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2373
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3407
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2953
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2695
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2126
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3516
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2686
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2546
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, const VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2427
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2890
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2902
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3439
VmaDefragmentationPassMoveInfo
Definition: vk_mem_alloc.h:3487
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2943
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2193
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2191
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3405
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:3082
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2614
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2201
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2883
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2758
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2445
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2621
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2141
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3418
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2209
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3470
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3520
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2937
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2203
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2241
VMA_MEMORY_USAGE_CPU_ONLY
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2578
VmaAllocation
Represents single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2600
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3406
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2329
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2208
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2210
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3484
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2441
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2907
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2343
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2424
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3524
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, const VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
VmaDefragmentationPassInfo::moveCount
uint32_t moveCount
Definition: vk_mem_alloc.h:3498
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2568
vmaBeginDefragmentationPass
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2435
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2189
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2433
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2274
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:2096
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2915
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2887
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2088
VmaPool
Represents custom memory pool.
VMA_MEMORY_USAGE_GPU_TO_CPU
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2594
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2665
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2893
VMA_MEMORY_USAGE_MAX_ENUM
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2610
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2429
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2207
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3415
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3511
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2467
VmaAllocatorInfo
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2357
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2881
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2739
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2712
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2436
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2277
VMA_ALLOCATION_CREATE_STRATEGY_MASK
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2716
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2268
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2316
VmaDefragmentationPassMoveInfo::allocation
VmaAllocation allocation
Definition: vk_mem_alloc.h:3488
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2499
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VmaDefragmentationPassInfo
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2199
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3096
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2262
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3409
VmaDefragmentationInfo2::pAllocations
const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3430
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2214
VmaDeviceMemoryCallbacks::pUserData
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2092
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2437
VMA_MEMORY_USAGE_CPU_TO_GPU
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2585
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2709
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2706
VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2171
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2734
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2436
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:3087
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2247
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2206
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3101
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2845
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2444
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2322
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2956
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3497
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2443
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2723
VmaAllocatorCreateInfo::instance
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2334
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VMA_MEMORY_USAGE_UNKNOWN
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2551
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3475
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2213
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2929
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2215
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3460
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:3068
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2645
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2212
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:2065
vmaGetAllocatorInfo
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2946
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2827
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3522
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2437
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2676
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:2072
VmaDefragmentationPassMoveInfo
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2728
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2211
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2725
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2744
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
vmaEndDefragmentationPass
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3436
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3526
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2752
VmaAllocatorInfo::physicalDevice
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2368
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3528
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2255
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2436
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:3063
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2873
VmaAllocatorInfo
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2478
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2721
VmaDefragmentationContext
Represents Opaque object that represents started defragmentation process.
VMA_POOL_CREATE_ALGORITHM_MASK
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2877
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3465
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, const VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3506
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2682
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.