Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

// Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif
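// Illustrative example (not part of the original header): to pin the library to
// Vulkan 1.0 features even when newer Vulkan headers are present, define the
// macro before including this file:
//
//     #define VMA_VULKAN_VERSION 1000000 // Vulkan 1.0
//     #include "vk_mem_alloc.h"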

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, appropriately. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size,
    void* pUserData);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size,
    void* pUserData);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
    void* pUserData;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
    const VmaRecordSettings* pRecordSettings;
    VkInstance instance;
    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator);

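/*
Minimal usage sketch (illustrative, not part of the original header). It assumes
`instance`, `physicalDevice` and `device` were already created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/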
typedef struct VmaAllocatorInfo
{
    VkInstance instance;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;
    VkDeviceSize allocationBytes;
    VkDeviceSize usage;
    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget);

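/*
Sketch of querying per-heap budgets (illustrative, not part of the original
header). vmaGetBudget fills one VmaBudget entry per memory heap:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    // budgets[heapIndex].usage  - bytes currently used by the process,
    // budgets[heapIndex].budget - estimated bytes available to it without
    //                             degrading system performance.
*/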
#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as a string in JSON format.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

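/*
Sketch (illustrative, not part of the original header): choosing a memory type
for a staging buffer, e.g. before creating a custom pool. Assumes `allocator`
and a filled `bufCreateInfo`:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/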
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00010000,
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char** ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName);

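/*
Sketch of creating and naming a custom pool (illustrative, not part of the
original header; `memTypeIndex` as found above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Example: 64 MiB blocks.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    vmaSetPoolName(allocator, pool, "Staging pool");
    // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/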
VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

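/*
Sketch of writing to a host-visible allocation (illustrative, not part of the
original header; `allocation` was created in a HOST_VISIBLE memory type, and
`data`/`dataSize` are application-side assumptions):

    void* mapped;
    if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, data, (size_t)dataSize);
        // Required only if the memory type is not HOST_COHERENT:
        vmaFlushAllocation(allocator, allocation, 0, dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/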
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    VmaAllocation* pAllocations;
    VkBool32* pAllocationsChanged;
    uint32_t poolCount;
    VmaPool* pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation allocation;
    VkDeviceMemory memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* pMoves;
} VmaDefragmentationPassInfo;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator allocator,
    VmaDefragmentationContext context,
    VmaDefragmentationPassInfo* pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator allocator,
    VmaDefragmentationContext context
);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

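/*
Sketch of CPU-side defragmentation with the *2 API (illustrative, not part of
the original header; `allocations`/`allocationCount` are collected by the
application):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    // Without a command buffer, moves are performed on the CPU here.
    vmaDefragmentationEnd(allocator, defragCtx);
*/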
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

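/*
The canonical buffer-creation sketch (illustrative, not part of the original
header): create a buffer and its memory in one call, destroy both in one call:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/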
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header or change them
here if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library use pointers to Vulkan functions
statically, at compile time, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally at runtime, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
Library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif
3784 
3785 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3786 #include <cstdlib>
3787 void *aligned_alloc(size_t alignment, size_t size)
3788 {
3789  // alignment must be >= sizeof(void*)
3790  if(alignment < sizeof(void*))
3791  {
3792  alignment = sizeof(void*);
3793  }
3794 
3795  return memalign(alignment, size);
3796 }
3797 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3798 #include <cstdlib>
3799 void *aligned_alloc(size_t alignment, size_t size)
3800 {
3801  // alignment must be >= sizeof(void*)
3802  if(alignment < sizeof(void*))
3803  {
3804  alignment = sizeof(void*);
3805  }
3806 
3807  void *pointer;
3808  if(posix_memalign(&pointer, alignment, size) == 0)
3809  return pointer;
3810  return VMA_NULL;
3811 }
3812 #endif
3813 
3814 // If your compiler is not compatible with C++11 and definition of
3815 // aligned_alloc() function is missing, uncommeting following line may help:
3816 
3817 //#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns the number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
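// Illustrative note (not in the original): this is the classic SWAR popcount.
// Each step sums bit counts in progressively wider fields (2, 4, 8, 16, 32 bits).
// For example, VmaCountBitsSet(0x0000000B) == 3, since 0xB = 0b1011.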

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to the nearest integer.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
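// Worked examples (illustrative, not in the original): VmaNextPow2(17) == 32,
// VmaNextPow2(16) == 16, VmaPrevPow2(17) == 16. The OR cascade smears the
// highest set bit into every lower position; NextPow2 then adds 1 to carry into
// the next power of two, while PrevPow2 keeps only the topmost bit.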

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
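// Worked example (illustrative, not in the original): with pageSize = 1024, a
// resource occupying [0, 1000) ends on page 0 while a resource starting at
// offset 1024 starts on page 1, so VmaBlocksOnSamePage(0, 1000, 1024, 1024)
// returns false; with resourceBOffset = 1000 it would return true.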

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned iterator points to the found element if it is present in the collection,
or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
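/*
Usage sketch (illustrative, not part of the original source; `pCallbacks` is a
VkAllocationCallbacks* supplied by the application). The adapter lets the
library's containers route their storage through user-provided callbacks:

    VmaStlAllocator<int> alloc(pCallbacks);
    VmaVector< int, VmaStlAllocator<int> > v(alloc);
    v.push_back(42);
*/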

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator)
        : VmaVector(count, allocator) {}

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            // Fixed: VmaAllocateArray takes the VkAllocationCallbacks*, not the allocator object.
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded, because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    template<typename... Types> T* Alloc(Types... args);
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
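/*
Usage sketch (illustrative, not part of the original source; `MyStruct` is a
hypothetical default-constructible type). Blocks grow by a 3/2 factor starting
from firstBlockCapacity, and freed items are recycled via a per-block free list:

    VmaPoolAllocator<MyStruct> pool(pCallbacks, 32); // first block holds 32 items
    MyStruct* obj = pool.Alloc();                    // placement-new inside a block
    pool.Free(obj);                                  // destructor + back on the free list
*/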

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

5065 template<typename T>
5066 VmaRawList<T>::~VmaRawList()
5067 {
5068  // Intentionally not calling Clear, because that would spend time returning
5069  // every item to m_ItemAllocator as free - pointless right before destruction.
5070 }
5071 
5072 template<typename T>
5073 void VmaRawList<T>::Clear()
5074 {
5075  if(IsEmpty() == false)
5076  {
5077  ItemType* pItem = m_pBack;
5078  while(pItem != VMA_NULL)
5079  {
5080  ItemType* const pPrevItem = pItem->pPrev;
5081  m_ItemAllocator.Free(pItem);
5082  pItem = pPrevItem;
5083  }
5084  m_pFront = VMA_NULL;
5085  m_pBack = VMA_NULL;
5086  m_Count = 0;
5087  }
5088 }
5089 
5090 template<typename T>
5091 VmaListItem<T>* VmaRawList<T>::PushBack()
5092 {
5093  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5094  pNewItem->pNext = VMA_NULL;
5095  if(IsEmpty())
5096  {
5097  pNewItem->pPrev = VMA_NULL;
5098  m_pFront = pNewItem;
5099  m_pBack = pNewItem;
5100  m_Count = 1;
5101  }
5102  else
5103  {
5104  pNewItem->pPrev = m_pBack;
5105  m_pBack->pNext = pNewItem;
5106  m_pBack = pNewItem;
5107  ++m_Count;
5108  }
5109  return pNewItem;
5110 }
5111 
5112 template<typename T>
5113 VmaListItem<T>* VmaRawList<T>::PushFront()
5114 {
5115  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5116  pNewItem->pPrev = VMA_NULL;
5117  if(IsEmpty())
5118  {
5119  pNewItem->pNext = VMA_NULL;
5120  m_pFront = pNewItem;
5121  m_pBack = pNewItem;
5122  m_Count = 1;
5123  }
5124  else
5125  {
5126  pNewItem->pNext = m_pFront;
5127  m_pFront->pPrev = pNewItem;
5128  m_pFront = pNewItem;
5129  ++m_Count;
5130  }
5131  return pNewItem;
5132 }
5133 
5134 template<typename T>
5135 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5136 {
5137  ItemType* const pNewItem = PushBack();
5138  pNewItem->Value = value;
5139  return pNewItem;
5140 }
5141 
5142 template<typename T>
5143 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5144 {
5145  ItemType* const pNewItem = PushFront();
5146  pNewItem->Value = value;
5147  return pNewItem;
5148 }
5149 
5150 template<typename T>
5151 void VmaRawList<T>::PopBack()
5152 {
5153  VMA_HEAVY_ASSERT(m_Count > 0);
5154  ItemType* const pBackItem = m_pBack;
5155  ItemType* const pPrevItem = pBackItem->pPrev;
5156  if(pPrevItem != VMA_NULL)
5157  {
5158  pPrevItem->pNext = VMA_NULL;
5159  }
5160  m_pBack = pPrevItem;
5161  m_ItemAllocator.Free(pBackItem);
5162  --m_Count;
5163 }
5164 
5165 template<typename T>
5166 void VmaRawList<T>::PopFront()
5167 {
5168  VMA_HEAVY_ASSERT(m_Count > 0);
5169  ItemType* const pFrontItem = m_pFront;
5170  ItemType* const pNextItem = pFrontItem->pNext;
5171  if(pNextItem != VMA_NULL)
5172  {
5173  pNextItem->pPrev = VMA_NULL;
5174  }
5175  m_pFront = pNextItem;
5176  m_ItemAllocator.Free(pFrontItem);
5177  --m_Count;
5178 }
5179 
5180 template<typename T>
5181 void VmaRawList<T>::Remove(ItemType* pItem)
5182 {
5183  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5184  VMA_HEAVY_ASSERT(m_Count > 0);
5185 
5186  if(pItem->pPrev != VMA_NULL)
5187  {
5188  pItem->pPrev->pNext = pItem->pNext;
5189  }
5190  else
5191  {
5192  VMA_HEAVY_ASSERT(m_pFront == pItem);
5193  m_pFront = pItem->pNext;
5194  }
5195 
5196  if(pItem->pNext != VMA_NULL)
5197  {
5198  pItem->pNext->pPrev = pItem->pPrev;
5199  }
5200  else
5201  {
5202  VMA_HEAVY_ASSERT(m_pBack == pItem);
5203  m_pBack = pItem->pPrev;
5204  }
5205 
5206  m_ItemAllocator.Free(pItem);
5207  --m_Count;
5208 }
5209 
5210 template<typename T>
5211 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5212 {
5213  if(pItem != VMA_NULL)
5214  {
5215  ItemType* const prevItem = pItem->pPrev;
5216  ItemType* const newItem = m_ItemAllocator.Alloc();
5217  newItem->pPrev = prevItem;
5218  newItem->pNext = pItem;
5219  pItem->pPrev = newItem;
5220  if(prevItem != VMA_NULL)
5221  {
5222  prevItem->pNext = newItem;
5223  }
5224  else
5225  {
5226  VMA_HEAVY_ASSERT(m_pFront == pItem);
5227  m_pFront = newItem;
5228  }
5229  ++m_Count;
5230  return newItem;
5231  }
5232  else
5233  return PushBack();
5234 }
5235 
5236 template<typename T>
5237 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5238 {
5239  if(pItem != VMA_NULL)
5240  {
5241  ItemType* const nextItem = pItem->pNext;
5242  ItemType* const newItem = m_ItemAllocator.Alloc();
5243  newItem->pNext = nextItem;
5244  newItem->pPrev = pItem;
5245  pItem->pNext = newItem;
5246  if(nextItem != VMA_NULL)
5247  {
5248  nextItem->pPrev = newItem;
5249  }
5250  else
5251  {
5252  VMA_HEAVY_ASSERT(m_pBack == pItem);
5253  m_pBack = newItem;
5254  }
5255  ++m_Count;
5256  return newItem;
5257  }
5258  else
5259  return PushFront();
5260 }
5261 
5262 template<typename T>
5263 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5264 {
5265  ItemType* const newItem = InsertBefore(pItem);
5266  newItem->Value = value;
5267  return newItem;
5268 }
5269 
5270 template<typename T>
5271 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5272 {
5273  ItemType* const newItem = InsertAfter(pItem);
5274  newItem->Value = value;
5275  return newItem;
5276 }
5277 
5278 template<typename T, typename AllocatorT>
5279 class VmaList
5280 {
5281  VMA_CLASS_NO_COPY(VmaList)
5282 public:
5283  class iterator
5284  {
5285  public:
5286  iterator() :
5287  m_pList(VMA_NULL),
5288  m_pItem(VMA_NULL)
5289  {
5290  }
5291 
5292  T& operator*() const
5293  {
5294  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5295  return m_pItem->Value;
5296  }
5297  T* operator->() const
5298  {
5299  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5300  return &m_pItem->Value;
5301  }
5302 
5303  iterator& operator++()
5304  {
5305  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5306  m_pItem = m_pItem->pNext;
5307  return *this;
5308  }
5309  iterator& operator--()
5310  {
5311  if(m_pItem != VMA_NULL)
5312  {
5313  m_pItem = m_pItem->pPrev;
5314  }
5315  else
5316  {
5317  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5318  m_pItem = m_pList->Back();
5319  }
5320  return *this;
5321  }
5322 
5323  iterator operator++(int)
5324  {
5325  iterator result = *this;
5326  ++*this;
5327  return result;
5328  }
5329  iterator operator--(int)
5330  {
5331  iterator result = *this;
5332  --*this;
5333  return result;
5334  }
5335 
5336  bool operator==(const iterator& rhs) const
5337  {
5338  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5339  return m_pItem == rhs.m_pItem;
5340  }
5341  bool operator!=(const iterator& rhs) const
5342  {
5343  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5344  return m_pItem != rhs.m_pItem;
5345  }
5346 
5347  private:
5348  VmaRawList<T>* m_pList;
5349  VmaListItem<T>* m_pItem;
5350 
5351  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5352  m_pList(pList),
5353  m_pItem(pItem)
5354  {
5355  }
5356 
5357  friend class VmaList<T, AllocatorT>;
5358  };
5359 
5360  class const_iterator
5361  {
5362  public:
5363  const_iterator() :
5364  m_pList(VMA_NULL),
5365  m_pItem(VMA_NULL)
5366  {
5367  }
5368 
5369  const_iterator(const iterator& src) :
5370  m_pList(src.m_pList),
5371  m_pItem(src.m_pItem)
5372  {
5373  }
5374 
5375  const T& operator*() const
5376  {
5377  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5378  return m_pItem->Value;
5379  }
5380  const T* operator->() const
5381  {
5382  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5383  return &m_pItem->Value;
5384  }
5385 
5386  const_iterator& operator++()
5387  {
5388  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5389  m_pItem = m_pItem->pNext;
5390  return *this;
5391  }
5392  const_iterator& operator--()
5393  {
5394  if(m_pItem != VMA_NULL)
5395  {
5396  m_pItem = m_pItem->pPrev;
5397  }
5398  else
5399  {
5400  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5401  m_pItem = m_pList->Back();
5402  }
5403  return *this;
5404  }
5405 
5406  const_iterator operator++(int)
5407  {
5408  const_iterator result = *this;
5409  ++*this;
5410  return result;
5411  }
5412  const_iterator operator--(int)
5413  {
5414  const_iterator result = *this;
5415  --*this;
5416  return result;
5417  }
5418 
5419  bool operator==(const const_iterator& rhs) const
5420  {
5421  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5422  return m_pItem == rhs.m_pItem;
5423  }
5424  bool operator!=(const const_iterator& rhs) const
5425  {
5426  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5427  return m_pItem != rhs.m_pItem;
5428  }
5429 
5430  private:
5431  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5432  m_pList(pList),
5433  m_pItem(pItem)
5434  {
5435  }
5436 
5437  const VmaRawList<T>* m_pList;
5438  const VmaListItem<T>* m_pItem;
5439 
5440  friend class VmaList<T, AllocatorT>;
5441  };
5442 
5443  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5444 
5445  bool empty() const { return m_RawList.IsEmpty(); }
5446  size_t size() const { return m_RawList.GetCount(); }
5447 
5448  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5449  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5450 
5451  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5452  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5453 
5454  void clear() { m_RawList.Clear(); }
5455  void push_back(const T& value) { m_RawList.PushBack(value); }
5456  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5457  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5458 
5459 private:
5460  VmaRawList<T> m_RawList;
5461 };
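// Editorial note - VmaList mimics a subset of std::list, so typical iteration
// looks like this (illustrative; `process` is a hypothetical function):
//
//   VmaList<uint32_t, VmaStlAllocator<uint32_t> > list(stlAllocator);
//   list.push_back(42);
//   for(VmaList<uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin();
//       it != list.end(); ++it)
//   {
//       process(*it);
//   }
//
// end() is represented by a null m_pItem; operator--() treats null specially
// and steps to Back(), which is what makes decrementing end() legal.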
5462 
5463 #endif // #if VMA_USE_STL_LIST
5464 
5465 ////////////////////////////////////////////////////////////////////////////////
5466 // class VmaMap
5467 
5468 // Unused in this version.
5469 #if 0
5470 
5471 #if VMA_USE_STL_UNORDERED_MAP
5472 
5473 #define VmaPair std::pair
5474 
5475 #define VMA_MAP_TYPE(KeyT, ValueT) \
5476  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5477 
5478 #else // #if VMA_USE_STL_UNORDERED_MAP
5479 
5480 template<typename T1, typename T2>
5481 struct VmaPair
5482 {
5483  T1 first;
5484  T2 second;
5485 
5486  VmaPair() : first(), second() { }
5487  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5488 };
5489 
5490 /* Class compatible with a subset of the interface of std::unordered_map.
5491 KeyT and ValueT must be POD because they are stored in a VmaVector.
5492 */
5493 template<typename KeyT, typename ValueT>
5494 class VmaMap
5495 {
5496 public:
5497  typedef VmaPair<KeyT, ValueT> PairType;
5498  typedef PairType* iterator;
5499 
5500  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5501 
5502  iterator begin() { return m_Vector.begin(); }
5503  iterator end() { return m_Vector.end(); }
5504 
5505  void insert(const PairType& pair);
5506  iterator find(const KeyT& key);
5507  void erase(iterator it);
5508 
5509 private:
5510  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5511 };
5512 
5513 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5514 
5515 template<typename FirstT, typename SecondT>
5516 struct VmaPairFirstLess
5517 {
5518  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5519  {
5520  return lhs.first < rhs.first;
5521  }
5522  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5523  {
5524  return lhs.first < rhsFirst;
5525  }
5526 };
5527 
5528 template<typename KeyT, typename ValueT>
5529 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5530 {
5531  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5532  m_Vector.data(),
5533  m_Vector.data() + m_Vector.size(),
5534  pair,
5535  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5536  VmaVectorInsert(m_Vector, indexToInsert, pair);
5537 }
5538 
5539 template<typename KeyT, typename ValueT>
5540 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5541 {
5542  PairType* it = VmaBinaryFindFirstNotLess(
5543  m_Vector.data(),
5544  m_Vector.data() + m_Vector.size(),
5545  key,
5546  VmaPairFirstLess<KeyT, ValueT>());
5547  if((it != m_Vector.end()) && (it->first == key))
5548  {
5549  return it;
5550  }
5551  else
5552  {
5553  return m_Vector.end();
5554  }
5555 }
5556 
5557 template<typename KeyT, typename ValueT>
5558 void VmaMap<KeyT, ValueT>::erase(iterator it)
5559 {
5560  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5561 }
5562 
5563 #endif // #if VMA_USE_STL_UNORDERED_MAP
5564 
5565 #endif // #if 0
5566 
5567 ////////////////////////////////////////////////////////////////////////////////
5568 
5569 class VmaDeviceMemoryBlock;
5570 
5571 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5572 
5573 struct VmaAllocation_T
5574 {
5575 private:
5576  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5577 
5578  enum FLAGS
5579  {
5580  FLAG_USER_DATA_STRING = 0x01,
5581  };
5582 
5583 public:
5584  enum ALLOCATION_TYPE
5585  {
5586  ALLOCATION_TYPE_NONE,
5587  ALLOCATION_TYPE_BLOCK,
5588  ALLOCATION_TYPE_DEDICATED,
5589  };
5590 
5591  /*
5592  This struct is allocated using VmaPoolAllocator.
5593  */
5594 
5595  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
5596  m_Alignment{1},
5597  m_Size{0},
5598  m_pUserData{VMA_NULL},
5599  m_LastUseFrameIndex{currentFrameIndex},
5600  m_MemoryTypeIndex{0},
5601  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
5602  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
5603  m_MapCount{0},
5604  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
5605  {
5606 #if VMA_STATS_STRING_ENABLED
5607  m_CreationFrameIndex = currentFrameIndex;
5608  m_BufferImageUsage = 0;
5609 #endif
5610  }
5611 
5612  ~VmaAllocation_T()
5613  {
5614  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5615 
5616  // Check if owned string was freed.
5617  VMA_ASSERT(m_pUserData == VMA_NULL);
5618  }
5619 
5620  void InitBlockAllocation(
5621  VmaDeviceMemoryBlock* block,
5622  VkDeviceSize offset,
5623  VkDeviceSize alignment,
5624  VkDeviceSize size,
5625  uint32_t memoryTypeIndex,
5626  VmaSuballocationType suballocationType,
5627  bool mapped,
5628  bool canBecomeLost)
5629  {
5630  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5631  VMA_ASSERT(block != VMA_NULL);
5632  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5633  m_Alignment = alignment;
5634  m_Size = size;
5635  m_MemoryTypeIndex = memoryTypeIndex;
5636  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5637  m_SuballocationType = (uint8_t)suballocationType;
5638  m_BlockAllocation.m_Block = block;
5639  m_BlockAllocation.m_Offset = offset;
5640  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5641  }
5642 
5643  void InitLost()
5644  {
5645  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5646  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5647  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5648  m_MemoryTypeIndex = 0;
5649  m_BlockAllocation.m_Block = VMA_NULL;
5650  m_BlockAllocation.m_Offset = 0;
5651  m_BlockAllocation.m_CanBecomeLost = true;
5652  }
5653 
5654  void ChangeBlockAllocation(
5655  VmaAllocator hAllocator,
5656  VmaDeviceMemoryBlock* block,
5657  VkDeviceSize offset);
5658 
5659  void ChangeOffset(VkDeviceSize newOffset);
5660 
5661  // pMappedData not null means allocation is created with MAPPED flag.
5662  void InitDedicatedAllocation(
5663  uint32_t memoryTypeIndex,
5664  VkDeviceMemory hMemory,
5665  VmaSuballocationType suballocationType,
5666  void* pMappedData,
5667  VkDeviceSize size)
5668  {
5669  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5670  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5671  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5672  m_Alignment = 0;
5673  m_Size = size;
5674  m_MemoryTypeIndex = memoryTypeIndex;
5675  m_SuballocationType = (uint8_t)suballocationType;
5676  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5677  m_DedicatedAllocation.m_hMemory = hMemory;
5678  m_DedicatedAllocation.m_pMappedData = pMappedData;
5679  }
5680 
5681  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5682  VkDeviceSize GetAlignment() const { return m_Alignment; }
5683  VkDeviceSize GetSize() const { return m_Size; }
5684  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5685  void* GetUserData() const { return m_pUserData; }
5686  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5687  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5688 
5689  VmaDeviceMemoryBlock* GetBlock() const
5690  {
5691  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5692  return m_BlockAllocation.m_Block;
5693  }
5694  VkDeviceSize GetOffset() const;
5695  VkDeviceMemory GetMemory() const;
5696  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5697  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5698  void* GetMappedData() const;
5699  bool CanBecomeLost() const;
5700 
5701  uint32_t GetLastUseFrameIndex() const
5702  {
5703  return m_LastUseFrameIndex.load();
5704  }
5705  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5706  {
5707  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5708  }
5709  /*
5710  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5711  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5712  - Else, returns false.
5713 
5714  If hAllocation is already lost, assert - you should not call it then.
5715  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5716  */
5717  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5718 
5719  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5720  {
5721  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5722  outInfo.blockCount = 1;
5723  outInfo.allocationCount = 1;
5724  outInfo.unusedRangeCount = 0;
5725  outInfo.usedBytes = m_Size;
5726  outInfo.unusedBytes = 0;
5727  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5728  outInfo.unusedRangeSizeMin = UINT64_MAX;
5729  outInfo.unusedRangeSizeMax = 0;
5730  }
5731 
5732  void BlockAllocMap();
5733  void BlockAllocUnmap();
5734  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5735  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5736 
5737 #if VMA_STATS_STRING_ENABLED
5738  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5739  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5740 
5741  void InitBufferImageUsage(uint32_t bufferImageUsage)
5742  {
5743  VMA_ASSERT(m_BufferImageUsage == 0);
5744  m_BufferImageUsage = bufferImageUsage;
5745  }
5746 
5747  void PrintParameters(class VmaJsonWriter& json) const;
5748 #endif
5749 
5750 private:
5751  VkDeviceSize m_Alignment;
5752  VkDeviceSize m_Size;
5753  void* m_pUserData;
5754  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5755  uint32_t m_MemoryTypeIndex;
5756  uint8_t m_Type; // ALLOCATION_TYPE
5757  uint8_t m_SuballocationType; // VmaSuballocationType
5758  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5759  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5760  uint8_t m_MapCount;
5761  uint8_t m_Flags; // enum FLAGS
5762 
5763  // Allocation out of VmaDeviceMemoryBlock.
5764  struct BlockAllocation
5765  {
5766  VmaDeviceMemoryBlock* m_Block;
5767  VkDeviceSize m_Offset;
5768  bool m_CanBecomeLost;
5769  };
5770 
5771  // Allocation for an object that has its own private VkDeviceMemory.
5772  struct DedicatedAllocation
5773  {
5774  VkDeviceMemory m_hMemory;
5775  void* m_pMappedData; // Not null means memory is mapped.
5776  };
5777 
5778  union
5779  {
5780  // Allocation out of VmaDeviceMemoryBlock.
5781  BlockAllocation m_BlockAllocation;
5782  // Allocation for an object that has its own private VkDeviceMemory.
5783  DedicatedAllocation m_DedicatedAllocation;
5784  };
5785 
5786 #if VMA_STATS_STRING_ENABLED
5787  uint32_t m_CreationFrameIndex;
5788  uint32_t m_BufferImageUsage; // 0 if unknown.
5789 #endif
5790 
5791  void FreeUserDataString(VmaAllocator hAllocator);
5792 };
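// Editorial note - how the union above is discriminated (an illustrative
// sketch; member access is shown for exposition even where it is private):
// m_Type selects the active member, so readers branch on GetType() first, e.g.:
//
//   switch(hAlloc->GetType())
//   {
//   case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
//       // m_BlockAllocation is active; memory comes from the parent block.
//       return hAlloc->GetBlock()->GetDeviceMemory();
//   case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
//       // m_DedicatedAllocation is active; the allocation owns its memory.
//       return hAlloc->m_DedicatedAllocation.m_hMemory;
//   default:
//       VMA_ASSERT(0);
//       return VK_NULL_HANDLE;
//   }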
5793 
5794 /*
5795 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
5796 as an allocated memory block, or is free.
5797 */
5798 struct VmaSuballocation
5799 {
5800  VkDeviceSize offset;
5801  VkDeviceSize size;
5802  VmaAllocation hAllocation;
5803  VmaSuballocationType type;
5804 };
5805 
5806 // Comparator for offsets.
5807 struct VmaSuballocationOffsetLess
5808 {
5809  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5810  {
5811  return lhs.offset < rhs.offset;
5812  }
5813 };
5814 struct VmaSuballocationOffsetGreater
5815 {
5816  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5817  {
5818  return lhs.offset > rhs.offset;
5819  }
5820 };
5821 
5822 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5823 
5824 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5825 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5826 
5827 enum class VmaAllocationRequestType
5828 {
5829  Normal,
5830  // Used by "Linear" algorithm.
5831  UpperAddress,
5832  EndOf1st,
5833  EndOf2nd,
5834 };
5835 
5836 /*
5837 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5838 
5839 If canMakeOtherLost was false:
5840 - item points to a FREE suballocation.
5841 - itemsToMakeLostCount is 0.
5842 
5843 If canMakeOtherLost was true:
5844 - item points to first of sequence of suballocations, which are either FREE,
5845  or point to VmaAllocations that can become lost.
5846 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5847  the requested allocation to succeed.
5848 */
5849 struct VmaAllocationRequest
5850 {
5851  VkDeviceSize offset;
5852  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5853  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5854  VmaSuballocationList::iterator item;
5855  size_t itemsToMakeLostCount;
5856  void* customData;
5857  VmaAllocationRequestType type;
5858 
5859  VkDeviceSize CalcCost() const
5860  {
5861  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5862  }
5863 };
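// Editorial note - a worked example of CalcCost() with hypothetical numbers:
// with VMA_LOST_ALLOCATION_COST = 1048576, a candidate placement that overlaps
// sumItemSize = 131072 bytes of live allocations and requires
// itemsToMakeLostCount = 2 of them to become lost costs
//
//   131072 + 2 * 1048576 = 2228224 equivalent bytes,
//
// so the search prefers placements that sacrifice fewer live allocations even
// when they overlap somewhat more used space.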
5864 
5865 /*
5866 Data structure used for bookkeeping of allocations and unused ranges of memory
5867 in a single VkDeviceMemory block.
5868 */
5869 class VmaBlockMetadata
5870 {
5871 public:
5872  VmaBlockMetadata(VmaAllocator hAllocator);
5873  virtual ~VmaBlockMetadata() { }
5874  virtual void Init(VkDeviceSize size) { m_Size = size; }
5875 
5876  // Validates all data structures inside this object. If not valid, returns false.
5877  virtual bool Validate() const = 0;
5878  VkDeviceSize GetSize() const { return m_Size; }
5879  virtual size_t GetAllocationCount() const = 0;
5880  virtual VkDeviceSize GetSumFreeSize() const = 0;
5881  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5882  // Returns true if this block is empty - contains only a single free suballocation.
5883  virtual bool IsEmpty() const = 0;
5884 
5885  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5886  // Shouldn't modify blockCount.
5887  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5888 
5889 #if VMA_STATS_STRING_ENABLED
5890  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5891 #endif
5892 
5893  // Tries to find a place for suballocation with given parameters inside this block.
5894  // On success, fills pAllocationRequest and returns true.
5895  // On failure, returns false.
5896  virtual bool CreateAllocationRequest(
5897  uint32_t currentFrameIndex,
5898  uint32_t frameInUseCount,
5899  VkDeviceSize bufferImageGranularity,
5900  VkDeviceSize allocSize,
5901  VkDeviceSize allocAlignment,
5902  bool upperAddress,
5903  VmaSuballocationType allocType,
5904  bool canMakeOtherLost,
5905  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5906  uint32_t strategy,
5907  VmaAllocationRequest* pAllocationRequest) = 0;
5908 
5909  virtual bool MakeRequestedAllocationsLost(
5910  uint32_t currentFrameIndex,
5911  uint32_t frameInUseCount,
5912  VmaAllocationRequest* pAllocationRequest) = 0;
5913 
5914  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5915 
5916  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5917 
5918  // Makes actual allocation based on request. Request must already be checked and valid.
5919  virtual void Alloc(
5920  const VmaAllocationRequest& request,
5921  VmaSuballocationType type,
5922  VkDeviceSize allocSize,
5923  VmaAllocation hAllocation) = 0;
5924 
5925  // Frees suballocation assigned to given memory region.
5926  virtual void Free(const VmaAllocation allocation) = 0;
5927  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5928 
5929 protected:
5930  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5931 
5932 #if VMA_STATS_STRING_ENABLED
5933  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5934  VkDeviceSize unusedBytes,
5935  size_t allocationCount,
5936  size_t unusedRangeCount) const;
5937  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5938  VkDeviceSize offset,
5939  VmaAllocation hAllocation) const;
5940  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5941  VkDeviceSize offset,
5942  VkDeviceSize size) const;
5943  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5944 #endif
5945 
5946 private:
5947  VkDeviceSize m_Size;
5948  const VkAllocationCallbacks* m_pAllocationCallbacks;
5949 };
5950 
5951 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5952  VMA_ASSERT(0 && "Validation failed: " #cond); \
5953  return false; \
5954  } } while(false)
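// Editorial note - typical use of VMA_VALIDATE inside a Validate() override
// (an illustrative sketch, not a quote from the implementation below):
//
//   bool VmaBlockMetadata_Generic::Validate() const
//   {
//       VMA_VALIDATE(!m_Suballocations.empty());
//       VMA_VALIDATE(m_SumFreeSize <= GetSize());
//       // ... further invariants ...
//       return true;
//   }
//
// The first failed condition asserts and returns false from the enclosing
// function, so validation stops at the earliest broken invariant.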
5955 
5956 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5957 {
5958  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5959 public:
5960  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5961  virtual ~VmaBlockMetadata_Generic();
5962  virtual void Init(VkDeviceSize size);
5963 
5964  virtual bool Validate() const;
5965  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5966  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5967  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5968  virtual bool IsEmpty() const;
5969 
5970  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5971  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5972 
5973 #if VMA_STATS_STRING_ENABLED
5974  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5975 #endif
5976 
5977  virtual bool CreateAllocationRequest(
5978  uint32_t currentFrameIndex,
5979  uint32_t frameInUseCount,
5980  VkDeviceSize bufferImageGranularity,
5981  VkDeviceSize allocSize,
5982  VkDeviceSize allocAlignment,
5983  bool upperAddress,
5984  VmaSuballocationType allocType,
5985  bool canMakeOtherLost,
5986  uint32_t strategy,
5987  VmaAllocationRequest* pAllocationRequest);
5988 
5989  virtual bool MakeRequestedAllocationsLost(
5990  uint32_t currentFrameIndex,
5991  uint32_t frameInUseCount,
5992  VmaAllocationRequest* pAllocationRequest);
5993 
5994  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5995 
5996  virtual VkResult CheckCorruption(const void* pBlockData);
5997 
5998  virtual void Alloc(
5999  const VmaAllocationRequest& request,
6000  VmaSuballocationType type,
6001  VkDeviceSize allocSize,
6002  VmaAllocation hAllocation);
6003 
6004  virtual void Free(const VmaAllocation allocation);
6005  virtual void FreeAtOffset(VkDeviceSize offset);
6006 
6007 ////////////////////////////////////////////////////////////////////////////////
6008  // For defragmentation
6009 
6010  bool IsBufferImageGranularityConflictPossible(
6011  VkDeviceSize bufferImageGranularity,
6012  VmaSuballocationType& inOutPrevSuballocType) const;
6013 
6014 private:
6015  friend class VmaDefragmentationAlgorithm_Generic;
6016  friend class VmaDefragmentationAlgorithm_Fast;
6017 
6018  uint32_t m_FreeCount;
6019  VkDeviceSize m_SumFreeSize;
6020  VmaSuballocationList m_Suballocations;
6021  // Suballocations that are free and have a size greater than a certain threshold.
6022  // Sorted by size, ascending.
6023  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6024 
6025  bool ValidateFreeSuballocationList() const;
6026 
6027  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
6028  // If yes, fills pOffset and returns true. If no, returns false.
6029  bool CheckAllocation(
6030  uint32_t currentFrameIndex,
6031  uint32_t frameInUseCount,
6032  VkDeviceSize bufferImageGranularity,
6033  VkDeviceSize allocSize,
6034  VkDeviceSize allocAlignment,
6035  VmaSuballocationType allocType,
6036  VmaSuballocationList::const_iterator suballocItem,
6037  bool canMakeOtherLost,
6038  VkDeviceSize* pOffset,
6039  size_t* itemsToMakeLostCount,
6040  VkDeviceSize* pSumFreeSize,
6041  VkDeviceSize* pSumItemSize) const;
6042  // Given a free suballocation, merges it with the following one, which must also be free.
6043  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6044  // Releases the given suballocation, making it free.
6045  // Merges it with adjacent free suballocations if applicable.
6046  // Returns an iterator to the new free suballocation at this place.
6047  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6048  // Given a free suballocation, inserts it into the sorted list
6049  // m_FreeSuballocationsBySize if it is large enough to qualify.
6050  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6051  // Given a free suballocation, removes it from the sorted list
6052  // m_FreeSuballocationsBySize if it was registered there.
6053  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6054 };
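// Editorial note - why m_FreeSuballocationsBySize is kept sorted (illustrative
// sketch; the comparator name is an assumption): a best-fit strategy can
// binary-search this index for the smallest free range that still fits,
// instead of walking the whole suballocation list:
//
//   VmaSuballocationList::iterator* const pBestFit = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       allocSize,
//       SuballocationItemSizeLess()); // assumed comparator: free size, ascending
//
// RegisterFreeSuballocation/UnregisterFreeSuballocation keep this index in
// sync as ranges are split, merged, and freed.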
6055 
6056 /*
6057 Allocations and their references in internal data structure look like this:
6058 
6059 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6060 
6061  0 +-------+
6062  | |
6063  | |
6064  | |
6065  +-------+
6066  | Alloc | 1st[m_1stNullItemsBeginCount]
6067  +-------+
6068  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6069  +-------+
6070  | ... |
6071  +-------+
6072  | Alloc | 1st[1st.size() - 1]
6073  +-------+
6074  | |
6075  | |
6076  | |
6077 GetSize() +-------+
6078 
6079 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6080 
6081  0 +-------+
6082  | Alloc | 2nd[0]
6083  +-------+
6084  | Alloc | 2nd[1]
6085  +-------+
6086  | ... |
6087  +-------+
6088  | Alloc | 2nd[2nd.size() - 1]
6089  +-------+
6090  | |
6091  | |
6092  | |
6093  +-------+
6094  | Alloc | 1st[m_1stNullItemsBeginCount]
6095  +-------+
6096  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6097  +-------+
6098  | ... |
6099  +-------+
6100  | Alloc | 1st[1st.size() - 1]
6101  +-------+
6102  | |
6103 GetSize() +-------+
6104 
6105 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6106 
6107  0 +-------+
6108  | |
6109  | |
6110  | |
6111  +-------+
6112  | Alloc | 1st[m_1stNullItemsBeginCount]
6113  +-------+
6114  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6115  +-------+
6116  | ... |
6117  +-------+
6118  | Alloc | 1st[1st.size() - 1]
6119  +-------+
6120  | |
6121  | |
6122  | |
6123  +-------+
6124  | Alloc | 2nd[2nd.size() - 1]
6125  +-------+
6126  | ... |
6127  +-------+
6128  | Alloc | 2nd[1]
6129  +-------+
6130  | Alloc | 2nd[0]
6131 GetSize() +-------+
6132 
6133 */
6134 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6135 {
6136  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6137 public:
6138  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6139  virtual ~VmaBlockMetadata_Linear();
6140  virtual void Init(VkDeviceSize size);
6141 
6142  virtual bool Validate() const;
6143  virtual size_t GetAllocationCount() const;
6144  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6145  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6146  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6147 
6148  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6149  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6150 
6151 #if VMA_STATS_STRING_ENABLED
6152  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6153 #endif
6154 
6155  virtual bool CreateAllocationRequest(
6156  uint32_t currentFrameIndex,
6157  uint32_t frameInUseCount,
6158  VkDeviceSize bufferImageGranularity,
6159  VkDeviceSize allocSize,
6160  VkDeviceSize allocAlignment,
6161  bool upperAddress,
6162  VmaSuballocationType allocType,
6163  bool canMakeOtherLost,
6164  uint32_t strategy,
6165  VmaAllocationRequest* pAllocationRequest);
6166 
6167  virtual bool MakeRequestedAllocationsLost(
6168  uint32_t currentFrameIndex,
6169  uint32_t frameInUseCount,
6170  VmaAllocationRequest* pAllocationRequest);
6171 
6172  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6173 
6174  virtual VkResult CheckCorruption(const void* pBlockData);
6175 
6176  virtual void Alloc(
6177  const VmaAllocationRequest& request,
6178  VmaSuballocationType type,
6179  VkDeviceSize allocSize,
6180  VmaAllocation hAllocation);
6181 
6182  virtual void Free(const VmaAllocation allocation);
6183  virtual void FreeAtOffset(VkDeviceSize offset);
6184 
6185 private:
6186  /*
6187  There are two suballocation vectors, used in a ping-pong fashion.
6188  The one with index m_1stVectorIndex is called 1st.
6189  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6190  2nd can be non-empty only when 1st is not empty.
6191  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6192  */
6193  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6194 
6195  enum SECOND_VECTOR_MODE
6196  {
6197  SECOND_VECTOR_EMPTY,
6198  /*
6199  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
6200  all have smaller offsets.
6201  */
6202  SECOND_VECTOR_RING_BUFFER,
6203  /*
6204  Suballocations in the 2nd vector form the upper side of a double stack.
6205  They all have offsets higher than those in the 1st vector.
6206  The top of this stack means smaller offsets, but higher indices in this vector.
6207  */
6208  SECOND_VECTOR_DOUBLE_STACK,
6209  };
6210 
6211  VkDeviceSize m_SumFreeSize;
6212  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6213  uint32_t m_1stVectorIndex;
6214  SECOND_VECTOR_MODE m_2ndVectorMode;
6215 
6216  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6217  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6218  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6219  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6220 
6221  // Number of items in 1st vector with hAllocation = null at the beginning.
6222  size_t m_1stNullItemsBeginCount;
6223  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6224  size_t m_1stNullItemsMiddleCount;
6225  // Number of items in 2nd vector with hAllocation = null.
6226  size_t m_2ndNullItemsCount;
6227 
6228  bool ShouldCompact1st() const;
6229  void CleanupAfterFree();
6230 
6231  bool CreateAllocationRequest_LowerAddress(
6232  uint32_t currentFrameIndex,
6233  uint32_t frameInUseCount,
6234  VkDeviceSize bufferImageGranularity,
6235  VkDeviceSize allocSize,
6236  VkDeviceSize allocAlignment,
6237  VmaSuballocationType allocType,
6238  bool canMakeOtherLost,
6239  uint32_t strategy,
6240  VmaAllocationRequest* pAllocationRequest);
6241  bool CreateAllocationRequest_UpperAddress(
6242  uint32_t currentFrameIndex,
6243  uint32_t frameInUseCount,
6244  VkDeviceSize bufferImageGranularity,
6245  VkDeviceSize allocSize,
6246  VkDeviceSize allocAlignment,
6247  VmaSuballocationType allocType,
6248  bool canMakeOtherLost,
6249  uint32_t strategy,
6250  VmaAllocationRequest* pAllocationRequest);
6251 };
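// Editorial note - an illustrative sketch of how the two vectors are used (the
// exact transition logic lives in CreateAllocationRequest_LowerAddress):
// plain allocations append to the 1st vector; frees only null out hAllocation
// and bump the null-item counters. When a new allocation no longer fits after
// the end of 1st but does fit before its beginning, it is appended to the 2nd
// vector at a smaller offset and m_2ndVectorMode becomes RING_BUFFER.
// DOUBLE_STACK is requested per allocation:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
//   // In a linear pool this allocates downward from the end of the block,
//   // landing in the 2nd vector as drawn in the double-stack diagram above.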
6252 
6253 /*
6254 - GetSize() is the original size of the allocated memory block.
6255 - m_UsableSize is this size aligned down to a power of two.
6256  All allocations and calculations happen relative to m_UsableSize.
6257 - GetUnusableSize() is the difference between them.
6258  It is reported as a separate, unused range, not available for allocations.
6259 
6260 Node at level 0 has size = m_UsableSize.
6261 Each subsequent level contains nodes half the size of the previous level.
6262 m_LevelCount is the maximum number of levels to use in the current object.
6263 */
6264 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6265 {
6266  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6267 public:
6268  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6269  virtual ~VmaBlockMetadata_Buddy();
6270  virtual void Init(VkDeviceSize size);
6271 
6272  virtual bool Validate() const;
6273  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6274  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6275  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6276  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6277 
6278  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6279  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6280 
6281 #if VMA_STATS_STRING_ENABLED
6282  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6283 #endif
6284 
6285  virtual bool CreateAllocationRequest(
6286  uint32_t currentFrameIndex,
6287  uint32_t frameInUseCount,
6288  VkDeviceSize bufferImageGranularity,
6289  VkDeviceSize allocSize,
6290  VkDeviceSize allocAlignment,
6291  bool upperAddress,
6292  VmaSuballocationType allocType,
6293  bool canMakeOtherLost,
6294  uint32_t strategy,
6295  VmaAllocationRequest* pAllocationRequest);
6296 
6297  virtual bool MakeRequestedAllocationsLost(
6298  uint32_t currentFrameIndex,
6299  uint32_t frameInUseCount,
6300  VmaAllocationRequest* pAllocationRequest);
6301 
6302  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6303 
6304  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6305 
6306  virtual void Alloc(
6307  const VmaAllocationRequest& request,
6308  VmaSuballocationType type,
6309  VkDeviceSize allocSize,
6310  VmaAllocation hAllocation);
6311 
6312  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6313  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6314 
6315 private:
6316  static const VkDeviceSize MIN_NODE_SIZE = 32;
6317  static const size_t MAX_LEVELS = 30;
6318 
6319  struct ValidationContext
6320  {
6321  size_t calculatedAllocationCount;
6322  size_t calculatedFreeCount;
6323  VkDeviceSize calculatedSumFreeSize;
6324 
6325  ValidationContext() :
6326  calculatedAllocationCount(0),
6327  calculatedFreeCount(0),
6328  calculatedSumFreeSize(0) { }
6329  };
6330 
6331  struct Node
6332  {
6333  VkDeviceSize offset;
6334  enum TYPE
6335  {
6336  TYPE_FREE,
6337  TYPE_ALLOCATION,
6338  TYPE_SPLIT,
6339  TYPE_COUNT
6340  } type;
6341  Node* parent;
6342  Node* buddy;
6343 
6344  union
6345  {
6346  struct
6347  {
6348  Node* prev;
6349  Node* next;
6350  } free;
6351  struct
6352  {
6353  VmaAllocation alloc;
6354  } allocation;
6355  struct
6356  {
6357  Node* leftChild;
6358  } split;
6359  };
6360  };
6361 
6362  // Size of the memory block aligned down to a power of two.
6363  VkDeviceSize m_UsableSize;
6364  uint32_t m_LevelCount;
6365 
6366  Node* m_Root;
6367  struct {
6368  Node* front;
6369  Node* back;
6370  } m_FreeList[MAX_LEVELS];
6371  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6372  size_t m_AllocationCount;
6373  // Number of nodes in the tree with type == TYPE_FREE.
6374  size_t m_FreeCount;
6375  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6376  VkDeviceSize m_SumFreeSize;
6377 
6378  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6379  void DeleteNode(Node* node);
6380  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6381  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6382  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6383  // Alloc passed just for validation. Can be null.
6384  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6385  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6386  // Adds node to the front of FreeList at given level.
6387  // node->type must be FREE.
6388  // node->free.prev, next can be undefined.
6389  void AddToFreeListFront(uint32_t level, Node* node);
6390  // Removes node from FreeList at given level.
6391  // node->type must be FREE.
6392  // node->free.prev, next stay untouched.
6393  void RemoveFromFreeList(uint32_t level, Node* node);
6394 
6395 #if VMA_STATS_STRING_ENABLED
6396  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6397 #endif
6398 };
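// Editorial note - a worked example of the buddy level arithmetic (numbers are
// hypothetical): for a 288 MiB block, m_UsableSize is 256 MiB (aligned down to
// a power of two) and GetUnusableSize() is 32 MiB. Then:
//
//   LevelToNodeSize(0) == 256 MiB   // m_UsableSize >> 0
//   LevelToNodeSize(1) == 128 MiB   // each level halves the node size
//   LevelToNodeSize(2) ==  64 MiB
//
// AllocSizeToLevel() goes the other way: a 100 MiB request maps to level 1,
// because 128 MiB is the smallest node size that still fits it; the remaining
// 28 MiB of that node is internal fragmentation counted in m_SumFreeSize.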
6399 
6400 /*
6401 Represents a single block of device memory (`VkDeviceMemory`) with all the
6402 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6403 
6404 Thread-safety: This class must be externally synchronized.
6405 */
6406 class VmaDeviceMemoryBlock
6407 {
6408  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6409 public:
6410  VmaBlockMetadata* m_pMetadata;
6411 
6412  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6413 
6414  ~VmaDeviceMemoryBlock()
6415  {
6416  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6417  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6418  }
6419 
6420  // Always call after construction.
6421  void Init(
6422  VmaAllocator hAllocator,
6423  VmaPool hParentPool,
6424  uint32_t newMemoryTypeIndex,
6425  VkDeviceMemory newMemory,
6426  VkDeviceSize newSize,
6427  uint32_t id,
6428  uint32_t algorithm);
6429  // Always call before destruction.
6430  void Destroy(VmaAllocator allocator);
6431 
6432  VmaPool GetParentPool() const { return m_hParentPool; }
6433  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6434  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6435  uint32_t GetId() const { return m_Id; }
6436  void* GetMappedData() const { return m_pMappedData; }
6437 
6438  // Validates all data structures inside this object. If not valid, returns false.
6439  bool Validate() const;
6440 
6441  VkResult CheckCorruption(VmaAllocator hAllocator);
6442 
6443  // ppData can be null.
6444  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6445  void Unmap(VmaAllocator hAllocator, uint32_t count);
6446 
6447  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6448  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6449 
6450  VkResult BindBufferMemory(
6451  const VmaAllocator hAllocator,
6452  const VmaAllocation hAllocation,
6453  VkDeviceSize allocationLocalOffset,
6454  VkBuffer hBuffer,
6455  const void* pNext);
6456  VkResult BindImageMemory(
6457  const VmaAllocator hAllocator,
6458  const VmaAllocation hAllocation,
6459  VkDeviceSize allocationLocalOffset,
6460  VkImage hImage,
6461  const void* pNext);
6462 
6463 private:
6464  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block doesn't belong to a custom pool.
6465  uint32_t m_MemoryTypeIndex;
6466  uint32_t m_Id;
6467  VkDeviceMemory m_hMemory;
6468 
6469  /*
6470  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6471  Also protects m_MapCount, m_pMappedData.
6472  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6473  */
6474  VMA_MUTEX m_Mutex;
6475  uint32_t m_MapCount;
6476  void* m_pMappedData;
6477 };
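// Editorial note - Map()/Unmap() are reference-counted per block (sketch under
// the semantics implied above): the underlying vkMapMemory happens only when
// m_MapCount rises from 0, and vkUnmapMemory only when it returns to 0:
//
//   void* pData = VMA_NULL;
//   block.Map(hAllocator, 1, &pData);   // first reference: actually maps
//   block.Map(hAllocator, 1, VMA_NULL); // second reference: just counts (ppData can be null)
//   block.Unmap(hAllocator, 1);
//   block.Unmap(hAllocator, 1);         // count reaches 0: actually unmaps
//
// The `count` parameter acquires or releases several references at once;
// m_Mutex guards m_MapCount and m_pMappedData as noted above.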
6478 
6479 struct VmaPointerLess
6480 {
6481  bool operator()(const void* lhs, const void* rhs) const
6482  {
6483  return lhs < rhs;
6484  }
6485 };
6486 
6487 struct VmaDefragmentationMove
6488 {
6489  size_t srcBlockIndex;
6490  size_t dstBlockIndex;
6491  VkDeviceSize srcOffset;
6492  VkDeviceSize dstOffset;
6493  VkDeviceSize size;
6494  VmaAllocation hAllocation;
6495  VmaDeviceMemoryBlock* pSrcBlock;
6496  VmaDeviceMemoryBlock* pDstBlock;
6497 };
6498 
6499 class VmaDefragmentationAlgorithm;
6500 
6501 /*
6502 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6503 Vulkan memory type.
6504 
6505 Synchronized internally with a mutex.
6506 */
6507 struct VmaBlockVector
6508 {
6509  VMA_CLASS_NO_COPY(VmaBlockVector)
6510 public:
6511  VmaBlockVector(
6512  VmaAllocator hAllocator,
6513  VmaPool hParentPool,
6514  uint32_t memoryTypeIndex,
6515  VkDeviceSize preferredBlockSize,
6516  size_t minBlockCount,
6517  size_t maxBlockCount,
6518  VkDeviceSize bufferImageGranularity,
6519  uint32_t frameInUseCount,
6520  bool explicitBlockSize,
6521  uint32_t algorithm);
6522  ~VmaBlockVector();
6523 
6524  VkResult CreateMinBlocks();
6525 
6526  VmaAllocator GetAllocator() const { return m_hAllocator; }
6527  VmaPool GetParentPool() const { return m_hParentPool; }
6528  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6529  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6530  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6531  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6532  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6533  uint32_t GetAlgorithm() const { return m_Algorithm; }
6534 
6535  void GetPoolStats(VmaPoolStats* pStats);
6536 
6537  bool IsEmpty();
6538  bool IsCorruptionDetectionEnabled() const;
6539 
6540  VkResult Allocate(
6541  uint32_t currentFrameIndex,
6542  VkDeviceSize size,
6543  VkDeviceSize alignment,
6544  const VmaAllocationCreateInfo& createInfo,
6545  VmaSuballocationType suballocType,
6546  size_t allocationCount,
6547  VmaAllocation* pAllocations);
6548 
6549  void Free(const VmaAllocation hAllocation);
6550 
6551  // Adds statistics of this BlockVector to pStats.
6552  void AddStats(VmaStats* pStats);
6553 
6554 #if VMA_STATS_STRING_ENABLED
6555  void PrintDetailedMap(class VmaJsonWriter& json);
6556 #endif
6557 
6558  void MakePoolAllocationsLost(
6559  uint32_t currentFrameIndex,
6560  size_t* pLostAllocationCount);
6561  VkResult CheckCorruption();
6562 
6563  // Saves results in pCtx->res.
6564  void Defragment(
6565  class VmaBlockVectorDefragmentationContext* pCtx,
6566  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
6567  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6568  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6569  VkCommandBuffer commandBuffer);
6570  void DefragmentationEnd(
6571  class VmaBlockVectorDefragmentationContext* pCtx,
6572  uint32_t flags,
6573  VmaDefragmentationStats* pStats);
6574 
6575  uint32_t ProcessDefragmentations(
6576  class VmaBlockVectorDefragmentationContext *pCtx,
6577  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
6578 
6579  void CommitDefragmentations(
6580  class VmaBlockVectorDefragmentationContext *pCtx,
6581  VmaDefragmentationStats* pStats);
6582 
6583 ////////////////////////////////////////////////////////////////////////////////
6584 // To be used only while the m_Mutex is locked. Used during defragmentation.
6585 
6586  size_t GetBlockCount() const { return m_Blocks.size(); }
6587  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6588  size_t CalcAllocationCount() const;
6589  bool IsBufferImageGranularityConflictPossible() const;
6590 
6591 private:
6592  friend class VmaDefragmentationAlgorithm_Generic;
6593 
6594  const VmaAllocator m_hAllocator;
6595  const VmaPool m_hParentPool;
6596  const uint32_t m_MemoryTypeIndex;
6597  const VkDeviceSize m_PreferredBlockSize;
6598  const size_t m_MinBlockCount;
6599  const size_t m_MaxBlockCount;
6600  const VkDeviceSize m_BufferImageGranularity;
6601  const uint32_t m_FrameInUseCount;
6602  const bool m_ExplicitBlockSize;
6603  const uint32_t m_Algorithm;
6604  VMA_RW_MUTEX m_Mutex;
6605 
6606  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6607  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6608  bool m_HasEmptyBlock;
6609  // Incrementally sorted by sumFreeSize, ascending.
6610  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6611  uint32_t m_NextBlockId;
6612 
6613  VkDeviceSize CalcMaxBlockSize() const;
6614 
6615  // Finds and removes given block from vector.
6616  void Remove(VmaDeviceMemoryBlock* pBlock);
6617 
6618  // Performs a single step in sorting m_Blocks. They may not be fully sorted
6619  // after this call.
6620  void IncrementallySortBlocks();
6621 
6622  VkResult AllocatePage(
6623  uint32_t currentFrameIndex,
6624  VkDeviceSize size,
6625  VkDeviceSize alignment,
6626  const VmaAllocationCreateInfo& createInfo,
6627  VmaSuballocationType suballocType,
6628  VmaAllocation* pAllocation);
6629 
6630  // To be used only without CAN_MAKE_OTHER_LOST flag.
6631  VkResult AllocateFromBlock(
6632  VmaDeviceMemoryBlock* pBlock,
6633  uint32_t currentFrameIndex,
6634  VkDeviceSize size,
6635  VkDeviceSize alignment,
6636  VmaAllocationCreateFlags allocFlags,
6637  void* pUserData,
6638  VmaSuballocationType suballocType,
6639  uint32_t strategy,
6640  VmaAllocation* pAllocation);
6641 
6642  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6643 
6644  // Saves result to pCtx->res.
6645  void ApplyDefragmentationMovesCpu(
6646  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6647  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6648  // Saves result to pCtx->res.
6649  void ApplyDefragmentationMovesGpu(
6650  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6651  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6652  VkCommandBuffer commandBuffer);
6653 
6654  /*
6655  Used during defragmentation. pDefragmentationStats is optional: if not null,
6656  it is treated as in/out and updated with new data.
6657  */
6658  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6659 
6660  void UpdateHasEmptyBlock();
6661 };
6662 
6663 struct VmaPool_T
6664 {
6665  VMA_CLASS_NO_COPY(VmaPool_T)
6666 public:
6667  VmaBlockVector m_BlockVector;
6668 
6669  VmaPool_T(
6670  VmaAllocator hAllocator,
6671  const VmaPoolCreateInfo& createInfo,
6672  VkDeviceSize preferredBlockSize);
6673  ~VmaPool_T();
6674 
6675  uint32_t GetId() const { return m_Id; }
6676  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6677 
6678  const char* GetName() const { return m_Name; }
6679  void SetName(const char* pName);
6680 
6681 #if VMA_STATS_STRING_ENABLED
6682  //void PrintDetailedMap(class VmaStringBuilder& sb);
6683 #endif
6684 
6685 private:
6686  uint32_t m_Id;
6687  char* m_Name;
6688 };
6689 
6690 /*
6691 Performs defragmentation:
6692 
6693 - Updates `pBlockVector->m_pMetadata`.
6694 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6695 - Does not move actual data, only returns requested moves as `moves`.
6696 */
6697 class VmaDefragmentationAlgorithm
6698 {
6699  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6700 public:
6701  VmaDefragmentationAlgorithm(
6702  VmaAllocator hAllocator,
6703  VmaBlockVector* pBlockVector,
6704  uint32_t currentFrameIndex) :
6705  m_hAllocator(hAllocator),
6706  m_pBlockVector(pBlockVector),
6707  m_CurrentFrameIndex(currentFrameIndex)
6708  {
6709  }
6710  virtual ~VmaDefragmentationAlgorithm()
6711  {
6712  }
6713 
6714  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6715  virtual void AddAll() = 0;
6716 
6717  virtual VkResult Defragment(
6718  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6719  VkDeviceSize maxBytesToMove,
6720  uint32_t maxAllocationsToMove,
6721  VmaDefragmentationFlags flags) = 0;
6722 
6723  virtual VkDeviceSize GetBytesMoved() const = 0;
6724  virtual uint32_t GetAllocationsMoved() const = 0;
6725 
6726 protected:
6727  VmaAllocator const m_hAllocator;
6728  VmaBlockVector* const m_pBlockVector;
6729  const uint32_t m_CurrentFrameIndex;
6730 
6731  struct AllocationInfo
6732  {
6733  VmaAllocation m_hAllocation;
6734  VkBool32* m_pChanged;
6735 
6736  AllocationInfo() :
6737  m_hAllocation(VK_NULL_HANDLE),
6738  m_pChanged(VMA_NULL)
6739  {
6740  }
6741  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6742  m_hAllocation(hAlloc),
6743  m_pChanged(pChanged)
6744  {
6745  }
6746  };
6747 };
6748 
6749 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6750 {
6751  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6752 public:
6753  VmaDefragmentationAlgorithm_Generic(
6754  VmaAllocator hAllocator,
6755  VmaBlockVector* pBlockVector,
6756  uint32_t currentFrameIndex,
6757  bool overlappingMoveSupported);
6758  virtual ~VmaDefragmentationAlgorithm_Generic();
6759 
6760  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6761  virtual void AddAll() { m_AllAllocations = true; }
6762 
6763  virtual VkResult Defragment(
6764  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6765  VkDeviceSize maxBytesToMove,
6766  uint32_t maxAllocationsToMove,
6767  VmaDefragmentationFlags flags);
6768 
6769  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6770  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6771 
6772 private:
6773  uint32_t m_AllocationCount;
6774  bool m_AllAllocations;
6775 
6776  VkDeviceSize m_BytesMoved;
6777  uint32_t m_AllocationsMoved;
6778 
6779  struct AllocationInfoSizeGreater
6780  {
6781  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6782  {
6783  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6784  }
6785  };
6786 
6787  struct AllocationInfoOffsetGreater
6788  {
6789  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6790  {
6791  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6792  }
6793  };
6794 
6795  struct BlockInfo
6796  {
6797  size_t m_OriginalBlockIndex;
6798  VmaDeviceMemoryBlock* m_pBlock;
6799  bool m_HasNonMovableAllocations;
6800  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6801 
6802  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6803  m_OriginalBlockIndex(SIZE_MAX),
6804  m_pBlock(VMA_NULL),
6805  m_HasNonMovableAllocations(true),
6806  m_Allocations(pAllocationCallbacks)
6807  {
6808  }
6809 
6810  void CalcHasNonMovableAllocations()
6811  {
6812  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6813  const size_t defragmentAllocCount = m_Allocations.size();
6814  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6815  }
6816 
6817  void SortAllocationsBySizeDescending()
6818  {
6819  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6820  }
6821 
6822  void SortAllocationsByOffsetDescending()
6823  {
6824  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6825  }
6826  };
6827 
6828  struct BlockPointerLess
6829  {
6830  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6831  {
6832  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6833  }
6834  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6835  {
6836  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6837  }
6838  };
6839 
6840  // 1. Blocks with some non-movable allocations go first.
6841  // 2. Blocks with smaller sumFreeSize go first.
6842  struct BlockInfoCompareMoveDestination
6843  {
6844  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6845  {
6846  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6847  {
6848  return true;
6849  }
6850  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6851  {
6852  return false;
6853  }
6854  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6855  {
6856  return true;
6857  }
6858  return false;
6859  }
6860  };
6861 
6862  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6863  BlockInfoVector m_Blocks;
6864 
6865  VkResult DefragmentRound(
6866  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6867  VkDeviceSize maxBytesToMove,
6868  uint32_t maxAllocationsToMove,
6869  bool freeOldAllocations);
6870 
6871  size_t CalcBlocksWithNonMovableCount() const;
6872 
6873  static bool MoveMakesSense(
6874  size_t dstBlockIndex, VkDeviceSize dstOffset,
6875  size_t srcBlockIndex, VkDeviceSize srcOffset);
6876 };
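/*
A self-contained sketch of the destination ordering that
BlockInfoCompareMoveDestination implements above (simplified analog reduced to
the two keys that comparator reads):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Dst { bool hasNonMovable; uint64_t sumFreeSize; };

    static void SortDestinations(std::vector<Dst>& dsts)
    {
        std::sort(dsts.begin(), dsts.end(), [](const Dst& l, const Dst& r)
        {
            if(l.hasNonMovable != r.hasNonMovable)
                return l.hasNonMovable; // blocks pinned by non-movable allocations first
            return l.sumFreeSize < r.sumFreeSize; // then tighter blocks first
        });
    }

Preferring fuller blocks as destinations tends to leave the remaining blocks
completely empty so they can be freed.
*/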
6877 
6878 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6879 {
6880  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6881 public:
6882  VmaDefragmentationAlgorithm_Fast(
6883  VmaAllocator hAllocator,
6884  VmaBlockVector* pBlockVector,
6885  uint32_t currentFrameIndex,
6886  bool overlappingMoveSupported);
6887  virtual ~VmaDefragmentationAlgorithm_Fast();
6888 
6889  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6890  virtual void AddAll() { m_AllAllocations = true; }
6891 
6892  virtual VkResult Defragment(
6893  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6894  VkDeviceSize maxBytesToMove,
6895  uint32_t maxAllocationsToMove,
6896  VmaDefragmentationFlags flags);
6897 
6898  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6899  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6900 
6901 private:
6902  struct BlockInfo
6903  {
6904  size_t origBlockIndex;
6905  };
6906 
6907  class FreeSpaceDatabase
6908  {
6909  public:
6910  FreeSpaceDatabase()
6911  {
6912  FreeSpace s = {};
6913  s.blockInfoIndex = SIZE_MAX;
6914  for(size_t i = 0; i < MAX_COUNT; ++i)
6915  {
6916  m_FreeSpaces[i] = s;
6917  }
6918  }
6919 
6920  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6921  {
6922  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6923  {
6924  return;
6925  }
6926 
6927  // Find first invalid or the smallest structure.
6928  size_t bestIndex = SIZE_MAX;
6929  for(size_t i = 0; i < MAX_COUNT; ++i)
6930  {
6931  // Empty structure.
6932  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6933  {
6934  bestIndex = i;
6935  break;
6936  }
6937  if(m_FreeSpaces[i].size < size &&
6938  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6939  {
6940  bestIndex = i;
6941  }
6942  }
6943 
6944  if(bestIndex != SIZE_MAX)
6945  {
6946  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6947  m_FreeSpaces[bestIndex].offset = offset;
6948  m_FreeSpaces[bestIndex].size = size;
6949  }
6950  }
6951 
6952  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6953  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6954  {
6955  size_t bestIndex = SIZE_MAX;
6956  VkDeviceSize bestFreeSpaceAfter = 0;
6957  for(size_t i = 0; i < MAX_COUNT; ++i)
6958  {
6959  // Structure is valid.
6960  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6961  {
6962  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6963  // Allocation fits into this structure.
6964  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6965  {
6966  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6967  (dstOffset + size);
6968  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6969  {
6970  bestIndex = i;
6971  bestFreeSpaceAfter = freeSpaceAfter;
6972  }
6973  }
6974  }
6975  }
6976 
6977  if(bestIndex != SIZE_MAX)
6978  {
6979  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6980  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6981 
6982  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6983  {
6984  // Leave this structure for remaining empty space.
6985  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6986  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6987  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6988  }
6989  else
6990  {
6991  // This structure becomes invalid.
6992  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6993  }
6994 
6995  return true;
6996  }
6997 
6998  return false;
6999  }
7000 
7001  private:
7002  static const size_t MAX_COUNT = 4;
7003 
7004  struct FreeSpace
7005  {
7006  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7007  VkDeviceSize offset;
7008  VkDeviceSize size;
7009  } m_FreeSpaces[MAX_COUNT];
7010  };
7011 
7012  const bool m_OverlappingMoveSupported;
7013 
7014  uint32_t m_AllocationCount;
7015  bool m_AllAllocations;
7016 
7017  VkDeviceSize m_BytesMoved;
7018  uint32_t m_AllocationsMoved;
7019 
7020  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7021 
7022  void PreprocessMetadata();
7023  void PostprocessMetadata();
7024  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7025 };
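/*
The fit test in FreeSpaceDatabase::Fetch above reduces to aligning the start of
a free range up and checking that the request still fits. A self-contained
sketch of that arithmetic (assuming power-of-two alignment, which Vulkan
guarantees for memory requirements):

    #include <cstdint>

    static inline uint64_t AlignUpPow2(uint64_t v, uint64_t alignment)
    {
        return (v + alignment - 1) & ~(alignment - 1);
    }

    // Returns true and the placement offset if [offset, offset + space) can
    // hold `size` bytes at `alignment`.
    static bool FitsAligned(uint64_t offset, uint64_t space,
        uint64_t size, uint64_t alignment, uint64_t& outDstOffset)
    {
        const uint64_t dstOffset = AlignUpPow2(offset, alignment);
        if(dstOffset + size > offset + space)
            return false;
        outDstOffset = dstOffset;
        return true;
    }

For example, offset = 10, space = 100, alignment = 16 places the allocation at
dstOffset = 16, leaving 110 - (16 + size) bytes of trailing free space.
*/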
7026 
7027 struct VmaBlockDefragmentationContext
7028 {
7029  enum BLOCK_FLAG
7030  {
7031  BLOCK_FLAG_USED = 0x00000001,
7032  };
7033  uint32_t flags;
7034  VkBuffer hBuffer;
7035 };
7036 
7037 class VmaBlockVectorDefragmentationContext
7038 {
7039  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7040 public:
7041  VkResult res;
7042  bool mutexLocked;
7043  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7044  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7045  uint32_t defragmentationMovesProcessed;
7046  uint32_t defragmentationMovesCommitted;
7047  bool hasDefragmentationPlan;
7048 
7049  VmaBlockVectorDefragmentationContext(
7050  VmaAllocator hAllocator,
7051  VmaPool hCustomPool, // Optional.
7052  VmaBlockVector* pBlockVector,
7053  uint32_t currFrameIndex);
7054  ~VmaBlockVectorDefragmentationContext();
7055 
7056  VmaPool GetCustomPool() const { return m_hCustomPool; }
7057  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7058  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7059 
7060  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7061  void AddAll() { m_AllAllocations = true; }
7062 
7063  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7064 
7065 private:
7066  const VmaAllocator m_hAllocator;
7067  // Null if not from custom pool.
7068  const VmaPool m_hCustomPool;
7069  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7070  VmaBlockVector* const m_pBlockVector;
7071  const uint32_t m_CurrFrameIndex;
7072  // Owner of this object.
7073  VmaDefragmentationAlgorithm* m_pAlgorithm;
7074 
7075  struct AllocInfo
7076  {
7077  VmaAllocation hAlloc;
7078  VkBool32* pChanged;
7079  };
7080  // Used between constructor and Begin.
7081  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7082  bool m_AllAllocations;
7083 };
7084 
7085 struct VmaDefragmentationContext_T
7086 {
7087 private:
7088  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7089 public:
7090  VmaDefragmentationContext_T(
7091  VmaAllocator hAllocator,
7092  uint32_t currFrameIndex,
7093  uint32_t flags,
7094  VmaDefragmentationStats* pStats);
7095  ~VmaDefragmentationContext_T();
7096 
7097  void AddPools(uint32_t poolCount, VmaPool* pPools);
7098  void AddAllocations(
7099  uint32_t allocationCount,
7100  VmaAllocation* pAllocations,
7101  VkBool32* pAllocationsChanged);
7102 
7103  /*
7104  Returns:
7105  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
7106  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7107  - Negative value if an error occurred and the object can be destroyed immediately.
7108  */
7109  VkResult Defragment(
7110  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7111  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7112  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7113 
7114  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7115  VkResult DefragmentPassEnd();
7116 
7117 private:
7118  const VmaAllocator m_hAllocator;
7119  const uint32_t m_CurrFrameIndex;
7120  const uint32_t m_Flags;
7121  VmaDefragmentationStats* const m_pStats;
7122 
7123  VkDeviceSize m_MaxCpuBytesToMove;
7124  uint32_t m_MaxCpuAllocationsToMove;
7125  VkDeviceSize m_MaxGpuBytesToMove;
7126  uint32_t m_MaxGpuAllocationsToMove;
7127 
7128  // Owner of these objects.
7129  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7130  // Owner of these objects.
7131  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7132 };
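/*
A minimal sketch of interpreting the Defragment() result documented above
(hypothetical caller code, not part of the library):

    VkResult res = pCtx->Defragment(
        maxCpuBytesToMove, maxCpuAllocationsToMove,
        maxGpuBytesToMove, maxGpuAllocationsToMove,
        commandBuffer, pStats, flags);
    if(res == VK_SUCCESS)
    {
        // Finished: the context can be destroyed immediately.
    }
    else if(res == VK_NOT_READY)
    {
        // Succeeded so far, but the context must stay alive until
        // vmaDefragmentationEnd() is called.
    }
    else // res < 0
    {
        // Error: the context can be destroyed immediately.
    }
*/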
7133 
7134 #if VMA_RECORDING_ENABLED
7135 
7136 class VmaRecorder
7137 {
7138 public:
7139  VmaRecorder();
7140  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7141  void WriteConfiguration(
7142  const VkPhysicalDeviceProperties& devProps,
7143  const VkPhysicalDeviceMemoryProperties& memProps,
7144  uint32_t vulkanApiVersion,
7145  bool dedicatedAllocationExtensionEnabled,
7146  bool bindMemory2ExtensionEnabled,
7147  bool memoryBudgetExtensionEnabled,
7148  bool deviceCoherentMemoryExtensionEnabled);
7149  ~VmaRecorder();
7150 
7151  void RecordCreateAllocator(uint32_t frameIndex);
7152  void RecordDestroyAllocator(uint32_t frameIndex);
7153  void RecordCreatePool(uint32_t frameIndex,
7154  const VmaPoolCreateInfo& createInfo,
7155  VmaPool pool);
7156  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7157  void RecordAllocateMemory(uint32_t frameIndex,
7158  const VkMemoryRequirements& vkMemReq,
7159  const VmaAllocationCreateInfo& createInfo,
7160  VmaAllocation allocation);
7161  void RecordAllocateMemoryPages(uint32_t frameIndex,
7162  const VkMemoryRequirements& vkMemReq,
7163  const VmaAllocationCreateInfo& createInfo,
7164  uint64_t allocationCount,
7165  const VmaAllocation* pAllocations);
7166  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7167  const VkMemoryRequirements& vkMemReq,
7168  bool requiresDedicatedAllocation,
7169  bool prefersDedicatedAllocation,
7170  const VmaAllocationCreateInfo& createInfo,
7171  VmaAllocation allocation);
7172  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7173  const VkMemoryRequirements& vkMemReq,
7174  bool requiresDedicatedAllocation,
7175  bool prefersDedicatedAllocation,
7176  const VmaAllocationCreateInfo& createInfo,
7177  VmaAllocation allocation);
7178  void RecordFreeMemory(uint32_t frameIndex,
7179  VmaAllocation allocation);
7180  void RecordFreeMemoryPages(uint32_t frameIndex,
7181  uint64_t allocationCount,
7182  const VmaAllocation* pAllocations);
7183  void RecordSetAllocationUserData(uint32_t frameIndex,
7184  VmaAllocation allocation,
7185  const void* pUserData);
7186  void RecordCreateLostAllocation(uint32_t frameIndex,
7187  VmaAllocation allocation);
7188  void RecordMapMemory(uint32_t frameIndex,
7189  VmaAllocation allocation);
7190  void RecordUnmapMemory(uint32_t frameIndex,
7191  VmaAllocation allocation);
7192  void RecordFlushAllocation(uint32_t frameIndex,
7193  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7194  void RecordInvalidateAllocation(uint32_t frameIndex,
7195  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7196  void RecordCreateBuffer(uint32_t frameIndex,
7197  const VkBufferCreateInfo& bufCreateInfo,
7198  const VmaAllocationCreateInfo& allocCreateInfo,
7199  VmaAllocation allocation);
7200  void RecordCreateImage(uint32_t frameIndex,
7201  const VkImageCreateInfo& imageCreateInfo,
7202  const VmaAllocationCreateInfo& allocCreateInfo,
7203  VmaAllocation allocation);
7204  void RecordDestroyBuffer(uint32_t frameIndex,
7205  VmaAllocation allocation);
7206  void RecordDestroyImage(uint32_t frameIndex,
7207  VmaAllocation allocation);
7208  void RecordTouchAllocation(uint32_t frameIndex,
7209  VmaAllocation allocation);
7210  void RecordGetAllocationInfo(uint32_t frameIndex,
7211  VmaAllocation allocation);
7212  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7213  VmaPool pool);
7214  void RecordDefragmentationBegin(uint32_t frameIndex,
7215  const VmaDefragmentationInfo2& info,
7216  VmaDefragmentationContext ctx);
7217  void RecordDefragmentationEnd(uint32_t frameIndex,
7218  VmaDefragmentationContext ctx);
7219  void RecordSetPoolName(uint32_t frameIndex,
7220  VmaPool pool,
7221  const char* name);
7222 
7223 private:
7224  struct CallParams
7225  {
7226  uint32_t threadId;
7227  double time;
7228  };
7229 
7230  class UserDataString
7231  {
7232  public:
7233  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7234  const char* GetString() const { return m_Str; }
7235 
7236  private:
7237  char m_PtrStr[17];
7238  const char* m_Str;
7239  };
7240 
7241  bool m_UseMutex;
7242  VmaRecordFlags m_Flags;
7243  FILE* m_File;
7244  VMA_MUTEX m_FileMutex;
7245  int64_t m_Freq;
7246  int64_t m_StartCounter;
7247 
7248  void GetBasicParams(CallParams& outParams);
7249 
7250  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7251  template<typename T>
7252  void PrintPointerList(uint64_t count, const T* pItems)
7253  {
7254  if(count)
7255  {
7256  fprintf(m_File, "%p", pItems[0]);
7257  for(uint64_t i = 1; i < count; ++i)
7258  {
7259  fprintf(m_File, " %p", pItems[i]);
7260  }
7261  }
7262  }
7263 
7264  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7265  void Flush();
7266 };
7267 
7268 #endif // #if VMA_RECORDING_ENABLED
7269 
7270 /*
7271 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7272 */
7273 class VmaAllocationObjectAllocator
7274 {
7275  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7276 public:
7277  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7278 
7279  template<typename... Types> VmaAllocation Allocate(Types... args);
7280  void Free(VmaAllocation hAlloc);
7281 
7282 private:
7283  VMA_MUTEX m_Mutex;
7284  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7285 };
7286 
7287 struct VmaCurrentBudgetData
7288 {
7289  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7290  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7291 
7292 #if VMA_MEMORY_BUDGET
7293  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7294  VMA_RW_MUTEX m_BudgetMutex;
7295  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7296  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7297  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7298 #endif // #if VMA_MEMORY_BUDGET
7299 
7300  VmaCurrentBudgetData()
7301  {
7302  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7303  {
7304  m_BlockBytes[heapIndex] = 0;
7305  m_AllocationBytes[heapIndex] = 0;
7306 #if VMA_MEMORY_BUDGET
7307  m_VulkanUsage[heapIndex] = 0;
7308  m_VulkanBudget[heapIndex] = 0;
7309  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7310 #endif
7311  }
7312 
7313 #if VMA_MEMORY_BUDGET
7314  m_OperationsSinceBudgetFetch = 0;
7315 #endif
7316  }
7317 
7318  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7319  {
7320  m_AllocationBytes[heapIndex] += allocationSize;
7321 #if VMA_MEMORY_BUDGET
7322  ++m_OperationsSinceBudgetFetch;
7323 #endif
7324  }
7325 
7326  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7327  {
7328  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // Underflow would mean unbalanced Add/RemoveAllocation calls.
7329  m_AllocationBytes[heapIndex] -= allocationSize;
7330 #if VMA_MEMORY_BUDGET
7331  ++m_OperationsSinceBudgetFetch;
7332 #endif
7333  }
7334 };
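/*
A self-contained sketch of the counting pattern used above: per-heap byte
counts are plain atomics updated from many threads without a lock (simplified
analog of AddAllocation()/RemoveAllocation()):

    #include <atomic>
    #include <cstdint>

    struct HeapCounters
    {
        std::atomic<uint64_t> allocationBytes{0};

        void AddAllocation(uint64_t size) { allocationBytes += size; }
        void RemoveAllocation(uint64_t size)
        {
            // Assumes balanced calls; underflow would indicate a bookkeeping bug.
            allocationBytes -= size;
        }
    };
*/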
7335 
7336 // Main allocator object.
7337 struct VmaAllocator_T
7338 {
7339  VMA_CLASS_NO_COPY(VmaAllocator_T)
7340 public:
7341  bool m_UseMutex;
7342  uint32_t m_VulkanApiVersion;
7343  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7344  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7345  bool m_UseExtMemoryBudget;
7346  bool m_UseAmdDeviceCoherentMemory;
7347  bool m_UseKhrBufferDeviceAddress;
7348  VkDevice m_hDevice;
7349  VkInstance m_hInstance;
7350  bool m_AllocationCallbacksSpecified;
7351  VkAllocationCallbacks m_AllocationCallbacks;
7352  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7353  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7354 
7355  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so allocations from it cannot exceed the heap size.
7356  uint32_t m_HeapSizeLimitMask;
7357 
7358  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7359  VkPhysicalDeviceMemoryProperties m_MemProps;
7360 
7361  // Default pools.
7362  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7363 
7364  // Each vector is sorted by memory (handle value).
7365  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7366  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7367  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7368 
7369  VmaCurrentBudgetData m_Budget;
7370 
7371  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7372  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7373  ~VmaAllocator_T();
7374 
7375  const VkAllocationCallbacks* GetAllocationCallbacks() const
7376  {
7377  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7378  }
7379  const VmaVulkanFunctions& GetVulkanFunctions() const
7380  {
7381  return m_VulkanFunctions;
7382  }
7383 
7384  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7385 
7386  VkDeviceSize GetBufferImageGranularity() const
7387  {
7388  return VMA_MAX(
7389  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7390  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7391  }
7392 
7393  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7394  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7395 
7396  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7397  {
7398  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7399  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7400  }
7401  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7402  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7403  {
7404  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7405  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7406  }
7407  // Minimum alignment for all allocations in specific memory type.
7408  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7409  {
7410  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7411  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7412  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7413  }
7414 
7415  bool IsIntegratedGpu() const
7416  {
7417  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7418  }
7419 
7420  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7421 
7422 #if VMA_RECORDING_ENABLED
7423  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7424 #endif
7425 
7426  void GetBufferMemoryRequirements(
7427  VkBuffer hBuffer,
7428  VkMemoryRequirements& memReq,
7429  bool& requiresDedicatedAllocation,
7430  bool& prefersDedicatedAllocation) const;
7431  void GetImageMemoryRequirements(
7432  VkImage hImage,
7433  VkMemoryRequirements& memReq,
7434  bool& requiresDedicatedAllocation,
7435  bool& prefersDedicatedAllocation) const;
7436 
7437  // Main allocation function.
7438  VkResult AllocateMemory(
7439  const VkMemoryRequirements& vkMemReq,
7440  bool requiresDedicatedAllocation,
7441  bool prefersDedicatedAllocation,
7442  VkBuffer dedicatedBuffer,
7443  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7444  VkImage dedicatedImage,
7445  const VmaAllocationCreateInfo& createInfo,
7446  VmaSuballocationType suballocType,
7447  size_t allocationCount,
7448  VmaAllocation* pAllocations);
7449 
7450  // Main deallocation function.
7451  void FreeMemory(
7452  size_t allocationCount,
7453  const VmaAllocation* pAllocations);
7454 
7455  VkResult ResizeAllocation(
7456  const VmaAllocation alloc,
7457  VkDeviceSize newSize);
7458 
7459  void CalculateStats(VmaStats* pStats);
7460 
7461  void GetBudget(
7462  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7463 
7464 #if VMA_STATS_STRING_ENABLED
7465  void PrintDetailedMap(class VmaJsonWriter& json);
7466 #endif
7467 
7468  VkResult DefragmentationBegin(
7469  const VmaDefragmentationInfo2& info,
7470  VmaDefragmentationStats* pStats,
7471  VmaDefragmentationContext* pContext);
7472  VkResult DefragmentationEnd(
7473  VmaDefragmentationContext context);
7474 
7475  VkResult DefragmentationPassBegin(
7476  VmaDefragmentationPassInfo* pInfo,
7477  VmaDefragmentationContext context);
7478  VkResult DefragmentationPassEnd(
7479  VmaDefragmentationContext context);
7480 
7481  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7482  bool TouchAllocation(VmaAllocation hAllocation);
7483 
7484  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7485  void DestroyPool(VmaPool pool);
7486  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7487 
7488  void SetCurrentFrameIndex(uint32_t frameIndex);
7489  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7490 
7491  void MakePoolAllocationsLost(
7492  VmaPool hPool,
7493  size_t* pLostAllocationCount);
7494  VkResult CheckPoolCorruption(VmaPool hPool);
7495  VkResult CheckCorruption(uint32_t memoryTypeBits);
7496 
7497  void CreateLostAllocation(VmaAllocation* pAllocation);
7498 
7499  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7500  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7501  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7502  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7503  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7504  VkResult BindVulkanBuffer(
7505  VkDeviceMemory memory,
7506  VkDeviceSize memoryOffset,
7507  VkBuffer buffer,
7508  const void* pNext);
7509  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7510  VkResult BindVulkanImage(
7511  VkDeviceMemory memory,
7512  VkDeviceSize memoryOffset,
7513  VkImage image,
7514  const void* pNext);
7515 
7516  VkResult Map(VmaAllocation hAllocation, void** ppData);
7517  void Unmap(VmaAllocation hAllocation);
7518 
7519  VkResult BindBufferMemory(
7520  VmaAllocation hAllocation,
7521  VkDeviceSize allocationLocalOffset,
7522  VkBuffer hBuffer,
7523  const void* pNext);
7524  VkResult BindImageMemory(
7525  VmaAllocation hAllocation,
7526  VkDeviceSize allocationLocalOffset,
7527  VkImage hImage,
7528  const void* pNext);
7529 
7530  void FlushOrInvalidateAllocation(
7531  VmaAllocation hAllocation,
7532  VkDeviceSize offset, VkDeviceSize size,
7533  VMA_CACHE_OPERATION op);
7534 
7535  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7536 
7537  /*
7538  Returns a bit mask of memory types that can support defragmentation on GPU,
7539  i.e. that allow creation of the buffer required for copy operations.
7540  */
7541  uint32_t GetGpuDefragmentationMemoryTypeBits();
7542 
7543 private:
7544  VkDeviceSize m_PreferredLargeHeapBlockSize;
7545 
7546  VkPhysicalDevice m_PhysicalDevice;
7547  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7548  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7549 
7550  VMA_RW_MUTEX m_PoolsMutex;
7551  // Protected by m_PoolsMutex. Sorted by pointer value.
7552  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7553  uint32_t m_NextPoolId;
7554 
7555  VmaVulkanFunctions m_VulkanFunctions;
7556 
7557  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7558  uint32_t m_GlobalMemoryTypeBits;
7559 
7560 #if VMA_RECORDING_ENABLED
7561  VmaRecorder* m_pRecorder;
7562 #endif
7563 
7564  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7565  void ImportVulkanFunctions_Static();
7566  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
7567  void ImportVulkanFunctions_Dynamic();
7568  void ValidateVulkanFunctions();
7569 
7570  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7571 
7572  VkResult AllocateMemoryOfType(
7573  VkDeviceSize size,
7574  VkDeviceSize alignment,
7575  bool dedicatedAllocation,
7576  VkBuffer dedicatedBuffer,
7577  VkBufferUsageFlags dedicatedBufferUsage,
7578  VkImage dedicatedImage,
7579  const VmaAllocationCreateInfo& createInfo,
7580  uint32_t memTypeIndex,
7581  VmaSuballocationType suballocType,
7582  size_t allocationCount,
7583  VmaAllocation* pAllocations);
7584 
7585  // Helper function only to be used inside AllocateDedicatedMemory.
7586  VkResult AllocateDedicatedMemoryPage(
7587  VkDeviceSize size,
7588  VmaSuballocationType suballocType,
7589  uint32_t memTypeIndex,
7590  const VkMemoryAllocateInfo& allocInfo,
7591  bool map,
7592  bool isUserDataString,
7593  void* pUserData,
7594  VmaAllocation* pAllocation);
7595 
7596  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7597  VkResult AllocateDedicatedMemory(
7598  VkDeviceSize size,
7599  VmaSuballocationType suballocType,
7600  uint32_t memTypeIndex,
7601  bool withinBudget,
7602  bool map,
7603  bool isUserDataString,
7604  void* pUserData,
7605  VkBuffer dedicatedBuffer,
7606  VkBufferUsageFlags dedicatedBufferUsage,
7607  VkImage dedicatedImage,
7608  size_t allocationCount,
7609  VmaAllocation* pAllocations);
7610 
7611  void FreeDedicatedMemory(const VmaAllocation allocation);
7612 
7613  /*
7614  Calculates and returns a bit mask of memory types that can support defragmentation
7615  on GPU, i.e. that allow creation of the buffer required for copy operations.
7616  */
7617  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7618 
7619  uint32_t CalculateGlobalMemoryTypeBits() const;
7620 
7621 #if VMA_MEMORY_BUDGET
7622  void UpdateVulkanBudget();
7623 #endif // #if VMA_MEMORY_BUDGET
7624 };
7625 
7626 ////////////////////////////////////////////////////////////////////////////////
7627 // Memory allocation #2 after VmaAllocator_T definition
7628 
7629 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7630 {
7631  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7632 }
7633 
7634 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7635 {
7636  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7637 }
7638 
7639 template<typename T>
7640 static T* VmaAllocate(VmaAllocator hAllocator)
7641 {
7642  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7643 }
7644 
7645 template<typename T>
7646 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7647 {
7648  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7649 }
7650 
7651 template<typename T>
7652 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7653 {
7654  if(ptr != VMA_NULL)
7655  {
7656  ptr->~T();
7657  VmaFree(hAllocator, ptr);
7658  }
7659 }
7660 
7661 template<typename T>
7662 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7663 {
7664  if(ptr != VMA_NULL)
7665  {
7666  for(size_t i = count; i--; )
7667  ptr[i].~T();
7668  VmaFree(hAllocator, ptr);
7669  }
7670 }
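/*
vma_delete() and vma_delete_array() undo a construction done with placement
new over raw memory from VmaMalloc(): run the destructor explicitly, then
release the storage. A self-contained sketch of that pairing (simplified
analog using malloc/free instead of the allocation callbacks, and ignoring
over-aligned types):

    #include <cstdlib>
    #include <new>

    template<typename T>
    static T* my_new()
    {
        void* p = std::malloc(sizeof(T));
        return p != nullptr ? new(p) T() : nullptr; // construct in raw storage
    }

    template<typename T>
    static void my_delete(T* ptr)
    {
        if(ptr != nullptr)
        {
            ptr->~T();      // destroy explicitly...
            std::free(ptr); // ...then free the raw storage
        }
    }

Note that vma_delete_array() above destroys elements in reverse order, matching
the order guarantees of regular delete[].
*/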
7671 
7672 ////////////////////////////////////////////////////////////////////////////////
7673 // VmaStringBuilder
7674 
7675 #if VMA_STATS_STRING_ENABLED
7676 
7677 class VmaStringBuilder
7678 {
7679 public:
7680  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7681  size_t GetLength() const { return m_Data.size(); }
7682  const char* GetData() const { return m_Data.data(); }
7683 
7684  void Add(char ch) { m_Data.push_back(ch); }
7685  void Add(const char* pStr);
7686  void AddNewLine() { Add('\n'); }
7687  void AddNumber(uint32_t num);
7688  void AddNumber(uint64_t num);
7689  void AddPointer(const void* ptr);
7690 
7691 private:
7692  VmaVector< char, VmaStlAllocator<char> > m_Data;
7693 };
7694 
7695 void VmaStringBuilder::Add(const char* pStr)
7696 {
7697  const size_t strLen = strlen(pStr);
7698  if(strLen > 0)
7699  {
7700  const size_t oldCount = m_Data.size();
7701  m_Data.resize(oldCount + strLen);
7702  memcpy(m_Data.data() + oldCount, pStr, strLen);
7703  }
7704 }
7705 
7706 void VmaStringBuilder::AddNumber(uint32_t num)
7707 {
7708  char buf[11];
7709  buf[10] = '\0';
7710  char *p = &buf[10];
7711  do
7712  {
7713  *--p = '0' + (num % 10);
7714  num /= 10;
7715  }
7716  while(num);
7717  Add(p);
7718 }
7719 
7720 void VmaStringBuilder::AddNumber(uint64_t num)
7721 {
7722  char buf[21];
7723  buf[20] = '\0';
7724  char *p = &buf[20];
7725  do
7726  {
7727  *--p = '0' + (num % 10);
7728  num /= 10;
7729  }
7730  while(num);
7731  Add(p);
7732 }
7733 
7734 void VmaStringBuilder::AddPointer(const void* ptr)
7735 {
7736  char buf[21];
7737  VmaPtrToStr(buf, sizeof(buf), ptr);
7738  Add(buf);
7739 }
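/*
A minimal usage sketch of VmaStringBuilder (assuming `allocator` is a valid
VmaAllocator handle):

    VmaStringBuilder sb(allocator);
    sb.Add("Heap ");
    sb.AddNumber((uint32_t)2);
    sb.Add(": ");
    sb.AddNumber((uint64_t)1073741824); // 1 GiB
    sb.AddNewLine();
    // sb.GetData() points at the accumulated characters and sb.GetLength()
    // is their count; the buffer is not null-terminated.
*/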
7740 
7741 #endif // #if VMA_STATS_STRING_ENABLED
7742 
7743 ////////////////////////////////////////////////////////////////////////////////
7744 // VmaJsonWriter
7745 
7746 #if VMA_STATS_STRING_ENABLED
7747 
7748 class VmaJsonWriter
7749 {
7750  VMA_CLASS_NO_COPY(VmaJsonWriter)
7751 public:
7752  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7753  ~VmaJsonWriter();
7754 
7755  void BeginObject(bool singleLine = false);
7756  void EndObject();
7757 
7758  void BeginArray(bool singleLine = false);
7759  void EndArray();
7760 
7761  void WriteString(const char* pStr);
7762  void BeginString(const char* pStr = VMA_NULL);
7763  void ContinueString(const char* pStr);
7764  void ContinueString(uint32_t n);
7765  void ContinueString(uint64_t n);
7766  void ContinueString_Pointer(const void* ptr);
7767  void EndString(const char* pStr = VMA_NULL);
7768 
7769  void WriteNumber(uint32_t n);
7770  void WriteNumber(uint64_t n);
7771  void WriteBool(bool b);
7772  void WriteNull();
7773 
7774 private:
7775  static const char* const INDENT;
7776 
7777  enum COLLECTION_TYPE
7778  {
7779  COLLECTION_TYPE_OBJECT,
7780  COLLECTION_TYPE_ARRAY,
7781  };
7782  struct StackItem
7783  {
7784  COLLECTION_TYPE type;
7785  uint32_t valueCount;
7786  bool singleLineMode;
7787  };
7788 
7789  VmaStringBuilder& m_SB;
7790  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7791  bool m_InsideString;
7792 
7793  void BeginValue(bool isString);
7794  void WriteIndent(bool oneLess = false);
7795 };
7796 
7797 const char* const VmaJsonWriter::INDENT = " ";
7798 
7799 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7800  m_SB(sb),
7801  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7802  m_InsideString(false)
7803 {
7804 }
7805 
7806 VmaJsonWriter::~VmaJsonWriter()
7807 {
7808  VMA_ASSERT(!m_InsideString);
7809  VMA_ASSERT(m_Stack.empty());
7810 }
7811 
7812 void VmaJsonWriter::BeginObject(bool singleLine)
7813 {
7814  VMA_ASSERT(!m_InsideString);
7815 
7816  BeginValue(false);
7817  m_SB.Add('{');
7818 
7819  StackItem item;
7820  item.type = COLLECTION_TYPE_OBJECT;
7821  item.valueCount = 0;
7822  item.singleLineMode = singleLine;
7823  m_Stack.push_back(item);
7824 }
7825 
7826 void VmaJsonWriter::EndObject()
7827 {
7828  VMA_ASSERT(!m_InsideString);
7829 
7830  WriteIndent(true);
7831  m_SB.Add('}');
7832 
7833  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7834  m_Stack.pop_back();
7835 }
7836 
7837 void VmaJsonWriter::BeginArray(bool singleLine)
7838 {
7839  VMA_ASSERT(!m_InsideString);
7840 
7841  BeginValue(false);
7842  m_SB.Add('[');
7843 
7844  StackItem item;
7845  item.type = COLLECTION_TYPE_ARRAY;
7846  item.valueCount = 0;
7847  item.singleLineMode = singleLine;
7848  m_Stack.push_back(item);
7849 }
7850 
7851 void VmaJsonWriter::EndArray()
7852 {
7853  VMA_ASSERT(!m_InsideString);
7854 
7855  WriteIndent(true);
7856  m_SB.Add(']');
7857 
7858  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7859  m_Stack.pop_back();
7860 }
7861 
7862 void VmaJsonWriter::WriteString(const char* pStr)
7863 {
7864  BeginString(pStr);
7865  EndString();
7866 }
7867 
7868 void VmaJsonWriter::BeginString(const char* pStr)
7869 {
7870  VMA_ASSERT(!m_InsideString);
7871 
7872  BeginValue(true);
7873  m_SB.Add('"');
7874  m_InsideString = true;
7875  if(pStr != VMA_NULL && pStr[0] != '\0')
7876  {
7877  ContinueString(pStr);
7878  }
7879 }
7880 
7881 void VmaJsonWriter::ContinueString(const char* pStr)
7882 {
7883  VMA_ASSERT(m_InsideString);
7884 
7885  const size_t strLen = strlen(pStr);
7886  for(size_t i = 0; i < strLen; ++i)
7887  {
7888  char ch = pStr[i];
7889  if(ch == '\\')
7890  {
7891  m_SB.Add("\\\\");
7892  }
7893  else if(ch == '"')
7894  {
7895  m_SB.Add("\\\"");
7896  }
7897  else if(ch >= 32)
7898  {
7899  m_SB.Add(ch);
7900  }
7901  else switch(ch)
7902  {
7903  case '\b':
7904  m_SB.Add("\\b");
7905  break;
7906  case '\f':
7907  m_SB.Add("\\f");
7908  break;
7909  case '\n':
7910  m_SB.Add("\\n");
7911  break;
7912  case '\r':
7913  m_SB.Add("\\r");
7914  break;
7915  case '\t':
7916  m_SB.Add("\\t");
7917  break;
7918  default:
7919  VMA_ASSERT(0 && "Character not currently supported.");
7920  break;
7921  }
7922  }
7923 }
7924 
7925 void VmaJsonWriter::ContinueString(uint32_t n)
7926 {
7927  VMA_ASSERT(m_InsideString);
7928  m_SB.AddNumber(n);
7929 }
7930 
7931 void VmaJsonWriter::ContinueString(uint64_t n)
7932 {
7933  VMA_ASSERT(m_InsideString);
7934  m_SB.AddNumber(n);
7935 }
7936 
7937 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7938 {
7939  VMA_ASSERT(m_InsideString);
7940  m_SB.AddPointer(ptr);
7941 }
7942 
7943 void VmaJsonWriter::EndString(const char* pStr)
7944 {
7945  VMA_ASSERT(m_InsideString);
7946  if(pStr != VMA_NULL && pStr[0] != '\0')
7947  {
7948  ContinueString(pStr);
7949  }
7950  m_SB.Add('"');
7951  m_InsideString = false;
7952 }
7953 
7954 void VmaJsonWriter::WriteNumber(uint32_t n)
7955 {
7956  VMA_ASSERT(!m_InsideString);
7957  BeginValue(false);
7958  m_SB.AddNumber(n);
7959 }
7960 
7961 void VmaJsonWriter::WriteNumber(uint64_t n)
7962 {
7963  VMA_ASSERT(!m_InsideString);
7964  BeginValue(false);
7965  m_SB.AddNumber(n);
7966 }
7967 
7968 void VmaJsonWriter::WriteBool(bool b)
7969 {
7970  VMA_ASSERT(!m_InsideString);
7971  BeginValue(false);
7972  m_SB.Add(b ? "true" : "false");
7973 }
7974 
7975 void VmaJsonWriter::WriteNull()
7976 {
7977  VMA_ASSERT(!m_InsideString);
7978  BeginValue(false);
7979  m_SB.Add("null");
7980 }
7981 
7982 void VmaJsonWriter::BeginValue(bool isString)
7983 {
7984  if(!m_Stack.empty())
7985  {
7986  StackItem& currItem = m_Stack.back();
7987  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7988  currItem.valueCount % 2 == 0)
7989  {
7990  VMA_ASSERT(isString);
7991  }
7992 
7993  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7994  currItem.valueCount % 2 != 0)
7995  {
7996  m_SB.Add(": ");
7997  }
7998  else if(currItem.valueCount > 0)
7999  {
8000  m_SB.Add(", ");
8001  WriteIndent();
8002  }
8003  else
8004  {
8005  WriteIndent();
8006  }
8007  ++currItem.valueCount;
8008  }
8009 }
8010 
8011 void VmaJsonWriter::WriteIndent(bool oneLess)
8012 {
8013  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8014  {
8015  m_SB.AddNewLine();
8016 
8017  size_t count = m_Stack.size();
8018  if(count > 0 && oneLess)
8019  {
8020  --count;
8021  }
8022  for(size_t i = 0; i < count; ++i)
8023  {
8024  m_SB.Add(INDENT);
8025  }
8026  }
8027 }
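/*
A minimal usage sketch of VmaJsonWriter. Inside an object, values alternate
name/value; BeginValue() asserts that every value at an even position is a
string (the key). Assuming `sb` is a VmaStringBuilder and `callbacks` are the
allocation callbacks:

    VmaJsonWriter json(callbacks, sb);
    json.BeginObject();
    json.WriteString("Name");          // key
    json.WriteString("Block");         // value
    json.WriteString("Size");          // key
    json.WriteNumber((uint64_t)65536); // value
    json.EndObject();
    // sb now holds a pretty-printed object equivalent to:
    // { "Name": "Block", "Size": 65536 }
*/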
8028 
8029 #endif // #if VMA_STATS_STRING_ENABLED
8030 
8031 ////////////////////////////////////////////////////////////////////////////////
8032 
8033 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8034 {
8035  if(IsUserDataString())
8036  {
8037  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8038 
8039  FreeUserDataString(hAllocator);
8040 
8041  if(pUserData != VMA_NULL)
8042  {
8043  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8044  }
8045  }
8046  else
8047  {
8048  m_pUserData = pUserData;
8049  }
8050 }
8051 
8052 void VmaAllocation_T::ChangeBlockAllocation(
8053  VmaAllocator hAllocator,
8054  VmaDeviceMemoryBlock* block,
8055  VkDeviceSize offset)
8056 {
8057  VMA_ASSERT(block != VMA_NULL);
8058  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8059 
8060  // Move mapping reference counter from old block to new block.
8061  if(block != m_BlockAllocation.m_Block)
8062  {
8063  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8064  if(IsPersistentMap())
8065  ++mapRefCount;
8066  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8067  block->Map(hAllocator, mapRefCount, VMA_NULL);
8068  }
8069 
8070  m_BlockAllocation.m_Block = block;
8071  m_BlockAllocation.m_Offset = offset;
8072 }
8073 
8074 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8075 {
8076  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8077  m_BlockAllocation.m_Offset = newOffset;
8078 }
8079 
8080 VkDeviceSize VmaAllocation_T::GetOffset() const
8081 {
8082  switch(m_Type)
8083  {
8084  case ALLOCATION_TYPE_BLOCK:
8085  return m_BlockAllocation.m_Offset;
8086  case ALLOCATION_TYPE_DEDICATED:
8087  return 0;
8088  default:
8089  VMA_ASSERT(0);
8090  return 0;
8091  }
8092 }
8093 
8094 VkDeviceMemory VmaAllocation_T::GetMemory() const
8095 {
8096  switch(m_Type)
8097  {
8098  case ALLOCATION_TYPE_BLOCK:
8099  return m_BlockAllocation.m_Block->GetDeviceMemory();
8100  case ALLOCATION_TYPE_DEDICATED:
8101  return m_DedicatedAllocation.m_hMemory;
8102  default:
8103  VMA_ASSERT(0);
8104  return VK_NULL_HANDLE;
8105  }
8106 }
8107 
8108 void* VmaAllocation_T::GetMappedData() const
8109 {
8110  switch(m_Type)
8111  {
8112  case ALLOCATION_TYPE_BLOCK:
8113  if(m_MapCount != 0)
8114  {
8115  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8116  VMA_ASSERT(pBlockData != VMA_NULL);
8117  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8118  }
8119  else
8120  {
8121  return VMA_NULL;
8122  }
8123  break;
8124  case ALLOCATION_TYPE_DEDICATED:
8125  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8126  return m_DedicatedAllocation.m_pMappedData;
8127  default:
8128  VMA_ASSERT(0);
8129  return VMA_NULL;
8130  }
8131 }
8132 
8133 bool VmaAllocation_T::CanBecomeLost() const
8134 {
8135  switch(m_Type)
8136  {
8137  case ALLOCATION_TYPE_BLOCK:
8138  return m_BlockAllocation.m_CanBecomeLost;
8139  case ALLOCATION_TYPE_DEDICATED:
8140  return false;
8141  default:
8142  VMA_ASSERT(0);
8143  return false;
8144  }
8145 }
8146 
8147 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8148 {
8149  VMA_ASSERT(CanBecomeLost());
8150 
8151  /*
8152  Warning: This is a carefully designed algorithm.
8153  Do not modify unless you really know what you're doing :)
8154  */
8155  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8156  for(;;)
8157  {
8158  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8159  {
8160  VMA_ASSERT(0);
8161  return false;
8162  }
8163  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8164  {
8165  return false;
8166  }
8167  else // Last use time earlier than current time.
8168  {
8169  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8170  {
8171  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8172  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8173  return true;
8174  }
8175  }
8176  }
8177 }
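/*
MakeLost() above is the classic compare-exchange retry loop: read the current
value, decide, and attempt to publish, restarting if another thread raced in
between. A self-contained sketch of the same shape (simplified analog, with
LOST standing in for VMA_FRAME_INDEX_LOST):

    #include <atomic>
    #include <cstdint>

    static const uint32_t LOST = UINT32_MAX;

    // Returns true only for the one caller that transitions the value to LOST.
    static bool TryMakeLost(std::atomic<uint32_t>& lastUseFrame,
        uint32_t currentFrame, uint32_t frameInUseCount)
    {
        uint32_t observed = lastUseFrame.load();
        for(;;)
        {
            if(observed == LOST)
                return false; // another thread already marked it lost
            if(observed + frameInUseCount >= currentFrame)
                return false; // still potentially in use
            // On failure, compare_exchange_weak reloads `observed`, so the
            // next iteration re-evaluates against the fresh value.
            if(lastUseFrame.compare_exchange_weak(observed, LOST))
                return true;
        }
    }
*/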
8178 
8179 #if VMA_STATS_STRING_ENABLED
8180 
8181 // Names correspond to the values of enum VmaSuballocationType.
8182 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8183  "FREE",
8184  "UNKNOWN",
8185  "BUFFER",
8186  "IMAGE_UNKNOWN",
8187  "IMAGE_LINEAR",
8188  "IMAGE_OPTIMAL",
8189 };
8190 
8191 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8192 {
8193  json.WriteString("Type");
8194  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8195 
8196  json.WriteString("Size");
8197  json.WriteNumber(m_Size);
8198 
8199  if(m_pUserData != VMA_NULL)
8200  {
8201  json.WriteString("UserData");
8202  if(IsUserDataString())
8203  {
8204  json.WriteString((const char*)m_pUserData);
8205  }
8206  else
8207  {
8208  json.BeginString();
8209  json.ContinueString_Pointer(m_pUserData);
8210  json.EndString();
8211  }
8212  }
8213 
8214  json.WriteString("CreationFrameIndex");
8215  json.WriteNumber(m_CreationFrameIndex);
8216 
8217  json.WriteString("LastUseFrameIndex");
8218  json.WriteNumber(GetLastUseFrameIndex());
8219 
8220  if(m_BufferImageUsage != 0)
8221  {
8222  json.WriteString("Usage");
8223  json.WriteNumber(m_BufferImageUsage);
8224  }
8225 }
8226 
8227 #endif
8228 
8229 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8230 {
8231  VMA_ASSERT(IsUserDataString());
8232  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8233  m_pUserData = VMA_NULL;
8234 }
8235 
8236 void VmaAllocation_T::BlockAllocMap()
8237 {
8238  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8239 
8240  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8241  {
8242  ++m_MapCount;
8243  }
8244  else
8245  {
8246  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8247  }
8248 }
8249 
8250 void VmaAllocation_T::BlockAllocUnmap()
8251 {
8252  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8253 
8254  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8255  {
8256  --m_MapCount;
8257  }
8258  else
8259  {
8260  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8261  }
8262 }
8263 
8264 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8265 {
8266  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8267 
8268  if(m_MapCount != 0)
8269  {
8270  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8271  {
8272  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8273  *ppData = m_DedicatedAllocation.m_pMappedData;
8274  ++m_MapCount;
8275  return VK_SUCCESS;
8276  }
8277  else
8278  {
8279  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8280  return VK_ERROR_MEMORY_MAP_FAILED;
8281  }
8282  }
8283  else
8284  {
8285  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8286  hAllocator->m_hDevice,
8287  m_DedicatedAllocation.m_hMemory,
8288  0, // offset
8289  VK_WHOLE_SIZE,
8290  0, // flags
8291  ppData);
8292  if(result == VK_SUCCESS)
8293  {
8294  m_DedicatedAllocation.m_pMappedData = *ppData;
8295  m_MapCount = 1;
8296  }
8297  return result;
8298  }
8299 }
8300 
8301 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8302 {
8303  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8304 
8305  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8306  {
8307  --m_MapCount;
8308  if(m_MapCount == 0)
8309  {
8310  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8311  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8312  hAllocator->m_hDevice,
8313  m_DedicatedAllocation.m_hMemory);
8314  }
8315  }
8316  else
8317  {
8318  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8319  }
8320 }
8321 
8322 #if VMA_STATS_STRING_ENABLED
8323 
8324 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8325 {
8326  json.BeginObject();
8327 
8328  json.WriteString("Blocks");
8329  json.WriteNumber(stat.blockCount);
8330 
8331  json.WriteString("Allocations");
8332  json.WriteNumber(stat.allocationCount);
8333 
8334  json.WriteString("UnusedRanges");
8335  json.WriteNumber(stat.unusedRangeCount);
8336 
8337  json.WriteString("UsedBytes");
8338  json.WriteNumber(stat.usedBytes);
8339 
8340  json.WriteString("UnusedBytes");
8341  json.WriteNumber(stat.unusedBytes);
8342 
8343  if(stat.allocationCount > 1)
8344  {
8345  json.WriteString("AllocationSize");
8346  json.BeginObject(true);
8347  json.WriteString("Min");
8348  json.WriteNumber(stat.allocationSizeMin);
8349  json.WriteString("Avg");
8350  json.WriteNumber(stat.allocationSizeAvg);
8351  json.WriteString("Max");
8352  json.WriteNumber(stat.allocationSizeMax);
8353  json.EndObject();
8354  }
8355 
8356  if(stat.unusedRangeCount > 1)
8357  {
8358  json.WriteString("UnusedRangeSize");
8359  json.BeginObject(true);
8360  json.WriteString("Min");
8361  json.WriteNumber(stat.unusedRangeSizeMin);
8362  json.WriteString("Avg");
8363  json.WriteNumber(stat.unusedRangeSizeAvg);
8364  json.WriteString("Max");
8365  json.WriteNumber(stat.unusedRangeSizeMax);
8366  json.EndObject();
8367  }
8368 
8369  json.EndObject();
8370 }
8371 
8372 #endif // #if VMA_STATS_STRING_ENABLED
8373 
8374 struct VmaSuballocationItemSizeLess
8375 {
8376  bool operator()(
8377  const VmaSuballocationList::iterator lhs,
8378  const VmaSuballocationList::iterator rhs) const
8379  {
8380  return lhs->size < rhs->size;
8381  }
8382  bool operator()(
8383  const VmaSuballocationList::iterator lhs,
8384  VkDeviceSize rhsSize) const
8385  {
8386  return lhs->size < rhsSize;
8387  }
8388 };
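/*
VmaSuballocationItemSizeLess provides both iterator/iterator and iterator/size
orderings, so one comparator can keep m_FreeSuballocationsBySize sorted and
also answer "first free range not smaller than X" queries. A self-contained
sketch of that heterogeneous lookup with std::lower_bound (simplified analog
over plain sizes):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Range { uint64_t size; };

    static const Range* FirstNotSmaller(
        const std::vector<Range>& sortedBySize, uint64_t wantedSize)
    {
        auto it = std::lower_bound(sortedBySize.begin(), sortedBySize.end(),
            wantedSize,
            [](const Range& r, uint64_t s) { return r.size < s; });
        return it != sortedBySize.end() ? &*it : nullptr;
    }
*/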
8389 
8390 
8391 ////////////////////////////////////////////////////////////////////////////////
8392 // class VmaBlockMetadata
8393 
8394 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8395  m_Size(0),
8396  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8397 {
8398 }
8399 
8400 #if VMA_STATS_STRING_ENABLED
8401 
8402 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8403  VkDeviceSize unusedBytes,
8404  size_t allocationCount,
8405  size_t unusedRangeCount) const
8406 {
8407  json.BeginObject();
8408 
8409  json.WriteString("TotalBytes");
8410  json.WriteNumber(GetSize());
8411 
8412  json.WriteString("UnusedBytes");
8413  json.WriteNumber(unusedBytes);
8414 
8415  json.WriteString("Allocations");
8416  json.WriteNumber((uint64_t)allocationCount);
8417 
8418  json.WriteString("UnusedRanges");
8419  json.WriteNumber((uint64_t)unusedRangeCount);
8420 
8421  json.WriteString("Suballocations");
8422  json.BeginArray();
8423 }
8424 
8425 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8426  VkDeviceSize offset,
8427  VmaAllocation hAllocation) const
8428 {
8429  json.BeginObject(true);
8430 
8431  json.WriteString("Offset");
8432  json.WriteNumber(offset);
8433 
8434  hAllocation->PrintParameters(json);
8435 
8436  json.EndObject();
8437 }
8438 
8439 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8440  VkDeviceSize offset,
8441  VkDeviceSize size) const
8442 {
8443  json.BeginObject(true);
8444 
8445  json.WriteString("Offset");
8446  json.WriteNumber(offset);
8447 
8448  json.WriteString("Type");
8449  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8450 
8451  json.WriteString("Size");
8452  json.WriteNumber(size);
8453 
8454  json.EndObject();
8455 }
8456 
8457 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8458 {
8459  json.EndArray();
8460  json.EndObject();
8461 }
8462 
8463 #endif // #if VMA_STATS_STRING_ENABLED
8464 
8465 ////////////////////////////////////////////////////////////////////////////////
8466 // class VmaBlockMetadata_Generic
8467 
8468 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8469  VmaBlockMetadata(hAllocator),
8470  m_FreeCount(0),
8471  m_SumFreeSize(0),
8472  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8473  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8474 {
8475 }
8476 
8477 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8478 {
8479 }
8480 
8481 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8482 {
8483  VmaBlockMetadata::Init(size);
8484 
8485  m_FreeCount = 1;
8486  m_SumFreeSize = size;
8487 
8488  VmaSuballocation suballoc = {};
8489  suballoc.offset = 0;
8490  suballoc.size = size;
8491  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8492  suballoc.hAllocation = VK_NULL_HANDLE;
8493 
8494  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8495  m_Suballocations.push_back(suballoc);
8496  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8497  --suballocItem;
8498  m_FreeSuballocationsBySize.push_back(suballocItem);
8499 }
8500 
8501 bool VmaBlockMetadata_Generic::Validate() const
8502 {
8503  VMA_VALIDATE(!m_Suballocations.empty());
8504 
8505  // Expected offset of new suballocation as calculated from previous ones.
8506  VkDeviceSize calculatedOffset = 0;
8507  // Expected number of free suballocations as calculated from traversing their list.
8508  uint32_t calculatedFreeCount = 0;
8509  // Expected sum size of free suballocations as calculated from traversing their list.
8510  VkDeviceSize calculatedSumFreeSize = 0;
8511  // Expected number of free suballocations that should be registered in
8512  // m_FreeSuballocationsBySize calculated from traversing their list.
8513  size_t freeSuballocationsToRegister = 0;
8514  // True if previous visited suballocation was free.
8515  bool prevFree = false;
8516 
8517  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8518  suballocItem != m_Suballocations.cend();
8519  ++suballocItem)
8520  {
8521  const VmaSuballocation& subAlloc = *suballocItem;
8522 
8523  // Actual offset of this suballocation doesn't match expected one.
8524  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8525 
8526  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8527  // Two adjacent free suballocations are invalid. They should be merged.
8528  VMA_VALIDATE(!prevFree || !currFree);
8529 
8530  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8531 
8532  if(currFree)
8533  {
8534  calculatedSumFreeSize += subAlloc.size;
8535  ++calculatedFreeCount;
8536  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8537  {
8538  ++freeSuballocationsToRegister;
8539  }
8540 
8541  // Margin required between allocations - every free space must be at least that large.
8542  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8543  }
8544  else
8545  {
8546  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8547  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8548 
8549  // Margin required between allocations - previous allocation must be free.
8550  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8551  }
8552 
8553  calculatedOffset += subAlloc.size;
8554  prevFree = currFree;
8555  }
8556 
8557  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8558  // match expected one.
8559  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8560 
8561  VkDeviceSize lastSize = 0;
8562  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8563  {
8564  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8565 
8566  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8567  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8568  // They must be sorted by size ascending.
8569  VMA_VALIDATE(suballocItem->size >= lastSize);
8570 
8571  lastSize = suballocItem->size;
8572  }
8573 
8574  // Check if totals match calculated values.
8575  VMA_VALIDATE(ValidateFreeSuballocationList());
8576  VMA_VALIDATE(calculatedOffset == GetSize());
8577  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8578  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8579 
8580  return true;
8581 }
8582 
8583 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8584 {
8585  if(!m_FreeSuballocationsBySize.empty())
8586  {
8587  return m_FreeSuballocationsBySize.back()->size;
8588  }
8589  else
8590  {
8591  return 0;
8592  }
8593 }
8594 
8595 bool VmaBlockMetadata_Generic::IsEmpty() const
8596 {
8597  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8598 }
8599 
8600 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8601 {
8602  outInfo.blockCount = 1;
8603 
8604  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8605  outInfo.allocationCount = rangeCount - m_FreeCount;
8606  outInfo.unusedRangeCount = m_FreeCount;
8607 
8608  outInfo.unusedBytes = m_SumFreeSize;
8609  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8610 
8611  outInfo.allocationSizeMin = UINT64_MAX;
8612  outInfo.allocationSizeMax = 0;
8613  outInfo.unusedRangeSizeMin = UINT64_MAX;
8614  outInfo.unusedRangeSizeMax = 0;
8615 
8616  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8617  suballocItem != m_Suballocations.cend();
8618  ++suballocItem)
8619  {
8620  const VmaSuballocation& suballoc = *suballocItem;
8621  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8622  {
8623  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8624  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8625  }
8626  else
8627  {
8628  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8629  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8630  }
8631  }
8632 }
8633 
8634 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8635 {
8636  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8637 
8638  inoutStats.size += GetSize();
8639  inoutStats.unusedSize += m_SumFreeSize;
8640  inoutStats.allocationCount += rangeCount - m_FreeCount;
8641  inoutStats.unusedRangeCount += m_FreeCount;
8642  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8643 }
8644 
8645 #if VMA_STATS_STRING_ENABLED
8646 
8647 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8648 {
8649  PrintDetailedMap_Begin(json,
8650  m_SumFreeSize, // unusedBytes
8651  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8652  m_FreeCount); // unusedRangeCount
8653 
8654  size_t i = 0;
8655  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8656  suballocItem != m_Suballocations.cend();
8657  ++suballocItem, ++i)
8658  {
8659  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8660  {
8661  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8662  }
8663  else
8664  {
8665  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8666  }
8667  }
8668 
8669  PrintDetailedMap_End(json);
8670 }
8671 
8672 #endif // #if VMA_STATS_STRING_ENABLED
8673 
8674 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8675  uint32_t currentFrameIndex,
8676  uint32_t frameInUseCount,
8677  VkDeviceSize bufferImageGranularity,
8678  VkDeviceSize allocSize,
8679  VkDeviceSize allocAlignment,
8680  bool upperAddress,
8681  VmaSuballocationType allocType,
8682  bool canMakeOtherLost,
8683  uint32_t strategy,
8684  VmaAllocationRequest* pAllocationRequest)
8685 {
8686  VMA_ASSERT(allocSize > 0);
8687  VMA_ASSERT(!upperAddress);
8688  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8689  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8690  VMA_HEAVY_ASSERT(Validate());
8691 
8692  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8693 
8694  // There is not enough total free space in this block to fulfill the request: Early return.
8695  if(canMakeOtherLost == false &&
8696  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8697  {
8698  return false;
8699  }
8700 
8701  // New algorithm, efficiently searching freeSuballocationsBySize.
8702  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8703  if(freeSuballocCount > 0)
8704  {
8705  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
8706  {
8707  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8708  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8709  m_FreeSuballocationsBySize.data(),
8710  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8711  allocSize + 2 * VMA_DEBUG_MARGIN,
8712  VmaSuballocationItemSizeLess());
8713  size_t index = it - m_FreeSuballocationsBySize.data();
8714  for(; index < freeSuballocCount; ++index)
8715  {
8716  if(CheckAllocation(
8717  currentFrameIndex,
8718  frameInUseCount,
8719  bufferImageGranularity,
8720  allocSize,
8721  allocAlignment,
8722  allocType,
8723  m_FreeSuballocationsBySize[index],
8724  false, // canMakeOtherLost
8725  &pAllocationRequest->offset,
8726  &pAllocationRequest->itemsToMakeLostCount,
8727  &pAllocationRequest->sumFreeSize,
8728  &pAllocationRequest->sumItemSize))
8729  {
8730  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8731  return true;
8732  }
8733  }
8734  }
8735  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8736  {
8737  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8738  it != m_Suballocations.end();
8739  ++it)
8740  {
8741  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8742  currentFrameIndex,
8743  frameInUseCount,
8744  bufferImageGranularity,
8745  allocSize,
8746  allocAlignment,
8747  allocType,
8748  it,
8749  false, // canMakeOtherLost
8750  &pAllocationRequest->offset,
8751  &pAllocationRequest->itemsToMakeLostCount,
8752  &pAllocationRequest->sumFreeSize,
8753  &pAllocationRequest->sumItemSize))
8754  {
8755  pAllocationRequest->item = it;
8756  return true;
8757  }
8758  }
8759  }
8760  else // WORST_FIT, FIRST_FIT
8761  {
8762  // Search starting from the biggest suballocations.
8763  for(size_t index = freeSuballocCount; index--; )
8764  {
8765  if(CheckAllocation(
8766  currentFrameIndex,
8767  frameInUseCount,
8768  bufferImageGranularity,
8769  allocSize,
8770  allocAlignment,
8771  allocType,
8772  m_FreeSuballocationsBySize[index],
8773  false, // canMakeOtherLost
8774  &pAllocationRequest->offset,
8775  &pAllocationRequest->itemsToMakeLostCount,
8776  &pAllocationRequest->sumFreeSize,
8777  &pAllocationRequest->sumItemSize))
8778  {
8779  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8780  return true;
8781  }
8782  }
8783  }
8784  }
8785 
8786  if(canMakeOtherLost)
8787  {
8788  // Brute-force algorithm. TODO: Come up with something better.
8789 
8790  bool found = false;
8791  VmaAllocationRequest tmpAllocRequest = {};
8792  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8793  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8794  suballocIt != m_Suballocations.end();
8795  ++suballocIt)
8796  {
8797  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8798  suballocIt->hAllocation->CanBecomeLost())
8799  {
8800  if(CheckAllocation(
8801  currentFrameIndex,
8802  frameInUseCount,
8803  bufferImageGranularity,
8804  allocSize,
8805  allocAlignment,
8806  allocType,
8807  suballocIt,
8808  canMakeOtherLost,
8809  &tmpAllocRequest.offset,
8810  &tmpAllocRequest.itemsToMakeLostCount,
8811  &tmpAllocRequest.sumFreeSize,
8812  &tmpAllocRequest.sumItemSize))
8813  {
8814  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8815  {
8816  *pAllocationRequest = tmpAllocRequest;
8817  pAllocationRequest->item = suballocIt;
8818  break;
8819  }
8820  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8821  {
8822  *pAllocationRequest = tmpAllocRequest;
8823  pAllocationRequest->item = suballocIt;
8824  found = true;
8825  }
8826  }
8827  }
8828  }
8829 
8830  return found;
8831  }
8832 
8833  return false;
8834 }
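/*
Usage sketch (illustrative, not part of this file; assumes `allocator` and
`bufCreateInfo` are set up elsewhere): the `strategy` parameter above comes from
VmaAllocationCreateInfo::flags. For example:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Selects the binary search over m_FreeSuballocationsBySize (first branch above).
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);

VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT and
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT select the descending scan in the
last branch. VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET is an internal flag,
not part of the public API.
*/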
8835 
8836 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8837  uint32_t currentFrameIndex,
8838  uint32_t frameInUseCount,
8839  VmaAllocationRequest* pAllocationRequest)
8840 {
8841  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8842 
8843  while(pAllocationRequest->itemsToMakeLostCount > 0)
8844  {
8845  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8846  {
8847  ++pAllocationRequest->item;
8848  }
8849  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8850  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8851  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8852  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8853  {
8854  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8855  --pAllocationRequest->itemsToMakeLostCount;
8856  }
8857  else
8858  {
8859  return false;
8860  }
8861  }
8862 
8863  VMA_HEAVY_ASSERT(Validate());
8864  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8865  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8866 
8867  return true;
8868 }
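/*
Worked example (illustrative) of the "can become lost" condition used by
MakeLost(): with frameInUseCount == 2 and currentFrameIndex == 10, an
allocation last used in frame 7 satisfies 7 + 2 < 10 and can be made lost,
while one last used in frame 8 cannot (8 + 2 == 10), because frames 8..10 may
still be reading it.
*/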
8869 
8870 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8871 {
8872  uint32_t lostAllocationCount = 0;
8873  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8874  it != m_Suballocations.end();
8875  ++it)
8876  {
8877  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8878  it->hAllocation->CanBecomeLost() &&
8879  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8880  {
8881  it = FreeSuballocation(it);
8882  ++lostAllocationCount;
8883  }
8884  }
8885  return lostAllocationCount;
8886 }
8887 
8888 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8889 {
8890  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8891  it != m_Suballocations.end();
8892  ++it)
8893  {
8894  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8895  {
8896  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8897  {
8898  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8899  return VK_ERROR_VALIDATION_FAILED_EXT;
8900  }
8901  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8902  {
8903  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8904  return VK_ERROR_VALIDATION_FAILED_EXT;
8905  }
8906  }
8907  }
8908 
8909  return VK_SUCCESS;
8910 }
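/*
Layout assumed by the checks above (illustrative). With VMA_DEBUG_MARGIN > 0,
every allocation is surrounded by margins filled with a known magic value:

    ... | margin | allocation (it->offset, it->size) | margin | ...
          ^ checked at it->offset - VMA_DEBUG_MARGIN   ^ checked at it->offset + it->size

VmaValidateMagicValue() verifies that the pattern is intact; a corrupted margin
means the application wrote outside the bounds of a neighboring allocation.
*/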
8911 
8912 void VmaBlockMetadata_Generic::Alloc(
8913  const VmaAllocationRequest& request,
8914  VmaSuballocationType type,
8915  VkDeviceSize allocSize,
8916  VmaAllocation hAllocation)
8917 {
8918  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8919  VMA_ASSERT(request.item != m_Suballocations.end());
8920  VmaSuballocation& suballoc = *request.item;
8921  // Given suballocation is a free block.
8922  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8923  // Given offset is inside this suballocation.
8924  VMA_ASSERT(request.offset >= suballoc.offset);
8925  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8926  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8927  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8928 
8929  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8930  // it to become used.
8931  UnregisterFreeSuballocation(request.item);
8932 
8933  suballoc.offset = request.offset;
8934  suballoc.size = allocSize;
8935  suballoc.type = type;
8936  suballoc.hAllocation = hAllocation;
8937 
8938  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8939  if(paddingEnd)
8940  {
8941  VmaSuballocation paddingSuballoc = {};
8942  paddingSuballoc.offset = request.offset + allocSize;
8943  paddingSuballoc.size = paddingEnd;
8944  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8945  VmaSuballocationList::iterator next = request.item;
8946  ++next;
8947  const VmaSuballocationList::iterator paddingEndItem =
8948  m_Suballocations.insert(next, paddingSuballoc);
8949  RegisterFreeSuballocation(paddingEndItem);
8950  }
8951 
8952  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8953  if(paddingBegin)
8954  {
8955  VmaSuballocation paddingSuballoc = {};
8956  paddingSuballoc.offset = request.offset - paddingBegin;
8957  paddingSuballoc.size = paddingBegin;
8958  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8959  const VmaSuballocationList::iterator paddingBeginItem =
8960  m_Suballocations.insert(request.item, paddingSuballoc);
8961  RegisterFreeSuballocation(paddingBeginItem);
8962  }
8963 
8964  // Update totals.
8965  m_FreeCount = m_FreeCount - 1;
8966  if(paddingBegin > 0)
8967  {
8968  ++m_FreeCount;
8969  }
8970  if(paddingEnd > 0)
8971  {
8972  ++m_FreeCount;
8973  }
8974  m_SumFreeSize -= allocSize;
8975 }
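/*
Worked example (illustrative): let request.item be a free range at offset 1000
with size 500, request.offset == 1024 and allocSize == 400. Then
paddingBegin == 24 and paddingEnd == 76, so the free range is split into three
suballocations: free [1000, 1024), the allocation [1024, 1424), and free
[1424, 1500). m_FreeCount changes by -1 (the consumed range) +2 (both
paddings), and m_SumFreeSize drops by exactly allocSize == 400.
*/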
8976 
8977 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8978 {
8979  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8980  suballocItem != m_Suballocations.end();
8981  ++suballocItem)
8982  {
8983  VmaSuballocation& suballoc = *suballocItem;
8984  if(suballoc.hAllocation == allocation)
8985  {
8986  FreeSuballocation(suballocItem);
8987  VMA_HEAVY_ASSERT(Validate());
8988  return;
8989  }
8990  }
8991  VMA_ASSERT(0 && "Not found!");
8992 }
8993 
8994 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8995 {
8996  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8997  suballocItem != m_Suballocations.end();
8998  ++suballocItem)
8999  {
9000  VmaSuballocation& suballoc = *suballocItem;
9001  if(suballoc.offset == offset)
9002  {
9003  FreeSuballocation(suballocItem);
9004  return;
9005  }
9006  }
9007  VMA_ASSERT(0 && "Not found!");
9008 }
9009 
9010 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9011 {
9012  VkDeviceSize lastSize = 0;
9013  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9014  {
9015  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9016 
9017  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9018  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9019  VMA_VALIDATE(it->size >= lastSize);
9020  lastSize = it->size;
9021  }
9022  return true;
9023 }
9024 
9025 bool VmaBlockMetadata_Generic::CheckAllocation(
9026  uint32_t currentFrameIndex,
9027  uint32_t frameInUseCount,
9028  VkDeviceSize bufferImageGranularity,
9029  VkDeviceSize allocSize,
9030  VkDeviceSize allocAlignment,
9031  VmaSuballocationType allocType,
9032  VmaSuballocationList::const_iterator suballocItem,
9033  bool canMakeOtherLost,
9034  VkDeviceSize* pOffset,
9035  size_t* itemsToMakeLostCount,
9036  VkDeviceSize* pSumFreeSize,
9037  VkDeviceSize* pSumItemSize) const
9038 {
9039  VMA_ASSERT(allocSize > 0);
9040  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9041  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9042  VMA_ASSERT(pOffset != VMA_NULL);
9043 
9044  *itemsToMakeLostCount = 0;
9045  *pSumFreeSize = 0;
9046  *pSumItemSize = 0;
9047 
9048  if(canMakeOtherLost)
9049  {
9050  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9051  {
9052  *pSumFreeSize = suballocItem->size;
9053  }
9054  else
9055  {
9056  if(suballocItem->hAllocation->CanBecomeLost() &&
9057  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9058  {
9059  ++*itemsToMakeLostCount;
9060  *pSumItemSize = suballocItem->size;
9061  }
9062  else
9063  {
9064  return false;
9065  }
9066  }
9067 
9068  // Remaining size is too small for this request: Early return.
9069  if(GetSize() - suballocItem->offset < allocSize)
9070  {
9071  return false;
9072  }
9073 
9074  // Start from offset equal to beginning of this suballocation.
9075  *pOffset = suballocItem->offset;
9076 
9077  // Apply VMA_DEBUG_MARGIN at the beginning.
9078  if(VMA_DEBUG_MARGIN > 0)
9079  {
9080  *pOffset += VMA_DEBUG_MARGIN;
9081  }
9082 
9083  // Apply alignment.
9084  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9085 
9086  // Check previous suballocations for BufferImageGranularity conflicts.
9087  // Make bigger alignment if necessary.
9088  if(bufferImageGranularity > 1)
9089  {
9090  bool bufferImageGranularityConflict = false;
9091  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9092  while(prevSuballocItem != m_Suballocations.cbegin())
9093  {
9094  --prevSuballocItem;
9095  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9096  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9097  {
9098  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9099  {
9100  bufferImageGranularityConflict = true;
9101  break;
9102  }
9103  }
9104  else
9105  // Already on previous page.
9106  break;
9107  }
9108  if(bufferImageGranularityConflict)
9109  {
9110  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9111  }
9112  }
9113 
9114  // Now that we have final *pOffset, check if we are past suballocItem.
9115  // If yes, return false - this function should be called for another suballocItem as starting point.
9116  if(*pOffset >= suballocItem->offset + suballocItem->size)
9117  {
9118  return false;
9119  }
9120 
9121  // Calculate padding at the beginning based on current offset.
9122  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9123 
9124  // Calculate required margin at the end.
9125  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9126 
9127  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9128  // Another early return check.
9129  if(suballocItem->offset + totalSize > GetSize())
9130  {
9131  return false;
9132  }
9133 
9134  // Advance lastSuballocItem until desired size is reached.
9135  // Update itemsToMakeLostCount.
9136  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9137  if(totalSize > suballocItem->size)
9138  {
9139  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9140  while(remainingSize > 0)
9141  {
9142  ++lastSuballocItem;
9143  if(lastSuballocItem == m_Suballocations.cend())
9144  {
9145  return false;
9146  }
9147  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9148  {
9149  *pSumFreeSize += lastSuballocItem->size;
9150  }
9151  else
9152  {
9153  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9154  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9155  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9156  {
9157  ++*itemsToMakeLostCount;
9158  *pSumItemSize += lastSuballocItem->size;
9159  }
9160  else
9161  {
9162  return false;
9163  }
9164  }
9165  remainingSize = (lastSuballocItem->size < remainingSize) ?
9166  remainingSize - lastSuballocItem->size : 0;
9167  }
9168  }
9169 
9170  // Check next suballocations for BufferImageGranularity conflicts.
9171  // If conflict exists, we must mark more allocations lost or fail.
9172  if(bufferImageGranularity > 1)
9173  {
9174  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9175  ++nextSuballocItem;
9176  while(nextSuballocItem != m_Suballocations.cend())
9177  {
9178  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9179  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9180  {
9181  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9182  {
9183  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9184  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9185  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9186  {
9187  ++*itemsToMakeLostCount;
9188  }
9189  else
9190  {
9191  return false;
9192  }
9193  }
9194  }
9195  else
9196  {
9197  // Already on next page.
9198  break;
9199  }
9200  ++nextSuballocItem;
9201  }
9202  }
9203  }
9204  else
9205  {
9206  const VmaSuballocation& suballoc = *suballocItem;
9207  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9208 
9209  *pSumFreeSize = suballoc.size;
9210 
9211  // Size of this suballocation is too small for this request: Early return.
9212  if(suballoc.size < allocSize)
9213  {
9214  return false;
9215  }
9216 
9217  // Start from offset equal to beginning of this suballocation.
9218  *pOffset = suballoc.offset;
9219 
9220  // Apply VMA_DEBUG_MARGIN at the beginning.
9221  if(VMA_DEBUG_MARGIN > 0)
9222  {
9223  *pOffset += VMA_DEBUG_MARGIN;
9224  }
9225 
9226  // Apply alignment.
9227  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9228 
9229  // Check previous suballocations for BufferImageGranularity conflicts.
9230  // Make bigger alignment if necessary.
9231  if(bufferImageGranularity > 1)
9232  {
9233  bool bufferImageGranularityConflict = false;
9234  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9235  while(prevSuballocItem != m_Suballocations.cbegin())
9236  {
9237  --prevSuballocItem;
9238  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9239  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9240  {
9241  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9242  {
9243  bufferImageGranularityConflict = true;
9244  break;
9245  }
9246  }
9247  else
9248  // Already on previous page.
9249  break;
9250  }
9251  if(bufferImageGranularityConflict)
9252  {
9253  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9254  }
9255  }
9256 
9257  // Calculate padding at the beginning based on current offset.
9258  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9259 
9260  // Calculate required margin at the end.
9261  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9262 
9263  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9264  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9265  {
9266  return false;
9267  }
9268 
9269  // Check next suballocations for BufferImageGranularity conflicts.
9270  // If conflict exists, allocation cannot be made here.
9271  if(bufferImageGranularity > 1)
9272  {
9273  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9274  ++nextSuballocItem;
9275  while(nextSuballocItem != m_Suballocations.cend())
9276  {
9277  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9278  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9279  {
9280  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9281  {
9282  return false;
9283  }
9284  }
9285  else
9286  {
9287  // Already on next page.
9288  break;
9289  }
9290  ++nextSuballocItem;
9291  }
9292  }
9293  }
9294 
9295  // All tests passed: Success. pOffset is already filled.
9296  return true;
9297 }
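/*
Worked example (illustrative) of the offset computation in the simple path
(canMakeOtherLost == false): a free suballocation at offset 100 with size 300,
VMA_DEBUG_MARGIN == 16, allocAlignment == 64, allocSize == 200:

    *pOffset = 100      start of the free range
    *pOffset = 116      after adding VMA_DEBUG_MARGIN
    *pOffset = 128      VmaAlignUp(116, 64)
    paddingBegin = 28   128 - 100
    28 + 200 + 16 = 244 <= 300, so the request fits and the function returns true.

If a neighboring allocation on the same bufferImageGranularity page has a
conflicting type (linear vs. optimal), *pOffset is first aligned up further to
bufferImageGranularity, which can make the final size check fail.
*/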
9298 
9299 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9300 {
9301  VMA_ASSERT(item != m_Suballocations.end());
9302  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9303 
9304  VmaSuballocationList::iterator nextItem = item;
9305  ++nextItem;
9306  VMA_ASSERT(nextItem != m_Suballocations.end());
9307  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9308 
9309  item->size += nextItem->size;
9310  --m_FreeCount;
9311  m_Suballocations.erase(nextItem);
9312 }
9313 
9314 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9315 {
9316  // Change this suballocation to be marked as free.
9317  VmaSuballocation& suballoc = *suballocItem;
9318  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9319  suballoc.hAllocation = VK_NULL_HANDLE;
9320 
9321  // Update totals.
9322  ++m_FreeCount;
9323  m_SumFreeSize += suballoc.size;
9324 
9325  // Merge with previous and/or next suballocation if it's also free.
9326  bool mergeWithNext = false;
9327  bool mergeWithPrev = false;
9328 
9329  VmaSuballocationList::iterator nextItem = suballocItem;
9330  ++nextItem;
9331  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9332  {
9333  mergeWithNext = true;
9334  }
9335 
9336  VmaSuballocationList::iterator prevItem = suballocItem;
9337  if(suballocItem != m_Suballocations.begin())
9338  {
9339  --prevItem;
9340  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9341  {
9342  mergeWithPrev = true;
9343  }
9344  }
9345 
9346  if(mergeWithNext)
9347  {
9348  UnregisterFreeSuballocation(nextItem);
9349  MergeFreeWithNext(suballocItem);
9350  }
9351 
9352  if(mergeWithPrev)
9353  {
9354  UnregisterFreeSuballocation(prevItem);
9355  MergeFreeWithNext(prevItem);
9356  RegisterFreeSuballocation(prevItem);
9357  return prevItem;
9358  }
9359  else
9360  {
9361  RegisterFreeSuballocation(suballocItem);
9362  return suballocItem;
9363  }
9364 }
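/*
Illustrative: freeing the used range in [free 100 | used 200 | free 50] first
marks the 200-byte range free, then merges the following 50-byte range into it
(250 bytes), and finally merges the result into the preceding 100-byte range,
leaving a single 350-byte free suballocation that is registered exactly once
in m_FreeSuballocationsBySize.
*/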
9365 
9366 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9367 {
9368  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9369  VMA_ASSERT(item->size > 0);
9370 
9371  // You may want to enable this validation at the beginning or at the end of
9372  // this function, depending on what you want to check.
9373  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9374 
9375  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9376  {
9377  if(m_FreeSuballocationsBySize.empty())
9378  {
9379  m_FreeSuballocationsBySize.push_back(item);
9380  }
9381  else
9382  {
9383  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9384  }
9385  }
9386 
9387  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9388 }
9389 
9390 
9391 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9392 {
9393  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9394  VMA_ASSERT(item->size > 0);
9395 
9396  // You may want to enable this validation at the beginning or at the end of
9397  // this function, depending on what you want to check.
9398  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9399 
9400  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9401  {
9402  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9403  m_FreeSuballocationsBySize.data(),
9404  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9405  item,
9406  VmaSuballocationItemSizeLess());
9407  for(size_t index = it - m_FreeSuballocationsBySize.data();
9408  index < m_FreeSuballocationsBySize.size();
9409  ++index)
9410  {
9411  if(m_FreeSuballocationsBySize[index] == item)
9412  {
9413  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9414  return;
9415  }
9416  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9417  }
9418  VMA_ASSERT(0 && "Not found.");
9419  }
9420 
9421  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9422 }
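/*
Note on the lookup above (illustrative): VmaBinaryFindFirstNotLess only locates
the first entry whose size is not less than item->size, but several free ranges
can share one size, e.g. sizes {64, 128, 128, 128, 512}. The linear scan that
follows therefore walks the run of equal-sized entries until it finds the exact
iterator; leaving that run without a match would be a logic error, hence the
asserts.
*/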
9423 
9424 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9425  VkDeviceSize bufferImageGranularity,
9426  VmaSuballocationType& inOutPrevSuballocType) const
9427 {
9428  if(bufferImageGranularity == 1 || IsEmpty())
9429  {
9430  return false;
9431  }
9432 
9433  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9434  bool typeConflictFound = false;
9435  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9436  it != m_Suballocations.cend();
9437  ++it)
9438  {
9439  const VmaSuballocationType suballocType = it->type;
9440  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9441  {
9442  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9443  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9444  {
9445  typeConflictFound = true;
9446  }
9447  inOutPrevSuballocType = suballocType;
9448  }
9449  }
9450 
9451  return typeConflictFound || minAlignment >= bufferImageGranularity;
9452 }
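/*
Background (illustrative): bufferImageGranularity is
VkPhysicalDeviceLimits::bufferImageGranularity (e.g. 4096 on some GPUs). When
it is greater than 1, a "linear" resource (buffer, linear-tiling image) and a
"non-linear" one (optimal-tiling image) bound to the same VkDeviceMemory must
not share a granularity-sized page. The loop above records the smallest
alignment used by any allocation in this block and whether two consecutive
allocations mix conflicting types; the caller uses the result to decide whether
granularity has to be re-checked when moving allocations around.
*/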
9453 
9454 ////////////////////////////////////////////////////////////////////////////////
9455 // class VmaBlockMetadata_Linear
9456 
9457 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9458  VmaBlockMetadata(hAllocator),
9459  m_SumFreeSize(0),
9460  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9461  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9462  m_1stVectorIndex(0),
9463  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9464  m_1stNullItemsBeginCount(0),
9465  m_1stNullItemsMiddleCount(0),
9466  m_2ndNullItemsCount(0)
9467 {
9468 }
9469 
9470 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9471 {
9472 }
9473 
9474 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9475 {
9476  VmaBlockMetadata::Init(size);
9477  m_SumFreeSize = size;
9478 }
9479 
9480 bool VmaBlockMetadata_Linear::Validate() const
9481 {
9482  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9483  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9484 
9485  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9486  VMA_VALIDATE(!suballocations1st.empty() ||
9487  suballocations2nd.empty() ||
9488  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9489 
9490  if(!suballocations1st.empty())
9491  {
9492  // Null items at the beginning should all be accounted for in m_1stNullItemsBeginCount.
9493  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9494  // Null items at the end should have been removed with pop_back().
9495  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9496  }
9497  if(!suballocations2nd.empty())
9498  {
9499  // Null items at the end should have been removed with pop_back().
9500  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9501  }
9502 
9503  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9504  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9505 
9506  VkDeviceSize sumUsedSize = 0;
9507  const size_t suballoc1stCount = suballocations1st.size();
9508  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9509 
9510  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9511  {
9512  const size_t suballoc2ndCount = suballocations2nd.size();
9513  size_t nullItem2ndCount = 0;
9514  for(size_t i = 0; i < suballoc2ndCount; ++i)
9515  {
9516  const VmaSuballocation& suballoc = suballocations2nd[i];
9517  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9518 
9519  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9520  VMA_VALIDATE(suballoc.offset >= offset);
9521 
9522  if(!currFree)
9523  {
9524  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9525  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9526  sumUsedSize += suballoc.size;
9527  }
9528  else
9529  {
9530  ++nullItem2ndCount;
9531  }
9532 
9533  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9534  }
9535 
9536  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9537  }
9538 
9539  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9540  {
9541  const VmaSuballocation& suballoc = suballocations1st[i];
9542  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9543  suballoc.hAllocation == VK_NULL_HANDLE);
9544  }
9545 
9546  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9547 
9548  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9549  {
9550  const VmaSuballocation& suballoc = suballocations1st[i];
9551  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9552 
9553  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9554  VMA_VALIDATE(suballoc.offset >= offset);
9555  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9556 
9557  if(!currFree)
9558  {
9559  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9560  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9561  sumUsedSize += suballoc.size;
9562  }
9563  else
9564  {
9565  ++nullItem1stCount;
9566  }
9567 
9568  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9569  }
9570  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9571 
9572  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9573  {
9574  const size_t suballoc2ndCount = suballocations2nd.size();
9575  size_t nullItem2ndCount = 0;
9576  for(size_t i = suballoc2ndCount; i--; )
9577  {
9578  const VmaSuballocation& suballoc = suballocations2nd[i];
9579  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9580 
9581  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9582  VMA_VALIDATE(suballoc.offset >= offset);
9583 
9584  if(!currFree)
9585  {
9586  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9587  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9588  sumUsedSize += suballoc.size;
9589  }
9590  else
9591  {
9592  ++nullItem2ndCount;
9593  }
9594 
9595  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9596  }
9597 
9598  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9599  }
9600 
9601  VMA_VALIDATE(offset <= GetSize());
9602  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9603 
9604  return true;
9605 }
9606 
9607 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9608 {
9609  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9610  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9611 }
9612 
9613 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9614 {
9615  const VkDeviceSize size = GetSize();
9616 
9617  /*
9618  We don't consider gaps inside allocation vectors with freed allocations because
9619  they are not suitable for reuse in a linear allocator. We consider only space that
9620  is available for new allocations.
9621  */
9622  if(IsEmpty())
9623  {
9624  return size;
9625  }
9626 
9627  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9628 
9629  switch(m_2ndVectorMode)
9630  {
9631  case SECOND_VECTOR_EMPTY:
9632  /*
9633  Available space is after end of 1st, as well as before beginning of 1st (which
9634  would make it a ring buffer).
9635  */
9636  {
9637  const size_t suballocations1stCount = suballocations1st.size();
9638  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9639  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9640  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9641  return VMA_MAX(
9642  firstSuballoc.offset,
9643  size - (lastSuballoc.offset + lastSuballoc.size));
9644  }
9645  break;
9646 
9647  case SECOND_VECTOR_RING_BUFFER:
9648  /*
9649  Available space is only between end of 2nd and beginning of 1st.
9650  */
9651  {
9652  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9653  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9654  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9655  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9656  }
9657  break;
9658 
9659  case SECOND_VECTOR_DOUBLE_STACK:
9660  /*
9661  Available space is only between end of 1st and top of 2nd.
9662  */
9663  {
9664  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9665  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9666  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9667  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9668  }
9669  break;
9670 
9671  default:
9672  VMA_ASSERT(0);
9673  return 0;
9674  }
9675 }
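/*
Block layouts handled above (illustrative; '1' = 1st vector, '2' = 2nd):

    SECOND_VECTOR_EMPTY:        [ free A | 1111111 | free B ]  -> VMA_MAX(A, B)
    SECOND_VECTOR_RING_BUFFER:  [ 2222 | free | 1111111 ]      -> gap between end of 2nd and start of 1st
    SECOND_VECTOR_DOUBLE_STACK: [ 1111111 | free | 2222 ]      -> gap between end of 1st and top of 2nd

Only the regions marked "free" are usable for new allocations; gaps left by
freed allocations inside the vectors are intentionally ignored here.
*/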
9676 
9677 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9678 {
9679  const VkDeviceSize size = GetSize();
9680  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9681  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9682  const size_t suballoc1stCount = suballocations1st.size();
9683  const size_t suballoc2ndCount = suballocations2nd.size();
9684 
9685  outInfo.blockCount = 1;
9686  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9687  outInfo.unusedRangeCount = 0;
9688  outInfo.usedBytes = 0;
9689  outInfo.allocationSizeMin = UINT64_MAX;
9690  outInfo.allocationSizeMax = 0;
9691  outInfo.unusedRangeSizeMin = UINT64_MAX;
9692  outInfo.unusedRangeSizeMax = 0;
9693 
9694  VkDeviceSize lastOffset = 0;
9695 
9696  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9697  {
9698  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9699  size_t nextAlloc2ndIndex = 0;
9700  while(lastOffset < freeSpace2ndTo1stEnd)
9701  {
9702  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9703  while(nextAlloc2ndIndex < suballoc2ndCount &&
9704  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9705  {
9706  ++nextAlloc2ndIndex;
9707  }
9708 
9709  // Found non-null allocation.
9710  if(nextAlloc2ndIndex < suballoc2ndCount)
9711  {
9712  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9713 
9714  // 1. Process free space before this allocation.
9715  if(lastOffset < suballoc.offset)
9716  {
9717  // There is free space from lastOffset to suballoc.offset.
9718  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9719  ++outInfo.unusedRangeCount;
9720  outInfo.unusedBytes += unusedRangeSize;
9721  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9722  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9723  }
9724 
9725  // 2. Process this allocation.
9726  // There is allocation with suballoc.offset, suballoc.size.
9727  outInfo.usedBytes += suballoc.size;
9728  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9729  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9730 
9731  // 3. Prepare for next iteration.
9732  lastOffset = suballoc.offset + suballoc.size;
9733  ++nextAlloc2ndIndex;
9734  }
9735  // We are at the end.
9736  else
9737  {
9738  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9739  if(lastOffset < freeSpace2ndTo1stEnd)
9740  {
9741  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9742  ++outInfo.unusedRangeCount;
9743  outInfo.unusedBytes += unusedRangeSize;
9744  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9745  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9746  }
9747 
9748  // End of loop.
9749  lastOffset = freeSpace2ndTo1stEnd;
9750  }
9751  }
9752  }
9753 
9754  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9755  const VkDeviceSize freeSpace1stTo2ndEnd =
9756  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9757  while(lastOffset < freeSpace1stTo2ndEnd)
9758  {
9759  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9760  while(nextAlloc1stIndex < suballoc1stCount &&
9761  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9762  {
9763  ++nextAlloc1stIndex;
9764  }
9765 
9766  // Found non-null allocation.
9767  if(nextAlloc1stIndex < suballoc1stCount)
9768  {
9769  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9770 
9771  // 1. Process free space before this allocation.
9772  if(lastOffset < suballoc.offset)
9773  {
9774  // There is free space from lastOffset to suballoc.offset.
9775  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9776  ++outInfo.unusedRangeCount;
9777  outInfo.unusedBytes += unusedRangeSize;
9778  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9779  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9780  }
9781 
9782  // 2. Process this allocation.
9783  // There is allocation with suballoc.offset, suballoc.size.
9784  outInfo.usedBytes += suballoc.size;
9785  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9786  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9787 
9788  // 3. Prepare for next iteration.
9789  lastOffset = suballoc.offset + suballoc.size;
9790  ++nextAlloc1stIndex;
9791  }
9792  // We are at the end.
9793  else
9794  {
9795  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9796  if(lastOffset < freeSpace1stTo2ndEnd)
9797  {
9798  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9799  ++outInfo.unusedRangeCount;
9800  outInfo.unusedBytes += unusedRangeSize;
9801  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9802  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9803  }
9804 
9805  // End of loop.
9806  lastOffset = freeSpace1stTo2ndEnd;
9807  }
9808  }
9809 
9810  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9811  {
9812  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9813  while(lastOffset < size)
9814  {
9815  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9816  while(nextAlloc2ndIndex != SIZE_MAX &&
9817  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9818  {
9819  --nextAlloc2ndIndex;
9820  }
9821 
9822  // Found non-null allocation.
9823  if(nextAlloc2ndIndex != SIZE_MAX)
9824  {
9825  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9826 
9827  // 1. Process free space before this allocation.
9828  if(lastOffset < suballoc.offset)
9829  {
9830  // There is free space from lastOffset to suballoc.offset.
9831  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9832  ++outInfo.unusedRangeCount;
9833  outInfo.unusedBytes += unusedRangeSize;
9834  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9835  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9836  }
9837 
9838  // 2. Process this allocation.
9839  // There is allocation with suballoc.offset, suballoc.size.
9840  outInfo.usedBytes += suballoc.size;
9841  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9842  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9843 
9844  // 3. Prepare for next iteration.
9845  lastOffset = suballoc.offset + suballoc.size;
9846  --nextAlloc2ndIndex;
9847  }
9848  // We are at the end.
9849  else
9850  {
9851  // There is free space from lastOffset to size.
9852  if(lastOffset < size)
9853  {
9854  const VkDeviceSize unusedRangeSize = size - lastOffset;
9855  ++outInfo.unusedRangeCount;
9856  outInfo.unusedBytes += unusedRangeSize;
9857  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9858  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9859  }
9860 
9861  // End of loop.
9862  lastOffset = size;
9863  }
9864  }
9865  }
9866 
9867  outInfo.unusedBytes = size - outInfo.usedBytes;
9868 }
9869 
9870 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9871 {
9872  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9873  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9874  const VkDeviceSize size = GetSize();
9875  const size_t suballoc1stCount = suballocations1st.size();
9876  const size_t suballoc2ndCount = suballocations2nd.size();
9877 
9878  inoutStats.size += size;
9879 
9880  VkDeviceSize lastOffset = 0;
9881 
9882  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9883  {
9884  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9885  size_t nextAlloc2ndIndex = 0; // 2nd vector is indexed from its own beginning, as in the other stats functions.
9886  while(lastOffset < freeSpace2ndTo1stEnd)
9887  {
9888  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9889  while(nextAlloc2ndIndex < suballoc2ndCount &&
9890  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9891  {
9892  ++nextAlloc2ndIndex;
9893  }
9894 
9895  // Found non-null allocation.
9896  if(nextAlloc2ndIndex < suballoc2ndCount)
9897  {
9898  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9899 
9900  // 1. Process free space before this allocation.
9901  if(lastOffset < suballoc.offset)
9902  {
9903  // There is free space from lastOffset to suballoc.offset.
9904  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9905  inoutStats.unusedSize += unusedRangeSize;
9906  ++inoutStats.unusedRangeCount;
9907  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9908  }
9909 
9910  // 2. Process this allocation.
9911  // There is allocation with suballoc.offset, suballoc.size.
9912  ++inoutStats.allocationCount;
9913 
9914  // 3. Prepare for next iteration.
9915  lastOffset = suballoc.offset + suballoc.size;
9916  ++nextAlloc2ndIndex;
9917  }
9918  // We are at the end.
9919  else
9920  {
9921  if(lastOffset < freeSpace2ndTo1stEnd)
9922  {
9923  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9924  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9925  inoutStats.unusedSize += unusedRangeSize;
9926  ++inoutStats.unusedRangeCount;
9927  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9928  }
9929 
9930  // End of loop.
9931  lastOffset = freeSpace2ndTo1stEnd;
9932  }
9933  }
9934  }
9935 
9936  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9937  const VkDeviceSize freeSpace1stTo2ndEnd =
9938  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9939  while(lastOffset < freeSpace1stTo2ndEnd)
9940  {
9941  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9942  while(nextAlloc1stIndex < suballoc1stCount &&
9943  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9944  {
9945  ++nextAlloc1stIndex;
9946  }
9947 
9948  // Found non-null allocation.
9949  if(nextAlloc1stIndex < suballoc1stCount)
9950  {
9951  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9952 
9953  // 1. Process free space before this allocation.
9954  if(lastOffset < suballoc.offset)
9955  {
9956  // There is free space from lastOffset to suballoc.offset.
9957  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9958  inoutStats.unusedSize += unusedRangeSize;
9959  ++inoutStats.unusedRangeCount;
9960  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9961  }
9962 
9963  // 2. Process this allocation.
9964  // There is allocation with suballoc.offset, suballoc.size.
9965  ++inoutStats.allocationCount;
9966 
9967  // 3. Prepare for next iteration.
9968  lastOffset = suballoc.offset + suballoc.size;
9969  ++nextAlloc1stIndex;
9970  }
9971  // We are at the end.
9972  else
9973  {
9974  if(lastOffset < freeSpace1stTo2ndEnd)
9975  {
9976  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9977  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9978  inoutStats.unusedSize += unusedRangeSize;
9979  ++inoutStats.unusedRangeCount;
9980  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9981  }
9982 
9983  // End of loop.
9984  lastOffset = freeSpace1stTo2ndEnd;
9985  }
9986  }
9987 
9988  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9989  {
9990  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9991  while(lastOffset < size)
9992  {
9993  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9994  while(nextAlloc2ndIndex != SIZE_MAX &&
9995  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9996  {
9997  --nextAlloc2ndIndex;
9998  }
9999 
10000  // Found non-null allocation.
10001  if(nextAlloc2ndIndex != SIZE_MAX)
10002  {
10003  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10004 
10005  // 1. Process free space before this allocation.
10006  if(lastOffset < suballoc.offset)
10007  {
10008  // There is free space from lastOffset to suballoc.offset.
10009  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10010  inoutStats.unusedSize += unusedRangeSize;
10011  ++inoutStats.unusedRangeCount;
10012  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10013  }
10014 
10015  // 2. Process this allocation.
10016  // There is allocation with suballoc.offset, suballoc.size.
10017  ++inoutStats.allocationCount;
10018 
10019  // 3. Prepare for next iteration.
10020  lastOffset = suballoc.offset + suballoc.size;
10021  --nextAlloc2ndIndex;
10022  }
10023  // We are at the end.
10024  else
10025  {
10026  if(lastOffset < size)
10027  {
10028  // There is free space from lastOffset to size.
10029  const VkDeviceSize unusedRangeSize = size - lastOffset;
10030  inoutStats.unusedSize += unusedRangeSize;
10031  ++inoutStats.unusedRangeCount;
10032  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10033  }
10034 
10035  // End of loop.
10036  lastOffset = size;
10037  }
10038  }
10039  }
10040 }
10041 
10042 #if VMA_STATS_STRING_ENABLED
10043 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10044 {
10045  const VkDeviceSize size = GetSize();
10046  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10047  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10048  const size_t suballoc1stCount = suballocations1st.size();
10049  const size_t suballoc2ndCount = suballocations2nd.size();
10050 
10051  // FIRST PASS
10052 
10053  size_t unusedRangeCount = 0;
10054  VkDeviceSize usedBytes = 0;
10055 
10056  VkDeviceSize lastOffset = 0;
10057 
10058  size_t alloc2ndCount = 0;
10059  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10060  {
10061  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10062  size_t nextAlloc2ndIndex = 0;
10063  while(lastOffset < freeSpace2ndTo1stEnd)
10064  {
10065  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10066  while(nextAlloc2ndIndex < suballoc2ndCount &&
10067  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10068  {
10069  ++nextAlloc2ndIndex;
10070  }
10071 
10072  // Found non-null allocation.
10073  if(nextAlloc2ndIndex < suballoc2ndCount)
10074  {
10075  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10076 
10077  // 1. Process free space before this allocation.
10078  if(lastOffset < suballoc.offset)
10079  {
10080  // There is free space from lastOffset to suballoc.offset.
10081  ++unusedRangeCount;
10082  }
10083 
10084  // 2. Process this allocation.
10085  // There is allocation with suballoc.offset, suballoc.size.
10086  ++alloc2ndCount;
10087  usedBytes += suballoc.size;
10088 
10089  // 3. Prepare for next iteration.
10090  lastOffset = suballoc.offset + suballoc.size;
10091  ++nextAlloc2ndIndex;
10092  }
10093  // We are at the end.
10094  else
10095  {
10096  if(lastOffset < freeSpace2ndTo1stEnd)
10097  {
10098  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10099  ++unusedRangeCount;
10100  }
10101 
10102  // End of loop.
10103  lastOffset = freeSpace2ndTo1stEnd;
10104  }
10105  }
10106  }
10107 
10108  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10109  size_t alloc1stCount = 0;
10110  const VkDeviceSize freeSpace1stTo2ndEnd =
10111  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10112  while(lastOffset < freeSpace1stTo2ndEnd)
10113  {
10114  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10115  while(nextAlloc1stIndex < suballoc1stCount &&
10116  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10117  {
10118  ++nextAlloc1stIndex;
10119  }
10120 
10121  // Found non-null allocation.
10122  if(nextAlloc1stIndex < suballoc1stCount)
10123  {
10124  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10125 
10126  // 1. Process free space before this allocation.
10127  if(lastOffset < suballoc.offset)
10128  {
10129  // There is free space from lastOffset to suballoc.offset.
10130  ++unusedRangeCount;
10131  }
10132 
10133  // 2. Process this allocation.
10134  // There is allocation with suballoc.offset, suballoc.size.
10135  ++alloc1stCount;
10136  usedBytes += suballoc.size;
10137 
10138  // 3. Prepare for next iteration.
10139  lastOffset = suballoc.offset + suballoc.size;
10140  ++nextAlloc1stIndex;
10141  }
10142  // We are at the end.
10143  else
10144  {
10145  if(lastOffset < freeSpace1stTo2ndEnd)
10146  {
10147  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10148  ++unusedRangeCount;
10149  }
10150 
10151  // End of loop.
10152  lastOffset = freeSpace1stTo2ndEnd;
10153  }
10154  }
10155 
10156  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10157  {
10158  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10159  while(lastOffset < size)
10160  {
10161  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10162  while(nextAlloc2ndIndex != SIZE_MAX &&
10163  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10164  {
10165  --nextAlloc2ndIndex;
10166  }
10167 
10168  // Found non-null allocation.
10169  if(nextAlloc2ndIndex != SIZE_MAX)
10170  {
10171  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10172 
10173  // 1. Process free space before this allocation.
10174  if(lastOffset < suballoc.offset)
10175  {
10176  // There is free space from lastOffset to suballoc.offset.
10177  ++unusedRangeCount;
10178  }
10179 
10180  // 2. Process this allocation.
10181  // There is allocation with suballoc.offset, suballoc.size.
10182  ++alloc2ndCount;
10183  usedBytes += suballoc.size;
10184 
10185  // 3. Prepare for next iteration.
10186  lastOffset = suballoc.offset + suballoc.size;
10187  --nextAlloc2ndIndex;
10188  }
10189  // We are at the end.
10190  else
10191  {
10192  if(lastOffset < size)
10193  {
10194  // There is free space from lastOffset to size.
10195  ++unusedRangeCount;
10196  }
10197 
10198  // End of loop.
10199  lastOffset = size;
10200  }
10201  }
10202  }
10203 
10204  const VkDeviceSize unusedBytes = size - usedBytes;
10205  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10206 
10207  // SECOND PASS
10208  lastOffset = 0;
10209 
10210  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10211  {
10212  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10213  size_t nextAlloc2ndIndex = 0;
10214  while(lastOffset < freeSpace2ndTo1stEnd)
10215  {
10216  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10217  while(nextAlloc2ndIndex < suballoc2ndCount &&
10218  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10219  {
10220  ++nextAlloc2ndIndex;
10221  }
10222 
10223  // Found non-null allocation.
10224  if(nextAlloc2ndIndex < suballoc2ndCount)
10225  {
10226  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10227 
10228  // 1. Process free space before this allocation.
10229  if(lastOffset < suballoc.offset)
10230  {
10231  // There is free space from lastOffset to suballoc.offset.
10232  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10233  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10234  }
10235 
10236  // 2. Process this allocation.
10237  // There is allocation with suballoc.offset, suballoc.size.
10238  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10239 
10240  // 3. Prepare for next iteration.
10241  lastOffset = suballoc.offset + suballoc.size;
10242  ++nextAlloc2ndIndex;
10243  }
10244  // We are at the end.
10245  else
10246  {
10247  if(lastOffset < freeSpace2ndTo1stEnd)
10248  {
10249  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10250  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10251  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10252  }
10253 
10254  // End of loop.
10255  lastOffset = freeSpace2ndTo1stEnd;
10256  }
10257  }
10258  }
10259 
10260  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10261  while(lastOffset < freeSpace1stTo2ndEnd)
10262  {
10263  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10264  while(nextAlloc1stIndex < suballoc1stCount &&
10265  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10266  {
10267  ++nextAlloc1stIndex;
10268  }
10269 
10270  // Found non-null allocation.
10271  if(nextAlloc1stIndex < suballoc1stCount)
10272  {
10273  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10274 
10275  // 1. Process free space before this allocation.
10276  if(lastOffset < suballoc.offset)
10277  {
10278  // There is free space from lastOffset to suballoc.offset.
10279  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10280  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10281  }
10282 
10283  // 2. Process this allocation.
10284  // There is allocation with suballoc.offset, suballoc.size.
10285  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10286 
10287  // 3. Prepare for next iteration.
10288  lastOffset = suballoc.offset + suballoc.size;
10289  ++nextAlloc1stIndex;
10290  }
10291  // We are at the end.
10292  else
10293  {
10294  if(lastOffset < freeSpace1stTo2ndEnd)
10295  {
10296  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10297  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10298  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10299  }
10300 
10301  // End of loop.
10302  lastOffset = freeSpace1stTo2ndEnd;
10303  }
10304  }
10305 
10306  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10307  {
10308  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10309  while(lastOffset < size)
10310  {
10311  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10312  while(nextAlloc2ndIndex != SIZE_MAX &&
10313  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10314  {
10315  --nextAlloc2ndIndex;
10316  }
10317 
10318  // Found non-null allocation.
10319  if(nextAlloc2ndIndex != SIZE_MAX)
10320  {
10321  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10322 
10323  // 1. Process free space before this allocation.
10324  if(lastOffset < suballoc.offset)
10325  {
10326  // There is free space from lastOffset to suballoc.offset.
10327  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10328  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10329  }
10330 
10331  // 2. Process this allocation.
10332  // There is allocation with suballoc.offset, suballoc.size.
10333  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10334 
10335  // 3. Prepare for next iteration.
10336  lastOffset = suballoc.offset + suballoc.size;
10337  --nextAlloc2ndIndex;
10338  }
10339  // We are at the end.
10340  else
10341  {
10342  if(lastOffset < size)
10343  {
10344  // There is free space from lastOffset to size.
10345  const VkDeviceSize unusedRangeSize = size - lastOffset;
10346  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10347  }
10348 
10349  // End of loop.
10350  lastOffset = size;
10351  }
10352  }
10353  }
10354 
10355  PrintDetailedMap_End(json);
10356 }
10357 #endif // #if VMA_STATS_STRING_ENABLED
10358 
10359 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10360  uint32_t currentFrameIndex,
10361  uint32_t frameInUseCount,
10362  VkDeviceSize bufferImageGranularity,
10363  VkDeviceSize allocSize,
10364  VkDeviceSize allocAlignment,
10365  bool upperAddress,
10366  VmaSuballocationType allocType,
10367  bool canMakeOtherLost,
10368  uint32_t strategy,
10369  VmaAllocationRequest* pAllocationRequest)
10370 {
10371  VMA_ASSERT(allocSize > 0);
10372  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10373  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10374  VMA_HEAVY_ASSERT(Validate());
10375  return upperAddress ?
10376  CreateAllocationRequest_UpperAddress(
10377  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10378  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10379  CreateAllocationRequest_LowerAddress(
10380  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10381  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10382 }
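
// --- Editor's illustrative sketch (not part of the library) ---
// How the upper-address path above is reached from user code: a custom pool
// created with the linear algorithm, plus VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
// on the allocation. The function name and sizes are hypothetical placeholders.
inline VkResult ExampleDoubleStackAlloc(VmaAllocator allocator, uint32_t memTypeIndex)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolInfo.blockSize = 64ull * 1024 * 1024; // one fixed-size block
    poolInfo.maxBlockCount = 1;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 1024;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // allocate from the top of the block downwards

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        vmaDestroyBuffer(allocator, buf, alloc);
    }
    vmaDestroyPool(allocator, pool);
    return res;
}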
10383 
10384 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10385  uint32_t currentFrameIndex,
10386  uint32_t frameInUseCount,
10387  VkDeviceSize bufferImageGranularity,
10388  VkDeviceSize allocSize,
10389  VkDeviceSize allocAlignment,
10390  VmaSuballocationType allocType,
10391  bool canMakeOtherLost,
10392  uint32_t strategy,
10393  VmaAllocationRequest* pAllocationRequest)
10394 {
10395  const VkDeviceSize size = GetSize();
10396  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10397  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10398 
10399  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10400  {
10401  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10402  return false;
10403  }
10404 
10405  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10406  if(allocSize > size)
10407  {
10408  return false;
10409  }
10410  VkDeviceSize resultBaseOffset = size - allocSize;
10411  if(!suballocations2nd.empty())
10412  {
10413  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10414  resultBaseOffset = lastSuballoc.offset - allocSize;
10415  if(allocSize > lastSuballoc.offset)
10416  {
10417  return false;
10418  }
10419  }
10420 
10421  // Start from offset equal to end of free space.
10422  VkDeviceSize resultOffset = resultBaseOffset;
10423 
10424  // Apply VMA_DEBUG_MARGIN at the end.
10425  if(VMA_DEBUG_MARGIN > 0)
10426  {
10427  if(resultOffset < VMA_DEBUG_MARGIN)
10428  {
10429  return false;
10430  }
10431  resultOffset -= VMA_DEBUG_MARGIN;
10432  }
10433 
10434  // Apply alignment.
10435  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10436 
10437  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10438  // Make bigger alignment if necessary.
10439  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10440  {
10441  bool bufferImageGranularityConflict = false;
10442  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10443  {
10444  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10445  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10446  {
10447  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10448  {
10449  bufferImageGranularityConflict = true;
10450  break;
10451  }
10452  }
10453  else
10454  // Already on previous page.
10455  break;
10456  }
10457  if(bufferImageGranularityConflict)
10458  {
10459  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10460  }
10461  }
10462 
10463  // There is enough free space.
10464  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10465  suballocations1st.back().offset + suballocations1st.back().size :
10466  0;
10467  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10468  {
10469  // Check previous suballocations for BufferImageGranularity conflicts.
10470  // If conflict exists, allocation cannot be made here.
10471  if(bufferImageGranularity > 1)
10472  {
10473  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10474  {
10475  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10476  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10477  {
10478  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10479  {
10480  return false;
10481  }
10482  }
10483  else
10484  {
10485  // Already on next page.
10486  break;
10487  }
10488  }
10489  }
10490 
10491  // All tests passed: Success.
10492  pAllocationRequest->offset = resultOffset;
10493  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10494  pAllocationRequest->sumItemSize = 0;
10495  // pAllocationRequest->item unused.
10496  pAllocationRequest->itemsToMakeLostCount = 0;
10497  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10498  return true;
10499  }
10500 
10501  return false;
10502 }
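
// Editor's note (illustrative, not part of the library): the offset arithmetic
// above walks DOWN from the end of free space, so alignment must round down
// rather than up. A minimal sketch of that rounding, assuming a power-of-2
// alignment:
inline VkDeviceSize ExampleAlignDownPow2(VkDeviceSize offset, VkDeviceSize alignment)
{
    return offset & ~(alignment - 1); // e.g. ExampleAlignDownPow2(1000, 256) == 768
}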
10503 
10504 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10505  uint32_t currentFrameIndex,
10506  uint32_t frameInUseCount,
10507  VkDeviceSize bufferImageGranularity,
10508  VkDeviceSize allocSize,
10509  VkDeviceSize allocAlignment,
10510  VmaSuballocationType allocType,
10511  bool canMakeOtherLost,
10512  uint32_t strategy,
10513  VmaAllocationRequest* pAllocationRequest)
10514 {
10515  const VkDeviceSize size = GetSize();
10516  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10517  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10518 
10519  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10520  {
10521  // Try to allocate at the end of 1st vector.
10522 
10523  VkDeviceSize resultBaseOffset = 0;
10524  if(!suballocations1st.empty())
10525  {
10526  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10527  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10528  }
10529 
10530  // Start from offset equal to beginning of free space.
10531  VkDeviceSize resultOffset = resultBaseOffset;
10532 
10533  // Apply VMA_DEBUG_MARGIN at the beginning.
10534  if(VMA_DEBUG_MARGIN > 0)
10535  {
10536  resultOffset += VMA_DEBUG_MARGIN;
10537  }
10538 
10539  // Apply alignment.
10540  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10541 
10542  // Check previous suballocations for BufferImageGranularity conflicts.
10543  // Make bigger alignment if necessary.
10544  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10545  {
10546  bool bufferImageGranularityConflict = false;
10547  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10548  {
10549  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10550  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10551  {
10552  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10553  {
10554  bufferImageGranularityConflict = true;
10555  break;
10556  }
10557  }
10558  else
10559  // Already on previous page.
10560  break;
10561  }
10562  if(bufferImageGranularityConflict)
10563  {
10564  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10565  }
10566  }
10567 
10568  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10569  suballocations2nd.back().offset : size;
10570 
10571  // There is enough free space at the end after alignment.
10572  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10573  {
10574  // Check next suballocations for BufferImageGranularity conflicts.
10575  // If conflict exists, allocation cannot be made here.
10576  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10577  {
10578  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10579  {
10580  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10581  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10582  {
10583  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10584  {
10585  return false;
10586  }
10587  }
10588  else
10589  {
10590  // Already on previous page.
10591  break;
10592  }
10593  }
10594  }
10595 
10596  // All tests passed: Success.
10597  pAllocationRequest->offset = resultOffset;
10598  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10599  pAllocationRequest->sumItemSize = 0;
10600  // pAllocationRequest->item, customData unused.
10601  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10602  pAllocationRequest->itemsToMakeLostCount = 0;
10603  return true;
10604  }
10605  }
10606 
10607  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10608  // beginning of 1st vector as the end of free space.
10609  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10610  {
10611  VMA_ASSERT(!suballocations1st.empty());
10612 
10613  VkDeviceSize resultBaseOffset = 0;
10614  if(!suballocations2nd.empty())
10615  {
10616  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10617  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10618  }
10619 
10620  // Start from offset equal to beginning of free space.
10621  VkDeviceSize resultOffset = resultBaseOffset;
10622 
10623  // Apply VMA_DEBUG_MARGIN at the beginning.
10624  if(VMA_DEBUG_MARGIN > 0)
10625  {
10626  resultOffset += VMA_DEBUG_MARGIN;
10627  }
10628 
10629  // Apply alignment.
10630  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10631 
10632  // Check previous suballocations for BufferImageGranularity conflicts.
10633  // Make bigger alignment if necessary.
10634  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10635  {
10636  bool bufferImageGranularityConflict = false;
10637  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10638  {
10639  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10640  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10641  {
10642  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10643  {
10644  bufferImageGranularityConflict = true;
10645  break;
10646  }
10647  }
10648  else
10649  // Already on previous page.
10650  break;
10651  }
10652  if(bufferImageGranularityConflict)
10653  {
10654  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10655  }
10656  }
10657 
10658  pAllocationRequest->itemsToMakeLostCount = 0;
10659  pAllocationRequest->sumItemSize = 0;
10660  size_t index1st = m_1stNullItemsBeginCount;
10661 
10662  if(canMakeOtherLost)
10663  {
10664  while(index1st < suballocations1st.size() &&
10665  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10666  {
10667  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10668  const VmaSuballocation& suballoc = suballocations1st[index1st];
10669  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10670  {
10671  // No problem.
10672  }
10673  else
10674  {
10675  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10676  if(suballoc.hAllocation->CanBecomeLost() &&
10677  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10678  {
10679  ++pAllocationRequest->itemsToMakeLostCount;
10680  pAllocationRequest->sumItemSize += suballoc.size;
10681  }
10682  else
10683  {
10684  return false;
10685  }
10686  }
10687  ++index1st;
10688  }
10689 
10690  // Check next suballocations for BufferImageGranularity conflicts.
10691  // If conflict exists, we must mark more allocations lost or fail.
10692  if(bufferImageGranularity > 1)
10693  {
10694  while(index1st < suballocations1st.size())
10695  {
10696  const VmaSuballocation& suballoc = suballocations1st[index1st];
10697  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10698  {
10699  if(suballoc.hAllocation != VK_NULL_HANDLE)
10700  {
10701  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10702  if(suballoc.hAllocation->CanBecomeLost() &&
10703  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10704  {
10705  ++pAllocationRequest->itemsToMakeLostCount;
10706  pAllocationRequest->sumItemSize += suballoc.size;
10707  }
10708  else
10709  {
10710  return false;
10711  }
10712  }
10713  }
10714  else
10715  {
10716  // Already on next page.
10717  break;
10718  }
10719  ++index1st;
10720  }
10721  }
10722 
10723  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10724  if(index1st == suballocations1st.size() &&
10725  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10726  {
10727  // TODO: Known limitation: this special case is not implemented yet, so the allocation fails.
10728  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10729  }
10730  }
10731 
10732  // There is enough free space at the end after alignment.
10733  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10734  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10735  {
10736  // Check next suballocations for BufferImageGranularity conflicts.
10737  // If conflict exists, allocation cannot be made here.
10738  if(bufferImageGranularity > 1)
10739  {
10740  for(size_t nextSuballocIndex = index1st;
10741  nextSuballocIndex < suballocations1st.size();
10742  nextSuballocIndex++)
10743  {
10744  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10745  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10746  {
10747  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10748  {
10749  return false;
10750  }
10751  }
10752  else
10753  {
10754  // Already on next page.
10755  break;
10756  }
10757  }
10758  }
10759 
10760  // All tests passed: Success.
10761  pAllocationRequest->offset = resultOffset;
10762  pAllocationRequest->sumFreeSize =
10763  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10764  - resultBaseOffset
10765  - pAllocationRequest->sumItemSize;
10766  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10767  // pAllocationRequest->item, customData unused.
10768  return true;
10769  }
10770  }
10771 
10772  return false;
10773 }
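
// Editor's illustrative sketch (not part of the library): the ring-buffer
// geometry handled above, reduced to plain offsets and ignoring alignment,
// debug margins, granularity and lost allocations. `head` is where the next
// item would start, `tail` the offset of the first live item of the 1st vector.
inline bool ExampleRingBufferFits(bool wrapped, VkDeviceSize head, VkDeviceSize tail, VkDeviceSize blockSize, VkDeviceSize allocSize)
{
    // Before wrap-around new items go at `head` (end of 1st) up to blockSize;
    // after wrap-around they go at `head` (end of 2nd) up to `tail` (start of 1st).
    return wrapped ? (allocSize <= tail - head) : (allocSize <= blockSize - head);
}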
10774 
10775 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10776  uint32_t currentFrameIndex,
10777  uint32_t frameInUseCount,
10778  VmaAllocationRequest* pAllocationRequest)
10779 {
10780  if(pAllocationRequest->itemsToMakeLostCount == 0)
10781  {
10782  return true;
10783  }
10784 
10785  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10786 
10787  // We always start from 1st.
10788  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10789  size_t index = m_1stNullItemsBeginCount;
10790  size_t madeLostCount = 0;
10791  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10792  {
10793  if(index == suballocations->size())
10794  {
10795  index = 0;
10796  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10797  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10798  {
10799  suballocations = &AccessSuballocations2nd();
10800  }
10801  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10802  // suballocations continues pointing at AccessSuballocations1st().
10803  VMA_ASSERT(!suballocations->empty());
10804  }
10805  VmaSuballocation& suballoc = (*suballocations)[index];
10806  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10807  {
10808  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10809  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10810  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10811  {
10812  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10813  suballoc.hAllocation = VK_NULL_HANDLE;
10814  m_SumFreeSize += suballoc.size;
10815  if(suballocations == &AccessSuballocations1st())
10816  {
10817  ++m_1stNullItemsMiddleCount;
10818  }
10819  else
10820  {
10821  ++m_2ndNullItemsCount;
10822  }
10823  ++madeLostCount;
10824  }
10825  else
10826  {
10827  return false;
10828  }
10829  }
10830  ++index;
10831  }
10832 
10833  CleanupAfterFree();
10834  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10835 
10836  return true;
10837 }
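
// Editor's illustrative sketch (not part of the library): the per-frame
// protocol that drives the lost-allocation machinery above, using the public
// API. Assumes the allocation was created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
inline bool ExampleBeginFrame(VmaAllocator allocator, VmaAllocation alloc, uint32_t frameIndex)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // vmaTouchAllocation returns VK_TRUE and records the current frame as the
    // allocation's last use if it is not lost; VK_FALSE means it was made lost
    // and its resource must be recreated.
    return vmaTouchAllocation(allocator, alloc) == VK_TRUE;
}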
10838 
10839 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10840 {
10841  uint32_t lostAllocationCount = 0;
10842 
10843  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10844  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10845  {
10846  VmaSuballocation& suballoc = suballocations1st[i];
10847  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10848  suballoc.hAllocation->CanBecomeLost() &&
10849  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10850  {
10851  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10852  suballoc.hAllocation = VK_NULL_HANDLE;
10853  ++m_1stNullItemsMiddleCount;
10854  m_SumFreeSize += suballoc.size;
10855  ++lostAllocationCount;
10856  }
10857  }
10858 
10859  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10860  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10861  {
10862  VmaSuballocation& suballoc = suballocations2nd[i];
10863  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10864  suballoc.hAllocation->CanBecomeLost() &&
10865  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10866  {
10867  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10868  suballoc.hAllocation = VK_NULL_HANDLE;
10869  ++m_2ndNullItemsCount;
10870  m_SumFreeSize += suballoc.size;
10871  ++lostAllocationCount;
10872  }
10873  }
10874 
10875  if(lostAllocationCount)
10876  {
10877  CleanupAfterFree();
10878  }
10879 
10880  return lostAllocationCount;
10881 }
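
// Editor's note (illustrative): both functions above apply the same
// eligibility test - an allocation may become lost only when it has not been
// used for more than frameInUseCount frames:
inline bool ExampleCanMakeLostNow(uint32_t lastUseFrame, uint32_t frameInUseCount, uint32_t currentFrame)
{
    // e.g. lastUseFrame = 10, frameInUseCount = 2, currentFrame = 13: 12 < 13 -> true.
    return lastUseFrame + frameInUseCount < currentFrame;
}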
10882 
10883 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10884 {
10885  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10886  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10887  {
10888  const VmaSuballocation& suballoc = suballocations1st[i];
10889  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10890  {
10891  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10892  {
10893  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10894  return VK_ERROR_VALIDATION_FAILED_EXT;
10895  }
10896  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10897  {
10898  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10899  return VK_ERROR_VALIDATION_FAILED_EXT;
10900  }
10901  }
10902  }
10903 
10904  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10905  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10906  {
10907  const VmaSuballocation& suballoc = suballocations2nd[i];
10908  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10909  {
10910  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10911  {
10912  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10913  return VK_ERROR_VALIDATION_FAILED_EXT;
10914  }
10915  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10916  {
10917  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10918  return VK_ERROR_VALIDATION_FAILED_EXT;
10919  }
10920  }
10921  }
10922 
10923  return VK_SUCCESS;
10924 }
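
// Editor's note (illustrative, not part of the library): the magic values
// checked above exist only when the implementation is compiled with margins
// and corruption detection enabled, e.g.:
//
//     #define VMA_DEBUG_MARGIN 16
//     #define VMA_DEBUG_DETECT_CORRUPTION 1
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
// This method is then reached through the public entry point:
inline VkResult ExampleCheckAllMemory(VmaAllocator allocator)
{
    // UINT32_MAX = a memoryTypeBits mask covering every memory type.
    return vmaCheckCorruption(allocator, UINT32_MAX);
}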
10925 
10926 void VmaBlockMetadata_Linear::Alloc(
10927  const VmaAllocationRequest& request,
10928  VmaSuballocationType type,
10929  VkDeviceSize allocSize,
10930  VmaAllocation hAllocation)
10931 {
10932  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10933 
10934  switch(request.type)
10935  {
10936  case VmaAllocationRequestType::UpperAddress:
10937  {
10938  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10939  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10940  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10941  suballocations2nd.push_back(newSuballoc);
10942  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10943  }
10944  break;
10945  case VmaAllocationRequestType::EndOf1st:
10946  {
10947  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10948 
10949  VMA_ASSERT(suballocations1st.empty() ||
10950  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10951  // Check if it fits before the end of the block.
10952  VMA_ASSERT(request.offset + allocSize <= GetSize());
10953 
10954  suballocations1st.push_back(newSuballoc);
10955  }
10956  break;
10957  case VmaAllocationRequestType::EndOf2nd:
10958  {
10959  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10960  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10961  VMA_ASSERT(!suballocations1st.empty() &&
10962  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10963  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10964 
10965  switch(m_2ndVectorMode)
10966  {
10967  case SECOND_VECTOR_EMPTY:
10968  // First allocation from second part ring buffer.
10969  VMA_ASSERT(suballocations2nd.empty());
10970  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10971  break;
10972  case SECOND_VECTOR_RING_BUFFER:
10973  // 2-part ring buffer is already started.
10974  VMA_ASSERT(!suballocations2nd.empty());
10975  break;
10976  case SECOND_VECTOR_DOUBLE_STACK:
10977  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10978  break;
10979  default:
10980  VMA_ASSERT(0);
10981  }
10982 
10983  suballocations2nd.push_back(newSuballoc);
10984  }
10985  break;
10986  default:
10987  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10988  }
10989 
10990  m_SumFreeSize -= newSuballoc.size;
10991 }
10992 
10993 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10994 {
10995  FreeAtOffset(allocation->GetOffset());
10996 }
10997 
10998 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10999 {
11000  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11001  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11002 
11003  if(!suballocations1st.empty())
11004  {
11005  // First allocation: Mark it as next empty at the beginning.
11006  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11007  if(firstSuballoc.offset == offset)
11008  {
11009  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11010  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11011  m_SumFreeSize += firstSuballoc.size;
11012  ++m_1stNullItemsBeginCount;
11013  CleanupAfterFree();
11014  return;
11015  }
11016  }
11017 
11018  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11019  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11020  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11021  {
11022  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11023  if(lastSuballoc.offset == offset)
11024  {
11025  m_SumFreeSize += lastSuballoc.size;
11026  suballocations2nd.pop_back();
11027  CleanupAfterFree();
11028  return;
11029  }
11030  }
11031  // Last allocation in 1st vector.
11032  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11033  {
11034  VmaSuballocation& lastSuballoc = suballocations1st.back();
11035  if(lastSuballoc.offset == offset)
11036  {
11037  m_SumFreeSize += lastSuballoc.size;
11038  suballocations1st.pop_back();
11039  CleanupAfterFree();
11040  return;
11041  }
11042  }
11043 
11044  // Item from the middle of 1st vector.
11045  {
11046  VmaSuballocation refSuballoc;
11047  refSuballoc.offset = offset;
11048  // Rest of members stays uninitialized intentionally for better performance.
11049  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11050  suballocations1st.begin() + m_1stNullItemsBeginCount,
11051  suballocations1st.end(),
11052  refSuballoc,
11053  VmaSuballocationOffsetLess());
11054  if(it != suballocations1st.end())
11055  {
11056  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11057  it->hAllocation = VK_NULL_HANDLE;
11058  ++m_1stNullItemsMiddleCount;
11059  m_SumFreeSize += it->size;
11060  CleanupAfterFree();
11061  return;
11062  }
11063  }
11064 
11065  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11066  {
11067  // Item from the middle of 2nd vector.
11068  VmaSuballocation refSuballoc;
11069  refSuballoc.offset = offset;
11070  // Rest of members stays uninitialized intentionally for better performance.
11071  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11072  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11073  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11074  if(it != suballocations2nd.end())
11075  {
11076  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11077  it->hAllocation = VK_NULL_HANDLE;
11078  ++m_2ndNullItemsCount;
11079  m_SumFreeSize += it->size;
11080  CleanupAfterFree();
11081  return;
11082  }
11083  }
11084 
11085  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11086 }
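
// Editor's illustrative sketch (not part of the library): the middle-of-vector
// lookups above rely on suballocations being sorted by offset, so
// VmaBinaryFindSorted is an ordinary lower-bound binary search. Equivalent in
// spirit, for an ascending vector:
inline const VmaSuballocation* ExampleFindByOffset(
    const VmaSuballocation* data, size_t count, VkDeviceSize offset)
{
    size_t lo = 0, hi = count;
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(data[mid].offset < offset)
            lo = mid + 1;
        else
            hi = mid;
    }
    return (lo < count && data[lo].offset == offset) ? data + lo : VMA_NULL;
}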
11087 
11088 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11089 {
11090  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11091  const size_t suballocCount = AccessSuballocations1st().size();
11092  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11093 }
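
// Editor's note (illustrative): the heuristic above triggers compaction once
// null items reach 1.5x the live items, and only for non-trivial vectors:
inline bool ExampleShouldCompact(size_t nullItemCount, size_t suballocCount)
{
    // e.g. ExampleShouldCompact(25, 40): 25*2 = 50 >= (40-25)*3 = 45 -> compact.
    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
}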
11094 
11095 void VmaBlockMetadata_Linear::CleanupAfterFree()
11096 {
11097  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11098  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11099 
11100  if(IsEmpty())
11101  {
11102  suballocations1st.clear();
11103  suballocations2nd.clear();
11104  m_1stNullItemsBeginCount = 0;
11105  m_1stNullItemsMiddleCount = 0;
11106  m_2ndNullItemsCount = 0;
11107  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11108  }
11109  else
11110  {
11111  const size_t suballoc1stCount = suballocations1st.size();
11112  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11113  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11114 
11115  // Find more null items at the beginning of 1st vector.
11116  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11117  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11118  {
11119  ++m_1stNullItemsBeginCount;
11120  --m_1stNullItemsMiddleCount;
11121  }
11122 
11123  // Find more null items at the end of 1st vector.
11124  while(m_1stNullItemsMiddleCount > 0 &&
11125  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11126  {
11127  --m_1stNullItemsMiddleCount;
11128  suballocations1st.pop_back();
11129  }
11130 
11131  // Find more null items at the end of 2nd vector.
11132  while(m_2ndNullItemsCount > 0 &&
11133  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11134  {
11135  --m_2ndNullItemsCount;
11136  suballocations2nd.pop_back();
11137  }
11138 
11139  // Find more null items at the beginning of 2nd vector.
11140  while(m_2ndNullItemsCount > 0 &&
11141  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11142  {
11143  --m_2ndNullItemsCount;
11144  VmaVectorRemove(suballocations2nd, 0);
11145  }
11146 
11147  if(ShouldCompact1st())
11148  {
11149  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11150  size_t srcIndex = m_1stNullItemsBeginCount;
11151  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11152  {
11153  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11154  {
11155  ++srcIndex;
11156  }
11157  if(dstIndex != srcIndex)
11158  {
11159  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11160  }
11161  ++srcIndex;
11162  }
11163  suballocations1st.resize(nonNullItemCount);
11164  m_1stNullItemsBeginCount = 0;
11165  m_1stNullItemsMiddleCount = 0;
11166  }
11167 
11168  // 2nd vector became empty.
11169  if(suballocations2nd.empty())
11170  {
11171  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11172  }
11173 
11174  // 1st vector became empty.
11175  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11176  {
11177  suballocations1st.clear();
11178  m_1stNullItemsBeginCount = 0;
11179 
11180  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11181  {
11182  // Swap 1st with 2nd. Now 2nd is empty.
11183  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11184  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11185  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11186  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11187  {
11188  ++m_1stNullItemsBeginCount;
11189  --m_1stNullItemsMiddleCount;
11190  }
11191  m_2ndNullItemsCount = 0;
11192  m_1stVectorIndex ^= 1;
11193  }
11194  }
11195  }
11196 
11197  VMA_HEAVY_ASSERT(Validate());
11198 }
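
// Editor's illustrative sketch (not part of the library): the compaction pass
// inside CleanupAfterFree() is an in-place, order-preserving "remove nulls",
// equivalent in spirit to:
inline size_t ExampleCompactNulls(VmaSuballocation* items, size_t count)
{
    size_t dst = 0;
    for(size_t src = 0; src < count; ++src)
    {
        if(items[src].hAllocation != VK_NULL_HANDLE)
        {
            items[dst++] = items[src]; // keep live items in their original order
        }
    }
    return dst; // new element count; the caller shrinks the vector to this
}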
11199 
11200 
11201 ////////////////////////////////////////////////////////////////////////////////
11202 // class VmaBlockMetadata_Buddy
11203 
11204 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11205  VmaBlockMetadata(hAllocator),
11206  m_Root(VMA_NULL),
11207  m_AllocationCount(0),
11208  m_FreeCount(1),
11209  m_SumFreeSize(0)
11210 {
11211  memset(m_FreeList, 0, sizeof(m_FreeList));
11212 }
11213 
11214 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11215 {
11216  DeleteNode(m_Root);
11217 }
11218 
11219 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11220 {
11221  VmaBlockMetadata::Init(size);
11222 
11223  m_UsableSize = VmaPrevPow2(size);
11224  m_SumFreeSize = m_UsableSize;
11225 
11226  // Calculate m_LevelCount.
11227  m_LevelCount = 1;
11228  while(m_LevelCount < MAX_LEVELS &&
11229  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11230  {
11231  ++m_LevelCount;
11232  }
11233 
11234  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11235  rootNode->offset = 0;
11236  rootNode->type = Node::TYPE_FREE;
11237  rootNode->parent = VMA_NULL;
11238  rootNode->buddy = VMA_NULL;
11239 
11240  m_Root = rootNode;
11241  AddToFreeListFront(0, rootNode);
11242 }
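
// Editor's note (illustrative): only the previous power of 2 of the block size
// is usable by the buddy algorithm, and each level halves the node size, so
// LevelToNodeSize(level) is m_UsableSize >> level. A sketch of that rounding:
inline VkDeviceSize ExamplePrevPow2(VkDeviceSize v)
{
    VkDeviceSize result = 1;
    while(result <= v / 2)
    {
        result *= 2;
    }
    return result; // e.g. ExamplePrevPow2(100) == 64, so a 100 MB block exposes 64 MB
}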
11243 
11244 bool VmaBlockMetadata_Buddy::Validate() const
11245 {
11246  // Validate tree.
11247  ValidationContext ctx;
11248  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11249  {
11250  VMA_VALIDATE(false && "ValidateNode failed.");
11251  }
11252  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11253  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11254 
11255  // Validate free node lists.
11256  for(uint32_t level = 0; level < m_LevelCount; ++level)
11257  {
11258  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11259  m_FreeList[level].front->free.prev == VMA_NULL);
11260 
11261  for(Node* node = m_FreeList[level].front;
11262  node != VMA_NULL;
11263  node = node->free.next)
11264  {
11265  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11266 
11267  if(node->free.next == VMA_NULL)
11268  {
11269  VMA_VALIDATE(m_FreeList[level].back == node);
11270  }
11271  else
11272  {
11273  VMA_VALIDATE(node->free.next->free.prev == node);
11274  }
11275  }
11276  }
11277 
11278  // Validate that free lists at higher levels are empty.
11279  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11280  {
11281  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11282  }
11283 
11284  return true;
11285 }
11286 
11287 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11288 {
11289  for(uint32_t level = 0; level < m_LevelCount; ++level)
11290  {
11291  if(m_FreeList[level].front != VMA_NULL)
11292  {
11293  return LevelToNodeSize(level);
11294  }
11295  }
11296  return 0;
11297 }
11298 
11299 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11300 {
11301  const VkDeviceSize unusableSize = GetUnusableSize();
11302 
11303  outInfo.blockCount = 1;
11304 
11305  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11306  outInfo.usedBytes = outInfo.unusedBytes = 0;
11307 
11308  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11309  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11310  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11311 
11312  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11313 
11314  if(unusableSize > 0)
11315  {
11316  ++outInfo.unusedRangeCount;
11317  outInfo.unusedBytes += unusableSize;
11318  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11319  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11320  }
11321 }
11322 
11323 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11324 {
11325  const VkDeviceSize unusableSize = GetUnusableSize();
11326 
11327  inoutStats.size += GetSize();
11328  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11329  inoutStats.allocationCount += m_AllocationCount;
11330  inoutStats.unusedRangeCount += m_FreeCount;
11331  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11332 
11333  if(unusableSize > 0)
11334  {
11335  ++inoutStats.unusedRangeCount;
11336  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11337  }
11338 }
11339 
11340 #if VMA_STATS_STRING_ENABLED
11341 
11342 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11343 {
11344  // TODO optimize
11345  VmaStatInfo stat;
11346  CalcAllocationStatInfo(stat);
11347 
11348  PrintDetailedMap_Begin(
11349  json,
11350  stat.unusedBytes,
11351  stat.allocationCount,
11352  stat.unusedRangeCount);
11353 
11354  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11355 
11356  const VkDeviceSize unusableSize = GetUnusableSize();
11357  if(unusableSize > 0)
11358  {
11359  PrintDetailedMap_UnusedRange(json,
11360  m_UsableSize, // offset
11361  unusableSize); // size
11362  }
11363 
11364  PrintDetailedMap_End(json);
11365 }
11366 
11367 #endif // #if VMA_STATS_STRING_ENABLED
11368 
11369 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11370  uint32_t currentFrameIndex,
11371  uint32_t frameInUseCount,
11372  VkDeviceSize bufferImageGranularity,
11373  VkDeviceSize allocSize,
11374  VkDeviceSize allocAlignment,
11375  bool upperAddress,
11376  VmaSuballocationType allocType,
11377  bool canMakeOtherLost,
11378  uint32_t strategy,
11379  VmaAllocationRequest* pAllocationRequest)
11380 {
11381  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11382 
11383  // Simple way to respect bufferImageGranularity. May be optimized some day.
11384  // Whenever it might be an OPTIMAL image...
11385  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11386  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11387  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11388  {
11389  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11390  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11391  }
11392 
11393  if(allocSize > m_UsableSize)
11394  {
11395  return false;
11396  }
11397 
11398  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11399  for(uint32_t level = targetLevel + 1; level--; )
11400  {
11401  for(Node* freeNode = m_FreeList[level].front;
11402  freeNode != VMA_NULL;
11403  freeNode = freeNode->free.next)
11404  {
11405  if(freeNode->offset % allocAlignment == 0)
11406  {
11407  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11408  pAllocationRequest->offset = freeNode->offset;
11409  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11410  pAllocationRequest->sumItemSize = 0;
11411  pAllocationRequest->itemsToMakeLostCount = 0;
11412  pAllocationRequest->customData = (void*)(uintptr_t)level;
11413  return true;
11414  }
11415  }
11416  }
11417 
11418  return false;
11419 }
11420 
11421 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11422  uint32_t currentFrameIndex,
11423  uint32_t frameInUseCount,
11424  VmaAllocationRequest* pAllocationRequest)
11425 {
11426  /*
11427  Lost allocations are not supported in buddy allocator at the moment.
11428  Support might be added in the future.
11429  */
11430  return pAllocationRequest->itemsToMakeLostCount == 0;
11431 }
11432 
11433 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11434 {
11435  /*
11436  Lost allocations are not supported in buddy allocator at the moment.
11437  Support might be added in the future.
11438  */
11439  return 0;
11440 }
11441 
11442 void VmaBlockMetadata_Buddy::Alloc(
11443  const VmaAllocationRequest& request,
11444  VmaSuballocationType type,
11445  VkDeviceSize allocSize,
11446  VmaAllocation hAllocation)
11447 {
11448  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11449 
11450  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11451  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11452 
11453  Node* currNode = m_FreeList[currLevel].front;
11454  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11455  while(currNode->offset != request.offset)
11456  {
11457  currNode = currNode->free.next;
11458  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11459  }
11460 
11461  // Go down, splitting free nodes.
11462  while(currLevel < targetLevel)
11463  {
11464  // currNode is already first free node at currLevel.
11465  // Remove it from list of free nodes at this currLevel.
11466  RemoveFromFreeList(currLevel, currNode);
11467 
11468  const uint32_t childrenLevel = currLevel + 1;
11469 
11470  // Create two free sub-nodes.
11471  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11472  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11473 
11474  leftChild->offset = currNode->offset;
11475  leftChild->type = Node::TYPE_FREE;
11476  leftChild->parent = currNode;
11477  leftChild->buddy = rightChild;
11478 
11479  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11480  rightChild->type = Node::TYPE_FREE;
11481  rightChild->parent = currNode;
11482  rightChild->buddy = leftChild;
11483 
11484  // Convert current currNode to split type.
11485  currNode->type = Node::TYPE_SPLIT;
11486  currNode->split.leftChild = leftChild;
11487 
11488  // Add child nodes to free list. Order is important!
11489  AddToFreeListFront(childrenLevel, rightChild);
11490  AddToFreeListFront(childrenLevel, leftChild);
11491 
11492  ++m_FreeCount;
11493  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11494  ++currLevel;
11495  currNode = m_FreeList[currLevel].front;
11496 
11497  /*
11498  We can be sure that currNode, as left child of node previously split,
11499  also fulfills the alignment requirement.
11500  */
11501  }
11502 
11503  // Remove from free list.
11504  VMA_ASSERT(currLevel == targetLevel &&
11505  currNode != VMA_NULL &&
11506  currNode->type == Node::TYPE_FREE);
11507  RemoveFromFreeList(currLevel, currNode);
11508 
11509  // Convert to allocation node.
11510  currNode->type = Node::TYPE_ALLOCATION;
11511  currNode->allocation.alloc = hAllocation;
11512 
11513  ++m_AllocationCount;
11514  --m_FreeCount;
11515  m_SumFreeSize -= allocSize;
11516 }
11517 
11518 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11519 {
11520  if(node->type == Node::TYPE_SPLIT)
11521  {
11522  DeleteNode(node->split.leftChild->buddy);
11523  DeleteNode(node->split.leftChild);
11524  }
11525 
11526  vma_delete(GetAllocationCallbacks(), node);
11527 }
11528 
11529 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11530 {
11531  VMA_VALIDATE(level < m_LevelCount);
11532  VMA_VALIDATE(curr->parent == parent);
11533  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11534  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11535  switch(curr->type)
11536  {
11537  case Node::TYPE_FREE:
11538  // curr->free.prev, next are validated separately.
11539  ctx.calculatedSumFreeSize += levelNodeSize;
11540  ++ctx.calculatedFreeCount;
11541  break;
11542  case Node::TYPE_ALLOCATION:
11543  ++ctx.calculatedAllocationCount;
11544  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11545  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11546  break;
11547  case Node::TYPE_SPLIT:
11548  {
11549  const uint32_t childrenLevel = level + 1;
11550  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11551  const Node* const leftChild = curr->split.leftChild;
11552  VMA_VALIDATE(leftChild != VMA_NULL);
11553  VMA_VALIDATE(leftChild->offset == curr->offset);
11554  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11555  {
11556  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11557  }
11558  const Node* const rightChild = leftChild->buddy;
11559  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11560  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11561  {
11562  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11563  }
11564  }
11565  break;
11566  default:
11567  return false;
11568  }
11569 
11570  return true;
11571 }
11572 
11573 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11574 {
11575  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11576  uint32_t level = 0;
11577  VkDeviceSize currLevelNodeSize = m_UsableSize;
11578  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11579  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11580  {
11581  ++level;
11582  currLevelNodeSize = nextLevelNodeSize;
11583  nextLevelNodeSize = currLevelNodeSize >> 1;
11584  }
11585  return level;
11586 }
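
// Editor's note (illustrative): the loop above selects the deepest level whose
// node size still fits the allocation. Rewritten with shifts, assuming the
// same meaning of usableSize and levelCount:
inline uint32_t ExampleAllocSizeToLevel(VkDeviceSize usableSize, VkDeviceSize allocSize, uint32_t levelCount)
{
    uint32_t level = 0;
    while((usableSize >> (level + 1)) >= allocSize && level + 1 < levelCount)
    {
        ++level;
    }
    return level; // e.g. usableSize = 1024, allocSize = 100 -> level 3 (node size 128)
}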
11587 
11588 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11589 {
11590  // Find node and level.
11591  Node* node = m_Root;
11592  VkDeviceSize nodeOffset = 0;
11593  uint32_t level = 0;
11594  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11595  while(node->type == Node::TYPE_SPLIT)
11596  {
11597  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11598  if(offset < nodeOffset + nextLevelSize)
11599  {
11600  node = node->split.leftChild;
11601  }
11602  else
11603  {
11604  node = node->split.leftChild->buddy;
11605  nodeOffset += nextLevelSize;
11606  }
11607  ++level;
11608  levelNodeSize = nextLevelSize;
11609  }
11610 
11611  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11612  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11613 
11614  ++m_FreeCount;
11615  --m_AllocationCount;
11616  m_SumFreeSize += alloc->GetSize();
11617 
11618  node->type = Node::TYPE_FREE;
11619 
11620  // Join free nodes if possible.
11621  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11622  {
11623  RemoveFromFreeList(level, node->buddy);
11624  Node* const parent = node->parent;
11625 
11626  vma_delete(GetAllocationCallbacks(), node->buddy);
11627  vma_delete(GetAllocationCallbacks(), node);
11628  parent->type = Node::TYPE_FREE;
11629 
11630  node = parent;
11631  --level;
11632  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11633  --m_FreeCount;
11634  }
11635 
11636  AddToFreeListFront(level, node);
11637 }
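
// Editor's note (general buddy-allocator background, not VMA's representation):
// because node sizes are powers of 2, the buddy of a node can also be found
// purely arithmetically - the two halves of a parent differ in exactly one
// offset bit. VMA keeps explicit parent/buddy pointers instead, which lets the
// merge loop above walk up the tree without recomputing offsets.
inline VkDeviceSize ExampleBuddyOffset(VkDeviceSize offset, VkDeviceSize nodeSize)
{
    return offset ^ nodeSize; // e.g. ExampleBuddyOffset(256, 256) == 0 and ExampleBuddyOffset(0, 256) == 256
}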
11638 
11639 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11640 {
11641  switch(node->type)
11642  {
11643  case Node::TYPE_FREE:
11644  ++outInfo.unusedRangeCount;
11645  outInfo.unusedBytes += levelNodeSize;
11646  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11647  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11648  break;
11649  case Node::TYPE_ALLOCATION:
11650  {
11651  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11652  ++outInfo.allocationCount;
11653  outInfo.usedBytes += allocSize;
11654  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11655  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11656 
11657  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11658  if(unusedRangeSize > 0)
11659  {
11660  ++outInfo.unusedRangeCount;
11661  outInfo.unusedBytes += unusedRangeSize;
11662  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11663  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11664  }
11665  }
11666  break;
11667  case Node::TYPE_SPLIT:
11668  {
11669  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11670  const Node* const leftChild = node->split.leftChild;
11671  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11672  const Node* const rightChild = leftChild->buddy;
11673  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11674  }
11675  break;
11676  default:
11677  VMA_ASSERT(0);
11678  }
11679 }
11680 
11681 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11682 {
11683  VMA_ASSERT(node->type == Node::TYPE_FREE);
11684 
11685  // List is empty.
11686  Node* const frontNode = m_FreeList[level].front;
11687  if(frontNode == VMA_NULL)
11688  {
11689  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11690  node->free.prev = node->free.next = VMA_NULL;
11691  m_FreeList[level].front = m_FreeList[level].back = node;
11692  }
11693  else
11694  {
11695  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11696  node->free.prev = VMA_NULL;
11697  node->free.next = frontNode;
11698  frontNode->free.prev = node;
11699  m_FreeList[level].front = node;
11700  }
11701 }
11702 
11703 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11704 {
11705  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11706 
11707  // It is at the front.
11708  if(node->free.prev == VMA_NULL)
11709  {
11710  VMA_ASSERT(m_FreeList[level].front == node);
11711  m_FreeList[level].front = node->free.next;
11712  }
11713  else
11714  {
11715  Node* const prevFreeNode = node->free.prev;
11716  VMA_ASSERT(prevFreeNode->free.next == node);
11717  prevFreeNode->free.next = node->free.next;
11718  }
11719 
11720  // It is at the back.
11721  if(node->free.next == VMA_NULL)
11722  {
11723  VMA_ASSERT(m_FreeList[level].back == node);
11724  m_FreeList[level].back = node->free.prev;
11725  }
11726  else
11727  {
11728  Node* const nextFreeNode = node->free.next;
11729  VMA_ASSERT(nextFreeNode->free.prev == node);
11730  nextFreeNode->free.prev = node->free.prev;
11731  }
11732 }
11733 
11734 #if VMA_STATS_STRING_ENABLED
11735 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11736 {
11737  switch(node->type)
11738  {
11739  case Node::TYPE_FREE:
11740  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11741  break;
11742  case Node::TYPE_ALLOCATION:
11743  {
11744  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11745  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11746  if(allocSize < levelNodeSize)
11747  {
11748  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11749  }
11750  }
11751  break;
11752  case Node::TYPE_SPLIT:
11753  {
11754  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11755  const Node* const leftChild = node->split.leftChild;
11756  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11757  const Node* const rightChild = leftChild->buddy;
11758  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11759  }
11760  break;
11761  default:
11762  VMA_ASSERT(0);
11763  }
11764 }
11765 #endif // #if VMA_STATS_STRING_ENABLED
11766 
11767 
11768 ////////////////////////////////////////////////////////////////////////////////
11769 // class VmaDeviceMemoryBlock
11770 
11771 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11772  m_pMetadata(VMA_NULL),
11773  m_MemoryTypeIndex(UINT32_MAX),
11774  m_Id(0),
11775  m_hMemory(VK_NULL_HANDLE),
11776  m_MapCount(0),
11777  m_pMappedData(VMA_NULL)
11778 {
11779 }
11780 
11781 void VmaDeviceMemoryBlock::Init(
11782  VmaAllocator hAllocator,
11783  VmaPool hParentPool,
11784  uint32_t newMemoryTypeIndex,
11785  VkDeviceMemory newMemory,
11786  VkDeviceSize newSize,
11787  uint32_t id,
11788  uint32_t algorithm)
11789 {
11790  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11791 
11792  m_hParentPool = hParentPool;
11793  m_MemoryTypeIndex = newMemoryTypeIndex;
11794  m_Id = id;
11795  m_hMemory = newMemory;
11796 
11797  switch(algorithm)
11798  {
11799  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11800  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11801  break;
11802  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11803  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11804  break;
11805  default:
11806  VMA_ASSERT(0);
11807  // Fall-through.
11808  case 0:
11809  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11810  }
11811  m_pMetadata->Init(newSize);
11812 }
11813 
11814 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11815 {
11816  // This is the most important assert in the entire library.
11817  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11818  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11819 
11820  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11821  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11822  m_hMemory = VK_NULL_HANDLE;
11823 
11824  vma_delete(allocator, m_pMetadata);
11825  m_pMetadata = VMA_NULL;
11826 }
11827 
11828 bool VmaDeviceMemoryBlock::Validate() const
11829 {
11830  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11831  (m_pMetadata->GetSize() != 0));
11832 
11833  return m_pMetadata->Validate();
11834 }
11835 
11836 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11837 {
11838  void* pData = nullptr;
11839  VkResult res = Map(hAllocator, 1, &pData);
11840  if(res != VK_SUCCESS)
11841  {
11842  return res;
11843  }
11844 
11845  res = m_pMetadata->CheckCorruption(pData);
11846 
11847  Unmap(hAllocator, 1);
11848 
11849  return res;
11850 }
11851 
11852 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11853 {
11854  if(count == 0)
11855  {
11856  return VK_SUCCESS;
11857  }
11858 
11859  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11860  if(m_MapCount != 0)
11861  {
11862  m_MapCount += count;
11863  VMA_ASSERT(m_pMappedData != VMA_NULL);
11864  if(ppData != VMA_NULL)
11865  {
11866  *ppData = m_pMappedData;
11867  }
11868  return VK_SUCCESS;
11869  }
11870  else
11871  {
11872  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11873  hAllocator->m_hDevice,
11874  m_hMemory,
11875  0, // offset
11876  VK_WHOLE_SIZE,
11877  0, // flags
11878  &m_pMappedData);
11879  if(result == VK_SUCCESS)
11880  {
11881  if(ppData != VMA_NULL)
11882  {
11883  *ppData = m_pMappedData;
11884  }
11885  m_MapCount = count;
11886  }
11887  return result;
11888  }
11889 }
11890 
11891 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11892 {
11893  if(count == 0)
11894  {
11895  return;
11896  }
11897 
11898  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11899  if(m_MapCount >= count)
11900  {
11901  m_MapCount -= count;
11902  if(m_MapCount == 0)
11903  {
11904  m_pMappedData = VMA_NULL;
11905  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11906  }
11907  }
11908  else
11909  {
11910  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11911  }
11912 }
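
// Editor's illustrative sketch (not part of the library): Map/Unmap above are
// reference-counted per block - vkMapMemory runs only on the 0 -> 1 transition
// and vkUnmapMemory on the 1 -> 0 transition. From user code the pair looks
// like this (memcpy is available; the implementation already uses memset/memcpy):
inline VkResult ExampleMapAndWrite(VmaAllocator allocator, VmaAllocation alloc, const void* src, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData); // increments the block's map count
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, src, size);
    vmaUnmapMemory(allocator, alloc); // decrements; unmaps the VkDeviceMemory at zero
    return VK_SUCCESS;
}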
11913 
11914 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11915 {
11916  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11917  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11918 
11919  void* pData;
11920  VkResult res = Map(hAllocator, 1, &pData);
11921  if(res != VK_SUCCESS)
11922  {
11923  return res;
11924  }
11925 
11926  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11927  VmaWriteMagicValue(pData, allocOffset + allocSize);
11928 
11929  Unmap(hAllocator, 1);
11930 
11931  return VK_SUCCESS;
11932 }
11933 
11934 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11935 {
11936  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11937  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11938 
11939  void* pData;
11940  VkResult res = Map(hAllocator, 1, &pData);
11941  if(res != VK_SUCCESS)
11942  {
11943  return res;
11944  }
11945 
11946  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11947  {
11948  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11949  }
11950  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11951  {
11952  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11953  }
11954 
11955  Unmap(hAllocator, 1);
11956 
11957  return VK_SUCCESS;
11958 }
11959 
11960 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11961  const VmaAllocator hAllocator,
11962  const VmaAllocation hAllocation,
11963  VkDeviceSize allocationLocalOffset,
11964  VkBuffer hBuffer,
11965  const void* pNext)
11966 {
11967  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11968  hAllocation->GetBlock() == this);
11969  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11970  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11971  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11972  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11973  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11974  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11975 }
11976 
11977 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11978  const VmaAllocator hAllocator,
11979  const VmaAllocation hAllocation,
11980  VkDeviceSize allocationLocalOffset,
11981  VkImage hImage,
11982  const void* pNext)
11983 {
11984  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11985  hAllocation->GetBlock() == this);
11986  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11987  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11988  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11989  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11990  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11991  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11992 }
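
// Editor's illustrative sketch (not part of the library): these two methods
// back the public binding entry points. Binding a self-created buffer to an
// existing allocation at its start:
inline VkResult ExampleBindOwnBuffer(VmaAllocator allocator, VmaAllocation alloc, VkBuffer buffer)
{
    // allocationLocalOffset 0 = the beginning of the allocation; pNext stays
    // VMA_NULL unless chaining an extension structure (e.g. from VK_KHR_bind_memory2).
    return vmaBindBufferMemory2(allocator, alloc, 0, buffer, VMA_NULL);
}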
11993 
11994 static void InitStatInfo(VmaStatInfo& outInfo)
11995 {
11996  memset(&outInfo, 0, sizeof(outInfo));
11997  outInfo.allocationSizeMin = UINT64_MAX;
11998  outInfo.unusedRangeSizeMin = UINT64_MAX;
11999 }
12000 
12001 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12002 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12003 {
12004  inoutInfo.blockCount += srcInfo.blockCount;
12005  inoutInfo.allocationCount += srcInfo.allocationCount;
12006  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12007  inoutInfo.usedBytes += srcInfo.usedBytes;
12008  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12009  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12010  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12011  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12012  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12013 }
12014 
12015 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12016 {
12017  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12018  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12019  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12020  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12021 }
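
/* Illustrative sketch (not part of the original source): InitStatInfo(), VmaAddStatInfo()
and VmaPostprocessCalcStatInfo() implement the sum-then-average pattern behind
vmaCalculateStats(). From the public API, assuming an initialized allocator:

\code
#include <cstdio>

VmaStats stats;
vmaCalculateStats(allocator, &stats);
// stats.total has been accumulated with VmaAddStatInfo() and finished with
// VmaPostprocessCalcStatInfo(), so the derived averages are already valid here:
printf("used: %llu B, avg allocation: %llu B\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.allocationSizeAvg);
\endcode
*/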
12022 
12023 VmaPool_T::VmaPool_T(
12024  VmaAllocator hAllocator,
12025  const VmaPoolCreateInfo& createInfo,
12026  VkDeviceSize preferredBlockSize) :
12027  m_BlockVector(
12028  hAllocator,
12029  this, // hParentPool
12030  createInfo.memoryTypeIndex,
12031  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12032  createInfo.minBlockCount,
12033  createInfo.maxBlockCount,
12034  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12035  createInfo.frameInUseCount,
12036  createInfo.blockSize != 0, // explicitBlockSize
12037  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
12038  m_Id(0),
12039  m_Name(VMA_NULL)
12040 {
12041 }
12042 
12043 VmaPool_T::~VmaPool_T()
12044 {
12045 }
12046 
12047 void VmaPool_T::SetName(const char* pName)
12048 {
12049  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12050  VmaFreeString(allocs, m_Name);
12051 
12052  if(pName != VMA_NULL)
12053  {
12054  m_Name = VmaCreateStringCopy(allocs, pName);
12055  }
12056  else
12057  {
12058  m_Name = VMA_NULL;
12059  }
12060 }
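
/* Illustrative sketch (not part of the original source): SetName() stores a private copy
of the string using the allocator's allocation callbacks; the name later appears in JSON
statistics dumps. Through the public API, assuming valid allocator and pool handles:

\code
vmaSetPoolName(allocator, pool, "TexturePool");

const char* pName = VMA_NULL;
vmaGetPoolName(allocator, pool, &pName); // Returns the copy stored by SetName().
\endcode
*/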
12061 
12062 #if VMA_STATS_STRING_ENABLED
12063 
12064 #endif // #if VMA_STATS_STRING_ENABLED
12065 
12066 VmaBlockVector::VmaBlockVector(
12067  VmaAllocator hAllocator,
12068  VmaPool hParentPool,
12069  uint32_t memoryTypeIndex,
12070  VkDeviceSize preferredBlockSize,
12071  size_t minBlockCount,
12072  size_t maxBlockCount,
12073  VkDeviceSize bufferImageGranularity,
12074  uint32_t frameInUseCount,
12075  bool explicitBlockSize,
12076  uint32_t algorithm) :
12077  m_hAllocator(hAllocator),
12078  m_hParentPool(hParentPool),
12079  m_MemoryTypeIndex(memoryTypeIndex),
12080  m_PreferredBlockSize(preferredBlockSize),
12081  m_MinBlockCount(minBlockCount),
12082  m_MaxBlockCount(maxBlockCount),
12083  m_BufferImageGranularity(bufferImageGranularity),
12084  m_FrameInUseCount(frameInUseCount),
12085  m_ExplicitBlockSize(explicitBlockSize),
12086  m_Algorithm(algorithm),
12087  m_HasEmptyBlock(false),
12088  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12089  m_NextBlockId(0)
12090 {
12091 }
12092 
12093 VmaBlockVector::~VmaBlockVector()
12094 {
12095  for(size_t i = m_Blocks.size(); i--; )
12096  {
12097  m_Blocks[i]->Destroy(m_hAllocator);
12098  vma_delete(m_hAllocator, m_Blocks[i]);
12099  }
12100 }
12101 
12102 VkResult VmaBlockVector::CreateMinBlocks()
12103 {
12104  for(size_t i = 0; i < m_MinBlockCount; ++i)
12105  {
12106  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12107  if(res != VK_SUCCESS)
12108  {
12109  return res;
12110  }
12111  }
12112  return VK_SUCCESS;
12113 }
12114 
12115 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12116 {
12117  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12118 
12119  const size_t blockCount = m_Blocks.size();
12120 
12121  pStats->size = 0;
12122  pStats->unusedSize = 0;
12123  pStats->allocationCount = 0;
12124  pStats->unusedRangeCount = 0;
12125  pStats->unusedRangeSizeMax = 0;
12126  pStats->blockCount = blockCount;
12127 
12128  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12129  {
12130  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12131  VMA_ASSERT(pBlock);
12132  VMA_HEAVY_ASSERT(pBlock->Validate());
12133  pBlock->m_pMetadata->AddPoolStats(*pStats);
12134  }
12135 }
12136 
12137 bool VmaBlockVector::IsEmpty()
12138 {
12139  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12140  return m_Blocks.empty();
12141 }
12142 
12143 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12144 {
12145  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12146  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12147  (VMA_DEBUG_MARGIN > 0) &&
12148  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12149  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12150 }
12151 
12152 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12153 
12154 VkResult VmaBlockVector::Allocate(
12155  uint32_t currentFrameIndex,
12156  VkDeviceSize size,
12157  VkDeviceSize alignment,
12158  const VmaAllocationCreateInfo& createInfo,
12159  VmaSuballocationType suballocType,
12160  size_t allocationCount,
12161  VmaAllocation* pAllocations)
12162 {
12163  size_t allocIndex;
12164  VkResult res = VK_SUCCESS;
12165 
12166  if(IsCorruptionDetectionEnabled())
12167  {
12168  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12169  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12170  }
12171 
12172  {
12173  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12174  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12175  {
12176  res = AllocatePage(
12177  currentFrameIndex,
12178  size,
12179  alignment,
12180  createInfo,
12181  suballocType,
12182  pAllocations + allocIndex);
12183  if(res != VK_SUCCESS)
12184  {
12185  break;
12186  }
12187  }
12188  }
12189 
12190  if(res != VK_SUCCESS)
12191  {
12192  // Free all already created allocations.
12193  while(allocIndex--)
12194  {
12195  Free(pAllocations[allocIndex]);
12196  }
12197  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12198  }
12199 
12200  return res;
12201 }
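
/* Illustrative sketch (not part of the original source): Allocate() above gives
vmaAllocateMemoryPages() its all-or-nothing contract - on any failure, the allocations
created so far are freed and the output array is zeroed. A hedged usage example with
hypothetical memory requirements:

\code
VkMemoryRequirements memReq = {};
memReq.size = 65536;
memReq.alignment = 256;
memReq.memoryTypeBits = UINT32_MAX;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocs[8] = {};
VkResult res = vmaAllocateMemoryPages(
    allocator, &memReq, &allocCreateInfo, 8, allocs, VMA_NULL);
if(res == VK_SUCCESS)
{
    // ... use the allocations ...
    vmaFreeMemoryPages(allocator, 8, allocs);
}
// On failure there is no partial state to clean up - allocs[] is all null.
\endcode
*/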
12202 
12203 VkResult VmaBlockVector::AllocatePage(
12204  uint32_t currentFrameIndex,
12205  VkDeviceSize size,
12206  VkDeviceSize alignment,
12207  const VmaAllocationCreateInfo& createInfo,
12208  VmaSuballocationType suballocType,
12209  VmaAllocation* pAllocation)
12210 {
12211  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12212  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12213  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12214  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12215 
12216  VkDeviceSize freeMemory;
12217  {
12218  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12219  VmaBudget heapBudget = {};
12220  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12221  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12222  }
12223 
12224  const bool canFallbackToDedicated = !IsCustomPool();
12225  const bool canCreateNewBlock =
12226  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12227  (m_Blocks.size() < m_MaxBlockCount) &&
12228  (freeMemory >= size || !canFallbackToDedicated);
12229  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12230 
12231  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
12232  // which in turn is available only when maxBlockCount = 1.
12233  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12234  {
12235  canMakeOtherLost = false;
12236  }
12237 
12238  // Upper address can only be used with linear allocator and within single memory block.
12239  if(isUpperAddress &&
12240  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12241  {
12242  return VK_ERROR_FEATURE_NOT_PRESENT;
12243  }
12244 
12245  // Validate strategy.
12246  switch(strategy)
12247  {
12248  case 0:
12249  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12250  break;
12251  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12252  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12253  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12254  break;
12255  default:
12256  return VK_ERROR_FEATURE_NOT_PRESENT;
12257  }
12258 
12259  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12260  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12261  {
12262  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12263  }
12264 
12265  /*
12266  Under certain conditions, this whole section can be skipped for optimization, so
12267  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12268  e.g. for custom pools with linear algorithm.
12269  */
12270  if(!canMakeOtherLost || canCreateNewBlock)
12271  {
12272  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12273  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12274  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12275 
12276  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12277  {
12278  // Use only last block.
12279  if(!m_Blocks.empty())
12280  {
12281  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12282  VMA_ASSERT(pCurrBlock);
12283  VkResult res = AllocateFromBlock(
12284  pCurrBlock,
12285  currentFrameIndex,
12286  size,
12287  alignment,
12288  allocFlagsCopy,
12289  createInfo.pUserData,
12290  suballocType,
12291  strategy,
12292  pAllocation);
12293  if(res == VK_SUCCESS)
12294  {
12295  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12296  return VK_SUCCESS;
12297  }
12298  }
12299  }
12300  else
12301  {
12302  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12303  {
12304  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12305  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12306  {
12307  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12308  VMA_ASSERT(pCurrBlock);
12309  VkResult res = AllocateFromBlock(
12310  pCurrBlock,
12311  currentFrameIndex,
12312  size,
12313  alignment,
12314  allocFlagsCopy,
12315  createInfo.pUserData,
12316  suballocType,
12317  strategy,
12318  pAllocation);
12319  if(res == VK_SUCCESS)
12320  {
12321  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12322  return VK_SUCCESS;
12323  }
12324  }
12325  }
12326  else // WORST_FIT, FIRST_FIT
12327  {
12328  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12329  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12330  {
12331  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12332  VMA_ASSERT(pCurrBlock);
12333  VkResult res = AllocateFromBlock(
12334  pCurrBlock,
12335  currentFrameIndex,
12336  size,
12337  alignment,
12338  allocFlagsCopy,
12339  createInfo.pUserData,
12340  suballocType,
12341  strategy,
12342  pAllocation);
12343  if(res == VK_SUCCESS)
12344  {
12345  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12346  return VK_SUCCESS;
12347  }
12348  }
12349  }
12350  }
12351 
12352  // 2. Try to create new block.
12353  if(canCreateNewBlock)
12354  {
12355  // Calculate optimal size for new block.
12356  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12357  uint32_t newBlockSizeShift = 0;
12358  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12359 
12360  if(!m_ExplicitBlockSize)
12361  {
12362  // Allocate 1/8, 1/4, 1/2 as first blocks.
12363  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12364  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12365  {
12366  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12367  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12368  {
12369  newBlockSize = smallerNewBlockSize;
12370  ++newBlockSizeShift;
12371  }
12372  else
12373  {
12374  break;
12375  }
12376  }
12377  }
12378 
12379  size_t newBlockIndex = 0;
12380  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12381  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12382  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12383  if(!m_ExplicitBlockSize)
12384  {
12385  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12386  {
12387  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12388  if(smallerNewBlockSize >= size)
12389  {
12390  newBlockSize = smallerNewBlockSize;
12391  ++newBlockSizeShift;
12392  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12393  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12394  }
12395  else
12396  {
12397  break;
12398  }
12399  }
12400  }
12401 
12402  if(res == VK_SUCCESS)
12403  {
12404  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12405  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12406 
12407  res = AllocateFromBlock(
12408  pBlock,
12409  currentFrameIndex,
12410  size,
12411  alignment,
12412  allocFlagsCopy,
12413  createInfo.pUserData,
12414  suballocType,
12415  strategy,
12416  pAllocation);
12417  if(res == VK_SUCCESS)
12418  {
12419  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12420  return VK_SUCCESS;
12421  }
12422  else
12423  {
12424  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12425  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12426  }
12427  }
12428  }
12429  }
12430 
12431  // 3. Try to allocate from existing blocks with making other allocations lost.
12432  if(canMakeOtherLost)
12433  {
12434  uint32_t tryIndex = 0;
12435  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12436  {
12437  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12438  VmaAllocationRequest bestRequest = {};
12439  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12440 
12441  // 1. Search existing allocations.
12442  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12443  {
12444  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12445  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12446  {
12447  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12448  VMA_ASSERT(pCurrBlock);
12449  VmaAllocationRequest currRequest = {};
12450  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12451  currentFrameIndex,
12452  m_FrameInUseCount,
12453  m_BufferImageGranularity,
12454  size,
12455  alignment,
12456  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12457  suballocType,
12458  canMakeOtherLost,
12459  strategy,
12460  &currRequest))
12461  {
12462  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12463  if(pBestRequestBlock == VMA_NULL ||
12464  currRequestCost < bestRequestCost)
12465  {
12466  pBestRequestBlock = pCurrBlock;
12467  bestRequest = currRequest;
12468  bestRequestCost = currRequestCost;
12469 
12470  if(bestRequestCost == 0)
12471  {
12472  break;
12473  }
12474  }
12475  }
12476  }
12477  }
12478  else // WORST_FIT, FIRST_FIT
12479  {
12480  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12481  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12482  {
12483  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12484  VMA_ASSERT(pCurrBlock);
12485  VmaAllocationRequest currRequest = {};
12486  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12487  currentFrameIndex,
12488  m_FrameInUseCount,
12489  m_BufferImageGranularity,
12490  size,
12491  alignment,
12492  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12493  suballocType,
12494  canMakeOtherLost,
12495  strategy,
12496  &currRequest))
12497  {
12498  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12499  if(pBestRequestBlock == VMA_NULL ||
12500  currRequestCost < bestRequestCost ||
12501  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12502  {
12503  pBestRequestBlock = pCurrBlock;
12504  bestRequest = currRequest;
12505  bestRequestCost = currRequestCost;
12506 
12507  if(bestRequestCost == 0 ||
12508  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12509  {
12510  break;
12511  }
12512  }
12513  }
12514  }
12515  }
12516 
12517  if(pBestRequestBlock != VMA_NULL)
12518  {
12519  if(mapped)
12520  {
12521  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12522  if(res != VK_SUCCESS)
12523  {
12524  return res;
12525  }
12526  }
12527 
12528  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12529  currentFrameIndex,
12530  m_FrameInUseCount,
12531  &bestRequest))
12532  {
12533  // Allocate from this pBlock.
12534  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12535  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12536  UpdateHasEmptyBlock();
12537  (*pAllocation)->InitBlockAllocation(
12538  pBestRequestBlock,
12539  bestRequest.offset,
12540  alignment,
12541  size,
12542  m_MemoryTypeIndex,
12543  suballocType,
12544  mapped,
12545  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12546  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12547  VMA_DEBUG_LOG(" Returned from existing block");
12548  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12549  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12550  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12551  {
12552  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12553  }
12554  if(IsCorruptionDetectionEnabled())
12555  {
12556  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12557  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12558  }
12559  return VK_SUCCESS;
12560  }
12561  // else: Some allocations must have been touched while we were here. Next try.
12562  }
12563  else
12564  {
12565  // Could not find place in any of the blocks - break outer loop.
12566  break;
12567  }
12568  }
12569  /* Maximum number of tries exceeded - a very unlikely event that happens when many
12570  other threads are simultaneously touching allocations, making it impossible to mark
12571  them lost while we try to allocate. */
12572  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12573  {
12574  return VK_ERROR_TOO_MANY_OBJECTS;
12575  }
12576  }
12577 
12578  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12579 }
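
/* Illustrative sketch (not part of the original source): the `strategy` validated and
dispatched throughout AllocatePage() comes directly from VmaAllocationCreateInfo::flags.
For example, to request the fastest (first-fit) search:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// MIN_TIME_BIT maps to FIRST_FIT; MIN_MEMORY_BIT to BEST_FIT and
// MIN_FRAGMENTATION_BIT to WORST_FIT, matching the switch validated above.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
\endcode
*/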
12580 
12581 void VmaBlockVector::Free(
12582  const VmaAllocation hAllocation)
12583 {
12584  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12585 
12586  bool budgetExceeded = false;
12587  {
12588  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12589  VmaBudget heapBudget = {};
12590  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12591  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12592  }
12593 
12594  // Scope for lock.
12595  {
12596  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12597 
12598  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12599 
12600  if(IsCorruptionDetectionEnabled())
12601  {
12602  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12603  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12604  }
12605 
12606  if(hAllocation->IsPersistentMap())
12607  {
12608  pBlock->Unmap(m_hAllocator, 1);
12609  }
12610 
12611  pBlock->m_pMetadata->Free(hAllocation);
12612  VMA_HEAVY_ASSERT(pBlock->Validate());
12613 
12614  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12615 
12616  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12617  // pBlock became empty after this deallocation.
12618  if(pBlock->m_pMetadata->IsEmpty())
12619  {
12620  // We already have an empty block - we don't want two, so delete this one.
12621  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12622  {
12623  pBlockToDelete = pBlock;
12624  Remove(pBlock);
12625  }
12626  // else: We now have an empty block - leave it.
12627  }
12628  // pBlock didn't become empty, but we have another empty block - find and free that one.
12629  // (This is optional, heuristics.)
12630  else if(m_HasEmptyBlock && canDeleteBlock)
12631  {
12632  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12633  if(pLastBlock->m_pMetadata->IsEmpty())
12634  {
12635  pBlockToDelete = pLastBlock;
12636  m_Blocks.pop_back();
12637  }
12638  }
12639 
12640  UpdateHasEmptyBlock();
12641  IncrementallySortBlocks();
12642  }
12643 
12644  // Destruction of a free block. Deferred until this point, outside of mutex
12645  // lock, for performance reasons.
12646  if(pBlockToDelete != VMA_NULL)
12647  {
12648  VMA_DEBUG_LOG(" Deleted empty block");
12649  pBlockToDelete->Destroy(m_hAllocator);
12650  vma_delete(m_hAllocator, pBlockToDelete);
12651  }
12652 }
12653 
12654 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12655 {
12656  VkDeviceSize result = 0;
12657  for(size_t i = m_Blocks.size(); i--; )
12658  {
12659  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12660  if(result >= m_PreferredBlockSize)
12661  {
12662  break;
12663  }
12664  }
12665  return result;
12666 }
12667 
12668 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12669 {
12670  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12671  {
12672  if(m_Blocks[blockIndex] == pBlock)
12673  {
12674  VmaVectorRemove(m_Blocks, blockIndex);
12675  return;
12676  }
12677  }
12678  VMA_ASSERT(0);
12679 }
12680 
12681 void VmaBlockVector::IncrementallySortBlocks()
12682 {
12683  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12684  {
12685  // Bubble sort only until first swap.
12686  for(size_t i = 1; i < m_Blocks.size(); ++i)
12687  {
12688  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12689  {
12690  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12691  return;
12692  }
12693  }
12694  }
12695 }
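
/* Illustrative sketch (not part of the original source): one bubble-sort pass that stops
at the first swap keeps m_Blocks only approximately sorted, but because Free() calls it on
every deallocation, the vector converges toward fully sorted order over time at O(n) worst
case per call. The same idea on a plain std::vector:

\code
#include <utility>
#include <vector>

// Performs at most one adjacent swap per call; repeated calls eventually sort `v`.
static void IncrementallySort(std::vector<int>& v)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(v[i - 1] > v[i])
        {
            std::swap(v[i - 1], v[i]);
            return; // Bubble sort only until first swap, as above.
        }
    }
}
\endcode
*/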
12696 
12697 VkResult VmaBlockVector::AllocateFromBlock(
12698  VmaDeviceMemoryBlock* pBlock,
12699  uint32_t currentFrameIndex,
12700  VkDeviceSize size,
12701  VkDeviceSize alignment,
12702  VmaAllocationCreateFlags allocFlags,
12703  void* pUserData,
12704  VmaSuballocationType suballocType,
12705  uint32_t strategy,
12706  VmaAllocation* pAllocation)
12707 {
12708  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12709  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12710  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12711  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12712 
12713  VmaAllocationRequest currRequest = {};
12714  if(pBlock->m_pMetadata->CreateAllocationRequest(
12715  currentFrameIndex,
12716  m_FrameInUseCount,
12717  m_BufferImageGranularity,
12718  size,
12719  alignment,
12720  isUpperAddress,
12721  suballocType,
12722  false, // canMakeOtherLost
12723  strategy,
12724  &currRequest))
12725  {
12726  // Allocate from pCurrBlock.
12727  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12728 
12729  if(mapped)
12730  {
12731  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12732  if(res != VK_SUCCESS)
12733  {
12734  return res;
12735  }
12736  }
12737 
12738  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12739  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12740  UpdateHasEmptyBlock();
12741  (*pAllocation)->InitBlockAllocation(
12742  pBlock,
12743  currRequest.offset,
12744  alignment,
12745  size,
12746  m_MemoryTypeIndex,
12747  suballocType,
12748  mapped,
12749  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12750  VMA_HEAVY_ASSERT(pBlock->Validate());
12751  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12752  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12753  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12754  {
12755  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12756  }
12757  if(IsCorruptionDetectionEnabled())
12758  {
12759  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12760  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12761  }
12762  return VK_SUCCESS;
12763  }
12764  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12765 }
12766 
12767 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12768 {
12769  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12770  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12771  allocInfo.allocationSize = blockSize;
12772 
12773 #if VMA_BUFFER_DEVICE_ADDRESS
12774  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
12775  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
12776  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
12777  {
12778  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
12779  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
12780  }
12781 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
12782 
12783  VkDeviceMemory mem = VK_NULL_HANDLE;
12784  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12785  if(res < 0)
12786  {
12787  return res;
12788  }
12789 
12790  // New VkDeviceMemory successfully created.
12791 
12792  // Create new Allocation for it.
12793  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12794  pBlock->Init(
12795  m_hAllocator,
12796  m_hParentPool,
12797  m_MemoryTypeIndex,
12798  mem,
12799  allocInfo.allocationSize,
12800  m_NextBlockId++,
12801  m_Algorithm);
12802 
12803  m_Blocks.push_back(pBlock);
12804  if(pNewBlockIndex != VMA_NULL)
12805  {
12806  *pNewBlockIndex = m_Blocks.size() - 1;
12807  }
12808 
12809  return VK_SUCCESS;
12810 }
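
/* Illustrative sketch (not part of the original source): note how CreateBlock() chains
VkMemoryAllocateFlagsInfoKHR in front of the pNext list when buffer device address is in
use. The generic push-front pattern that VmaPnextChainPushFront() applies:

\code
VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
VkMemoryAllocateFlagsInfoKHR flagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
flagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
// Push-front: the new struct points at the old head, then becomes the head.
flagsInfo.pNext = allocInfo.pNext;
allocInfo.pNext = &flagsInfo;
\endcode
*/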
12811 
12812 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12813  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12814  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12815 {
12816  const size_t blockCount = m_Blocks.size();
12817  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12818 
12819  enum BLOCK_FLAG
12820  {
12821  BLOCK_FLAG_USED = 0x00000001,
12822  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12823  };
12824 
12825  struct BlockInfo
12826  {
12827  uint32_t flags;
12828  void* pMappedData;
12829  };
12830  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12831  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12832  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12833 
12834  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12835  const size_t moveCount = moves.size();
12836  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12837  {
12838  const VmaDefragmentationMove& move = moves[moveIndex];
12839  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12840  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12841  }
12842 
12843  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12844 
12845  // Go over all blocks. Get mapped pointer or map if necessary.
12846  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12847  {
12848  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12849  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12850  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12851  {
12852  currBlockInfo.pMappedData = pBlock->GetMappedData();
12853  // It is not originally mapped - map it.
12854  if(currBlockInfo.pMappedData == VMA_NULL)
12855  {
12856  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12857  if(pDefragCtx->res == VK_SUCCESS)
12858  {
12859  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12860  }
12861  }
12862  }
12863  }
12864 
12865  // Go over all moves. Do actual data transfer.
12866  if(pDefragCtx->res == VK_SUCCESS)
12867  {
12868  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12869  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12870 
12871  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12872  {
12873  const VmaDefragmentationMove& move = moves[moveIndex];
12874 
12875  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12876  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12877 
12878  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12879 
12880  // Invalidate source.
12881  if(isNonCoherent)
12882  {
12883  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12884  memRange.memory = pSrcBlock->GetDeviceMemory();
12885  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12886  memRange.size = VMA_MIN(
12887  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12888  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12889  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12890  }
12891 
12892  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12893  memmove(
12894  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12895  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12896  static_cast<size_t>(move.size));
12897 
12898  if(IsCorruptionDetectionEnabled())
12899  {
12900  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12901  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12902  }
12903 
12904  // Flush destination.
12905  if(isNonCoherent)
12906  {
12907  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12908  memRange.memory = pDstBlock->GetDeviceMemory();
12909  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12910  memRange.size = VMA_MIN(
12911  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12912  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12913  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12914  }
12915  }
12916  }
12917 
12918  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12919  // Regardless of pCtx->res == VK_SUCCESS.
12920  for(size_t blockIndex = blockCount; blockIndex--; )
12921  {
12922  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12923  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12924  {
12925  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12926  pBlock->Unmap(m_hAllocator, 1);
12927  }
12928  }
12929 }
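
/* Illustrative sketch (not part of the original source): for non-coherent memory, the code
above clamps each invalidated/flushed range to nonCoherentAtomSize boundaries. The alignment
arithmetic in isolation, with hypothetical values:

\code
// Assume nonCoherentAtomSize = 64 and a move at offset 100 of size 50.
VkDeviceSize atom = 64, offset = 100, size = 50;
VkDeviceSize rangeOffset = (offset / atom) * atom;                            // VmaAlignDown -> 64
VkDeviceSize rangeSize = ((size + (offset - rangeOffset) + atom - 1) / atom) * atom; // VmaAlignUp(86) -> 128
// The resulting range [64, 192) fully covers the moved bytes [100, 150).
\endcode
*/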
12930 
12931 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12932  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12933  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12934  VkCommandBuffer commandBuffer)
12935 {
12936  const size_t blockCount = m_Blocks.size();
12937 
12938  pDefragCtx->blockContexts.resize(blockCount);
12939  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12940 
12941  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12942  const size_t moveCount = moves.size();
12943  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12944  {
12945  const VmaDefragmentationMove& move = moves[moveIndex];
12946 
12947  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
12948  {
12949  // Old-school moves still require us to map the whole block.
12950  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12951  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12952  }
12953  }
12954 
12955  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12956 
12957  // Go over all blocks. Create and bind buffer for whole block if necessary.
12958  {
12959  VkBufferCreateInfo bufCreateInfo;
12960  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12961 
12962  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12963  {
12964  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12965  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12966  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12967  {
12968  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12969  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12970  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12971  if(pDefragCtx->res == VK_SUCCESS)
12972  {
12973  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12974  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12975  }
12976  }
12977  }
12978  }
12979 
12980  // Go over all moves. Post data transfer commands to command buffer.
12981  if(pDefragCtx->res == VK_SUCCESS)
12982  {
12983  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12984  {
12985  const VmaDefragmentationMove& move = moves[moveIndex];
12986 
12987  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12988  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12989 
12990  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12991 
12992  VkBufferCopy region = {
12993  move.srcOffset,
12994  move.dstOffset,
12995  move.size };
12996  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12997  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12998  }
12999  }
13000 
13001  // Save buffers to defrag context for later destruction.
13002  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13003  {
13004  pDefragCtx->res = VK_NOT_READY;
13005  }
13006 }
13007 
13008 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13009 {
13010  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13011  {
13012  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13013  if(pBlock->m_pMetadata->IsEmpty())
13014  {
13015  if(m_Blocks.size() > m_MinBlockCount)
13016  {
13017  if(pDefragmentationStats != VMA_NULL)
13018  {
13019  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13020  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13021  }
13022 
13023  VmaVectorRemove(m_Blocks, blockIndex);
13024  pBlock->Destroy(m_hAllocator);
13025  vma_delete(m_hAllocator, pBlock);
13026  }
13027  else
13028  {
13029  break;
13030  }
13031  }
13032  }
13033  UpdateHasEmptyBlock();
13034 }
13035 
13036 void VmaBlockVector::UpdateHasEmptyBlock()
13037 {
13038  m_HasEmptyBlock = false;
13039  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13040  {
13041  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13042  if(pBlock->m_pMetadata->IsEmpty())
13043  {
13044  m_HasEmptyBlock = true;
13045  break;
13046  }
13047  }
13048 }
13049 
13050 #if VMA_STATS_STRING_ENABLED
13051 
13052 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13053 {
13054  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13055 
13056  json.BeginObject();
13057 
13058  if(IsCustomPool())
13059  {
13060  const char* poolName = m_hParentPool->GetName();
13061  if(poolName != VMA_NULL && poolName[0] != '\0')
13062  {
13063  json.WriteString("Name");
13064  json.WriteString(poolName);
13065  }
13066 
13067  json.WriteString("MemoryTypeIndex");
13068  json.WriteNumber(m_MemoryTypeIndex);
13069 
13070  json.WriteString("BlockSize");
13071  json.WriteNumber(m_PreferredBlockSize);
13072 
13073  json.WriteString("BlockCount");
13074  json.BeginObject(true);
13075  if(m_MinBlockCount > 0)
13076  {
13077  json.WriteString("Min");
13078  json.WriteNumber((uint64_t)m_MinBlockCount);
13079  }
13080  if(m_MaxBlockCount < SIZE_MAX)
13081  {
13082  json.WriteString("Max");
13083  json.WriteNumber((uint64_t)m_MaxBlockCount);
13084  }
13085  json.WriteString("Cur");
13086  json.WriteNumber((uint64_t)m_Blocks.size());
13087  json.EndObject();
13088 
13089  if(m_FrameInUseCount > 0)
13090  {
13091  json.WriteString("FrameInUseCount");
13092  json.WriteNumber(m_FrameInUseCount);
13093  }
13094 
13095  if(m_Algorithm != 0)
13096  {
13097  json.WriteString("Algorithm");
13098  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13099  }
13100  }
13101  else
13102  {
13103  json.WriteString("PreferredBlockSize");
13104  json.WriteNumber(m_PreferredBlockSize);
13105  }
13106 
13107  json.WriteString("Blocks");
13108  json.BeginObject();
13109  for(size_t i = 0; i < m_Blocks.size(); ++i)
13110  {
13111  json.BeginString();
13112  json.ContinueString(m_Blocks[i]->GetId());
13113  json.EndString();
13114 
13115  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13116  }
13117  json.EndObject();
13118 
13119  json.EndObject();
13120 }
13121 
13122 #endif // #if VMA_STATS_STRING_ENABLED
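
/* Illustrative sketch (not part of the original source): PrintDetailedMap() above emits
the per-pool "Blocks" objects that end up in the public stats string. Assuming an
initialized allocator:

\code
char* pStatsString = VMA_NULL;
vmaBuildStatsString(allocator, &pStatsString, VK_TRUE); // VK_TRUE = detailed map
// pStatsString now contains the JSON written by PrintDetailedMap() for each block vector.
vmaFreeStatsString(allocator, pStatsString);
\endcode
*/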
13123 
13124 void VmaBlockVector::Defragment(
13125  class VmaBlockVectorDefragmentationContext* pCtx,
13126  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13127  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13128  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13129  VkCommandBuffer commandBuffer)
13130 {
13131  pCtx->res = VK_SUCCESS;
13132 
13133  const VkMemoryPropertyFlags memPropFlags =
13134  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13135  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13136 
13137  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13138  isHostVisible;
13139  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13140  !IsCorruptionDetectionEnabled() &&
13141  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13142 
13143  // There are options to defragment this memory type.
13144  if(canDefragmentOnCpu || canDefragmentOnGpu)
13145  {
13146  bool defragmentOnGpu;
13147  // There is only one option to defragment this memory type.
13148  if(canDefragmentOnGpu != canDefragmentOnCpu)
13149  {
13150  defragmentOnGpu = canDefragmentOnGpu;
13151  }
13152  // Both options are available: Heuristics to choose the best one.
13153  else
13154  {
13155  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13156  m_hAllocator->IsIntegratedGpu();
13157  }
13158 
13159  bool overlappingMoveSupported = !defragmentOnGpu;
13160 
13161  if(m_hAllocator->m_UseMutex)
13162  {
13163  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13164  {
13165  if(!m_Mutex.TryLockWrite())
13166  {
13167  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13168  return;
13169  }
13170  }
13171  else
13172  {
13173  m_Mutex.LockWrite();
13174  pCtx->mutexLocked = true;
13175  }
13176  }
13177 
13178  pCtx->Begin(overlappingMoveSupported, flags);
13179 
13180  // Defragment.
13181 
13182  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13183  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13184  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13185 
13186  // Accumulate statistics.
13187  if(pStats != VMA_NULL)
13188  {
13189  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13190  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13191  pStats->bytesMoved += bytesMoved;
13192  pStats->allocationsMoved += allocationsMoved;
13193  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13194  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13195  if(defragmentOnGpu)
13196  {
13197  maxGpuBytesToMove -= bytesMoved;
13198  maxGpuAllocationsToMove -= allocationsMoved;
13199  }
13200  else
13201  {
13202  maxCpuBytesToMove -= bytesMoved;
13203  maxCpuAllocationsToMove -= allocationsMoved;
13204  }
13205  }
13206 
13207  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13208  {
13209  if(m_hAllocator->m_UseMutex)
13210  m_Mutex.UnlockWrite();
13211 
13212  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13213  pCtx->res = VK_NOT_READY;
13214 
13215  return;
13216  }
13217 
13218  if(pCtx->res >= VK_SUCCESS)
13219  {
13220  if(defragmentOnGpu)
13221  {
13222  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13223  }
13224  else
13225  {
13226  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13227  }
13228  }
13229  }
13230 }
13231 
13232 void VmaBlockVector::DefragmentationEnd(
13233  class VmaBlockVectorDefragmentationContext* pCtx,
13234  uint32_t flags,
13235  VmaDefragmentationStats* pStats)
13236 {
13237  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
13238  {
13239  VMA_ASSERT(pCtx->mutexLocked == false);
13240 
13241  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't
13242  // actually have any lock protecting us. Since we mutate state here, we have to take the lock out now.
13243  m_Mutex.LockWrite();
13244  pCtx->mutexLocked = true;
13245  }
13246 
13247  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
13248  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
13249  {
13250  // Destroy buffers.
13251  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
13252  {
13253  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
13254  if(blockCtx.hBuffer)
13255  {
13256  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13257  }
13258  }
13259 
13260  if(pCtx->res >= VK_SUCCESS)
13261  {
13262  FreeEmptyBlocks(pStats);
13263  }
13264  }
13265 
13266  if(pCtx->mutexLocked)
13267  {
13268  VMA_ASSERT(m_hAllocator->m_UseMutex);
13269  m_Mutex.UnlockWrite();
13270  }
13271 }
13272 
13273 uint32_t VmaBlockVector::ProcessDefragmentations(
13274  class VmaBlockVectorDefragmentationContext *pCtx,
13275  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13276 {
13277  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13278 
13279  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13280 
13281  for(uint32_t i = 0; i < moveCount; ++ i)
13282  {
13283  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13284 
13285  pMove->allocation = move.hAllocation;
13286  pMove->memory = move.pDstBlock->GetDeviceMemory();
13287  pMove->offset = move.dstOffset;
13288 
13289  ++ pMove;
13290  }
13291 
13292  pCtx->defragmentationMovesProcessed += moveCount;
13293 
13294  return moveCount;
13295 }
13296 
13297 void VmaBlockVector::CommitDefragmentations(
13298  class VmaBlockVectorDefragmentationContext *pCtx,
13299  VmaDefragmentationStats* pStats)
13300 {
13301  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13302 
13303  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13304  {
13305  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13306 
13307  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13308  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13309  }
13310 
13311  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13312  FreeEmptyBlocks(pStats);
13313 }
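
/* Illustrative sketch (not part of the original source): ProcessDefragmentations() and
CommitDefragmentations() above back the incremental defragmentation pass API. A hedged
outline of one pass, assuming a VmaDefragmentationContext `defragCtx` obtained from
vmaDefragmentationBegin() with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set:

\code
VmaDefragmentationPassMoveInfo moves[64];
VmaDefragmentationPassInfo passInfo = {};
passInfo.moveCount = 64;
passInfo.pMoves = moves;

VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
for(uint32_t i = 0; i < passInfo.moveCount; ++i)
{
    // Copy each allocation's data to moves[i].memory at moves[i].offset,
    // e.g. by recording vkCmdCopyBuffer, then submit and wait.
}
res = vmaEndDefragmentationPass(allocator, defragCtx); // Commits the processed moves.
\endcode
*/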
13314 
13315 size_t VmaBlockVector::CalcAllocationCount() const
13316 {
13317  size_t result = 0;
13318  for(size_t i = 0; i < m_Blocks.size(); ++i)
13319  {
13320  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13321  }
13322  return result;
13323 }
13324 
13325 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13326 {
13327  if(m_BufferImageGranularity == 1)
13328  {
13329  return false;
13330  }
13331  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13332  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13333  {
13334  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13335  VMA_ASSERT(m_Algorithm == 0);
13336  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13337  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13338  {
13339  return true;
13340  }
13341  }
13342  return false;
13343 }
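
/* Illustrative sketch (not part of the original source): the check above matters only when
buffers and images share a block. A custom pool dedicated to a single resource kind can opt
out of the granularity padding entirely, assuming a chosen memoryTypeIndex:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT;

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode
*/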
13344 
13345 void VmaBlockVector::MakePoolAllocationsLost(
13346  uint32_t currentFrameIndex,
13347  size_t* pLostAllocationCount)
13348 {
13349  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13350  size_t lostAllocationCount = 0;
13351  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13352  {
13353  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13354  VMA_ASSERT(pBlock);
13355  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13356  }
13357  if(pLostAllocationCount != VMA_NULL)
13358  {
13359  *pLostAllocationCount = lostAllocationCount;
13360  }
13361 }
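
/* Illustrative sketch (not part of the original source): this is the implementation behind
vmaMakePoolAllocationsLost(). Allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT that have not been used within frameInUseCount
frames can be reclaimed like this, assuming a custom pool handle:

\code
size_t lostCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
// Afterwards, vmaTouchAllocation() returns VK_FALSE for the reclaimed allocations.
\endcode
*/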
13362 
13363 VkResult VmaBlockVector::CheckCorruption()
13364 {
13365  if(!IsCorruptionDetectionEnabled())
13366  {
13367  return VK_ERROR_FEATURE_NOT_PRESENT;
13368  }
13369 
13370  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13371  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13372  {
13373  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13374  VMA_ASSERT(pBlock);
13375  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13376  if(res != VK_SUCCESS)
13377  {
13378  return res;
13379  }
13380  }
13381  return VK_SUCCESS;
13382 }
13383 
13384 void VmaBlockVector::AddStats(VmaStats* pStats)
13385 {
13386  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13387  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13388 
13389  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13390 
13391  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13392  {
13393  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13394  VMA_ASSERT(pBlock);
13395  VMA_HEAVY_ASSERT(pBlock->Validate());
13396  VmaStatInfo allocationStatInfo;
13397  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13398  VmaAddStatInfo(pStats->total, allocationStatInfo);
13399  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13400  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13401  }
13402 }
13403 
13404 ////////////////////////////////////////////////////////////////////////////////
13405 // VmaDefragmentationAlgorithm_Generic members definition
13406 
13407 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13408  VmaAllocator hAllocator,
13409  VmaBlockVector* pBlockVector,
13410  uint32_t currentFrameIndex,
13411  bool overlappingMoveSupported) :
13412  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13413  m_AllocationCount(0),
13414  m_AllAllocations(false),
13415  m_BytesMoved(0),
13416  m_AllocationsMoved(0),
13417  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13418 {
13419  // Create block info for each block.
13420  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13421  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13422  {
13423  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13424  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13425  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13426  m_Blocks.push_back(pBlockInfo);
13427  }
13428 
13429  // Sort them by m_pBlock pointer value.
13430  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13431 }
13432 
13433 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13434 {
13435  for(size_t i = m_Blocks.size(); i--; )
13436  {
13437  vma_delete(m_hAllocator, m_Blocks[i]);
13438  }
13439 }
13440 
13441 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13442 {
13443  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
13444  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13445  {
13446  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13447  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13448  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13449  {
13450  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13451  (*it)->m_Allocations.push_back(allocInfo);
13452  }
13453  else
13454  {
13455  VMA_ASSERT(0);
13456  }
13457 
13458  ++m_AllocationCount;
13459  }
13460 }
13461 
13462 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13463  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13464  VkDeviceSize maxBytesToMove,
13465  uint32_t maxAllocationsToMove,
13466  bool freeOldAllocations)
13467 {
13468  if(m_Blocks.empty())
13469  {
13470  return VK_SUCCESS;
13471  }
13472 
13473  // This is a choice based on research.
13474  // Option 1:
13475  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13476  // Option 2:
13477  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13478  // Option 3:
13479  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13480 
13481  size_t srcBlockMinIndex = 0;
13482  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
13483  /*
13484  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13485  {
13486  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13487  if(blocksWithNonMovableCount > 0)
13488  {
13489  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13490  }
13491  }
13492  */
13493 
13494  size_t srcBlockIndex = m_Blocks.size() - 1;
13495  size_t srcAllocIndex = SIZE_MAX;
13496  for(;;)
13497  {
13498  // 1. Find next allocation to move.
13499  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13500  // 1.2. Then start from last to first m_Allocations.
13501  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13502  {
13503  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13504  {
13505  // Finished: no more allocations to process.
13506  if(srcBlockIndex == srcBlockMinIndex)
13507  {
13508  return VK_SUCCESS;
13509  }
13510  else
13511  {
13512  --srcBlockIndex;
13513  srcAllocIndex = SIZE_MAX;
13514  }
13515  }
13516  else
13517  {
13518  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13519  }
13520  }
13521 
13522  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13523  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13524 
13525  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13526  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13527  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13528  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13529 
13530  // 2. Try to find new place for this allocation in preceding or current block.
13531  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13532  {
13533  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13534  VmaAllocationRequest dstAllocRequest;
13535  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13536  m_CurrentFrameIndex,
13537  m_pBlockVector->GetFrameInUseCount(),
13538  m_pBlockVector->GetBufferImageGranularity(),
13539  size,
13540  alignment,
13541  false, // upperAddress
13542  suballocType,
13543  false, // canMakeOtherLost
13544  strategy,
13545  &dstAllocRequest) &&
13546  MoveMakesSense(
13547  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13548  {
13549  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13550 
13551  // Reached limit on number of allocations or bytes to move.
13552  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13553  (m_BytesMoved + size > maxBytesToMove))
13554  {
13555  return VK_SUCCESS;
13556  }
13557 
13558  VmaDefragmentationMove move = {};
13559  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13560  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13561  move.srcOffset = srcOffset;
13562  move.dstOffset = dstAllocRequest.offset;
13563  move.size = size;
13564  move.hAllocation = allocInfo.m_hAllocation;
13565  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
13566  move.pDstBlock = pDstBlockInfo->m_pBlock;
13567 
13568  moves.push_back(move);
13569 
13570  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13571  dstAllocRequest,
13572  suballocType,
13573  size,
13574  allocInfo.m_hAllocation);
13575 
13576  if(freeOldAllocations)
13577  {
13578  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13579  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13580  }
13581 
13582  if(allocInfo.m_pChanged != VMA_NULL)
13583  {
13584  *allocInfo.m_pChanged = VK_TRUE;
13585  }
13586 
13587  ++m_AllocationsMoved;
13588  m_BytesMoved += size;
13589 
13590  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13591 
13592  break;
13593  }
13594  }
13595 
13596  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13597 
13598  if(srcAllocIndex > 0)
13599  {
13600  --srcAllocIndex;
13601  }
13602  else
13603  {
13604  if(srcBlockIndex > 0)
13605  {
13606  --srcBlockIndex;
13607  srcAllocIndex = SIZE_MAX;
13608  }
13609  else
13610  {
13611  return VK_SUCCESS;
13612  }
13613  }
13614  }
13615 }
13616 
13617 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13618 {
13619  size_t result = 0;
13620  for(size_t i = 0; i < m_Blocks.size(); ++i)
13621  {
13622  if(m_Blocks[i]->m_HasNonMovableAllocations)
13623  {
13624  ++result;
13625  }
13626  }
13627  return result;
13628 }
13629 
13630 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13631  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13632  VkDeviceSize maxBytesToMove,
13633  uint32_t maxAllocationsToMove,
13634  VmaDefragmentationFlags flags)
13635 {
13636  if(!m_AllAllocations && m_AllocationCount == 0)
13637  {
13638  return VK_SUCCESS;
13639  }
13640 
13641  const size_t blockCount = m_Blocks.size();
13642  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13643  {
13644  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13645 
13646  if(m_AllAllocations)
13647  {
13648  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13649  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13650  it != pMetadata->m_Suballocations.end();
13651  ++it)
13652  {
13653  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13654  {
13655  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13656  pBlockInfo->m_Allocations.push_back(allocInfo);
13657  }
13658  }
13659  }
13660 
13661  pBlockInfo->CalcHasNonMovableAllocations();
13662 
13663  // This is a choice based on research.
13664  // Option 1:
13665  pBlockInfo->SortAllocationsByOffsetDescending();
13666  // Option 2:
13667  //pBlockInfo->SortAllocationsBySizeDescending();
13668  }
13669 
13670  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
13671  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13672 
13673  // This is a choice based on research.
13674  const uint32_t roundCount = 2;
13675 
13676  // Execute defragmentation rounds (the main part).
13677  VkResult result = VK_SUCCESS;
13678  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13679  {
13680  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
13681  }
13682 
13683  return result;
13684 }
13685 
13686 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13687  size_t dstBlockIndex, VkDeviceSize dstOffset,
13688  size_t srcBlockIndex, VkDeviceSize srcOffset)
13689 {
13690  if(dstBlockIndex < srcBlockIndex)
13691  {
13692  return true;
13693  }
13694  if(dstBlockIndex > srcBlockIndex)
13695  {
13696  return false;
13697  }
13698  if(dstOffset < srcOffset)
13699  {
13700  return true;
13701  }
13702  return false;
13703 }
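// A move "makes sense" only if it transports the allocation strictly toward the
// front of the (blockIndex, offset) ordering: into a lower-index block, or to a
// lower offset within the same block. This guarantees forward progress across
// rounds instead of allocations swapping places indefinitely.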
13704 
13705 ////////////////////////////////////////////////////////////////////////////////
13706 // VmaDefragmentationAlgorithm_Fast
13707 
13708 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13709  VmaAllocator hAllocator,
13710  VmaBlockVector* pBlockVector,
13711  uint32_t currentFrameIndex,
13712  bool overlappingMoveSupported) :
13713  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13714  m_OverlappingMoveSupported(overlappingMoveSupported),
13715  m_AllocationCount(0),
13716  m_AllAllocations(false),
13717  m_BytesMoved(0),
13718  m_AllocationsMoved(0),
13719  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13720 {
13721  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13722 
13723 }
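// The fast algorithm packs suballocations back-to-back and rewrites block
// metadata wholesale, so a nonzero VMA_DEBUG_MARGIN could not be preserved -
// hence the assert above and the matching check in
// VmaBlockVectorDefragmentationContext::Begin().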
13724 
13725 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13726 {
13727 }
13728 
13729 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13730  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13731  VkDeviceSize maxBytesToMove,
13732  uint32_t maxAllocationsToMove,
13733  VmaDefragmentationFlags flags)
13734 {
13735  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13736 
13737  const size_t blockCount = m_pBlockVector->GetBlockCount();
13738  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13739  {
13740  return VK_SUCCESS;
13741  }
13742 
13743  PreprocessMetadata();
13744 
13745  // Sort blocks so that the most "destination"-like blocks (least free space) come first.
13746 
13747  m_BlockInfos.resize(blockCount);
13748  for(size_t i = 0; i < blockCount; ++i)
13749  {
13750  m_BlockInfos[i].origBlockIndex = i;
13751  }
13752 
13753  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13754  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13755  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13756  });
13757 
13758  // THE MAIN ALGORITHM
13759 
13760  FreeSpaceDatabase freeSpaceDb;
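 // freeSpaceDb records free regions left behind in already-processed destination
 // blocks (alignment gaps, space after skipped in-place moves). Each allocation
 // first tries Fetch() to backfill such a hole before being appended at dstOffset.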
13761 
13762  size_t dstBlockInfoIndex = 0;
13763  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13764  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13765  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13766  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13767  VkDeviceSize dstOffset = 0;
13768 
13769  bool end = false;
13770  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13771  {
13772  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13773  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13774  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13775  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13776  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13777  {
13778  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13779  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13780  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13781  if(m_AllocationsMoved == maxAllocationsToMove ||
13782  m_BytesMoved + srcAllocSize > maxBytesToMove)
13783  {
13784  end = true;
13785  break;
13786  }
13787  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13788 
13789  VmaDefragmentationMove move = {};
13790  // Try to place it in one of free spaces from the database.
13791  size_t freeSpaceInfoIndex;
13792  VkDeviceSize dstAllocOffset;
13793  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13794  freeSpaceInfoIndex, dstAllocOffset))
13795  {
13796  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13797  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13798  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13799 
13800  // Same block
13801  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13802  {
13803  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13804 
13805  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13806 
13807  VmaSuballocation suballoc = *srcSuballocIt;
13808  suballoc.offset = dstAllocOffset;
13809  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13810  m_BytesMoved += srcAllocSize;
13811  ++m_AllocationsMoved;
13812 
13813  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13814  ++nextSuballocIt;
13815  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13816  srcSuballocIt = nextSuballocIt;
13817 
13818  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13819 
13820  move.srcBlockIndex = srcOrigBlockIndex;
13821  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13822  move.srcOffset = srcAllocOffset;
13823  move.dstOffset = dstAllocOffset;
13824  move.size = srcAllocSize;
13825 
13826  moves.push_back(move);
13827  }
13828  // Different block
13829  else
13830  {
13831  // MOVE OPTION 2: Move the allocation to a different block.
13832 
13833  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13834 
13835  VmaSuballocation suballoc = *srcSuballocIt;
13836  suballoc.offset = dstAllocOffset;
13837  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13838  m_BytesMoved += srcAllocSize;
13839  ++m_AllocationsMoved;
13840 
13841  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13842  ++nextSuballocIt;
13843  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13844  srcSuballocIt = nextSuballocIt;
13845 
13846  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13847 
13848  move.srcBlockIndex = srcOrigBlockIndex;
13849  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13850  move.srcOffset = srcAllocOffset;
13851  move.dstOffset = dstAllocOffset;
13852  move.size = srcAllocSize;
13853 
13854  moves.push_back(move);
13855  }
13856  }
13857  else
13858  {
13859  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13860 
13861  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13862  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13863  dstAllocOffset + srcAllocSize > dstBlockSize)
13864  {
13865  // But before that, register remaining free space at the end of dst block.
13866  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13867 
13868  ++dstBlockInfoIndex;
13869  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13870  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13871  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13872  dstBlockSize = pDstMetadata->GetSize();
13873  dstOffset = 0;
13874  dstAllocOffset = 0;
13875  }
13876 
13877  // Same block
13878  if(dstBlockInfoIndex == srcBlockInfoIndex)
13879  {
13880  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13881 
13882  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13883 
13884  bool skipOver = overlap;
13885  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13886  {
13887  // If the destination and source regions overlap, skip the move when it
13888  // would shift the allocation by less than 1/64 of its size.
13889  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13890  }
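 // Example: a 64 KiB allocation (65536 B) is moved in place only if it would
 // shift left by at least 65536 / 64 = 1024 B; a smaller shift is not worth the
 // copy, so it stays put and the gap before it is registered in freeSpaceDb.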
13891 
13892  if(skipOver)
13893  {
13894  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13895 
13896  dstOffset = srcAllocOffset + srcAllocSize;
13897  ++srcSuballocIt;
13898  }
13899  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13900  else
13901  {
13902  srcSuballocIt->offset = dstAllocOffset;
13903  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13904  dstOffset = dstAllocOffset + srcAllocSize;
13905  m_BytesMoved += srcAllocSize;
13906  ++m_AllocationsMoved;
13907  ++srcSuballocIt;
13908 
13909  move.srcBlockIndex = srcOrigBlockIndex;
13910  move.dstBlockIndex = dstOrigBlockIndex;
13911  move.srcOffset = srcAllocOffset;
13912  move.dstOffset = dstAllocOffset;
13913  move.size = srcAllocSize;
13914 
13915  moves.push_back(move);
13916  }
13917  }
13918  // Different block
13919  else
13920  {
13921  // MOVE OPTION 2: Move the allocation to a different block.
13922 
13923  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13924  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13925 
13926  VmaSuballocation suballoc = *srcSuballocIt;
13927  suballoc.offset = dstAllocOffset;
13928  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13929  dstOffset = dstAllocOffset + srcAllocSize;
13930  m_BytesMoved += srcAllocSize;
13931  ++m_AllocationsMoved;
13932 
13933  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13934  ++nextSuballocIt;
13935  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13936  srcSuballocIt = nextSuballocIt;
13937 
13938  pDstMetadata->m_Suballocations.push_back(suballoc);
13939 
13940  move.srcBlockIndex = srcOrigBlockIndex;
13941  move.dstBlockIndex = dstOrigBlockIndex;
13942  move.srcOffset = srcAllocOffset;
13943  move.dstOffset = dstAllocOffset;
13944  move.size = srcAllocSize;
13945 
13946  moves.push_back(move);
13947  }
13948  }
13949  }
13950  }
13951 
13952  m_BlockInfos.clear();
13953 
13954  PostprocessMetadata();
13955 
13956  return VK_SUCCESS;
13957 }
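// In effect the fast algorithm is a single compacting sweep: blocks are visited
// from most- to least-occupied, and every allocation is slid to the lowest
// address that can hold it - either a recycled hole from freeSpaceDb or the
// current packing position dstOffset.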
13958 
13959 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13960 {
13961  const size_t blockCount = m_pBlockVector->GetBlockCount();
13962  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13963  {
13964  VmaBlockMetadata_Generic* const pMetadata =
13965  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13966  pMetadata->m_FreeCount = 0;
13967  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13968  pMetadata->m_FreeSuballocationsBySize.clear();
13969  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13970  it != pMetadata->m_Suballocations.end(); )
13971  {
13972  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13973  {
13974  VmaSuballocationList::iterator nextIt = it;
13975  ++nextIt;
13976  pMetadata->m_Suballocations.erase(it);
13977  it = nextIt;
13978  }
13979  else
13980  {
13981  ++it;
13982  }
13983  }
13984  }
13985 }
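// After preprocessing, each block's suballocation list contains only real
// allocations (no FREE entries) and the free-list bookkeeping is zeroed. The
// metadata stays in this transient state during the sweep until
// PostprocessMetadata() rebuilds the FREE entries and m_FreeSuballocationsBySize
// from the final offsets.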
13986 
13987 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13988 {
13989  const size_t blockCount = m_pBlockVector->GetBlockCount();
13990  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13991  {
13992  VmaBlockMetadata_Generic* const pMetadata =
13993  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13994  const VkDeviceSize blockSize = pMetadata->GetSize();
13995 
13996  // No allocations in this block - entire area is free.
13997  if(pMetadata->m_Suballocations.empty())
13998  {
13999  pMetadata->m_FreeCount = 1;
14000  //pMetadata->m_SumFreeSize is already set to blockSize.
14001  VmaSuballocation suballoc = {
14002  0, // offset
14003  blockSize, // size
14004  VMA_NULL, // hAllocation
14005  VMA_SUBALLOCATION_TYPE_FREE };
14006  pMetadata->m_Suballocations.push_back(suballoc);
14007  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14008  }
14009  // There are some allocations in this block.
14010  else
14011  {
14012  VkDeviceSize offset = 0;
14013  VmaSuballocationList::iterator it;
14014  for(it = pMetadata->m_Suballocations.begin();
14015  it != pMetadata->m_Suballocations.end();
14016  ++it)
14017  {
14018  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14019  VMA_ASSERT(it->offset >= offset);
14020 
14021  // Need to insert preceding free space.
14022  if(it->offset > offset)
14023  {
14024  ++pMetadata->m_FreeCount;
14025  const VkDeviceSize freeSize = it->offset - offset;
14026  VmaSuballocation suballoc = {
14027  offset, // offset
14028  freeSize, // size
14029  VMA_NULL, // hAllocation
14030  VMA_SUBALLOCATION_TYPE_FREE };
14031  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14032  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14033  {
14034  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14035  }
14036  }
14037 
14038  pMetadata->m_SumFreeSize -= it->size;
14039  offset = it->offset + it->size;
14040  }
14041 
14042  // Need to insert trailing free space.
14043  if(offset < blockSize)
14044  {
14045  ++pMetadata->m_FreeCount;
14046  const VkDeviceSize freeSize = blockSize - offset;
14047  VmaSuballocation suballoc = {
14048  offset, // offset
14049  freeSize, // size
14050  VMA_NULL, // hAllocation
14051  VMA_SUBALLOCATION_TYPE_FREE };
14052  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14053  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14054  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14055  {
14056  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14057  }
14058  }
14059 
14060  VMA_SORT(
14061  pMetadata->m_FreeSuballocationsBySize.begin(),
14062  pMetadata->m_FreeSuballocationsBySize.end(),
14063  VmaSuballocationItemSizeLess());
14064  }
14065 
14066  VMA_HEAVY_ASSERT(pMetadata->Validate());
14067  }
14068 }
14069 
14070 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14071 {
14072  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14073  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14074  // Advance past all suballocations that lie before the new one. The stop
14075  // condition on offset is required - without it this loop never terminates.
14076  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
14077  {
14078  ++it;
14079  }
14081  pMetadata->m_Suballocations.insert(it, suballoc);
14082 }
14083 
14084 ////////////////////////////////////////////////////////////////////////////////
14085 // VmaBlockVectorDefragmentationContext
14086 
14087 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14088  VmaAllocator hAllocator,
14089  VmaPool hCustomPool,
14090  VmaBlockVector* pBlockVector,
14091  uint32_t currFrameIndex) :
14092  res(VK_SUCCESS),
14093  mutexLocked(false),
14094  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14095  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14096  defragmentationMovesProcessed(0),
14097  defragmentationMovesCommitted(0),
14098  hasDefragmentationPlan(0),
14099  m_hAllocator(hAllocator),
14100  m_hCustomPool(hCustomPool),
14101  m_pBlockVector(pBlockVector),
14102  m_CurrFrameIndex(currFrameIndex),
14103  m_pAlgorithm(VMA_NULL),
14104  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14105  m_AllAllocations(false)
14106 {
14107 }
14108 
14109 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14110 {
14111  vma_delete(m_hAllocator, m_pAlgorithm);
14112 }
14113 
14114 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14115 {
14116  AllocInfo info = { hAlloc, pChanged };
14117  m_Allocations.push_back(info);
14118 }
14119 
14120 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14121 {
14122  const bool allAllocations = m_AllAllocations ||
14123  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14124 
14125  /********************************
14126  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14127  ********************************/
14128 
14129  /*
14130  Fast algorithm is supported only when certain criteria are met:
14131  - VMA_DEBUG_MARGIN is 0.
14132  - All allocations in this block vector are moveable.
14133  - There is no possibility of image/buffer granularity conflict.
14134  - The defragmentation is not incremental
14135  */
14136  if(VMA_DEBUG_MARGIN == 0 &&
14137  allAllocations &&
14138  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14139  (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) == 0)
14140  {
14141  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14142  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14143  }
14144  else
14145  {
14146  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14147  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14148  }
14149 
14150  if(allAllocations)
14151  {
14152  m_pAlgorithm->AddAll();
14153  }
14154  else
14155  {
14156  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14157  {
14158  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14159  }
14160  }
14161 }
14162 
14163 ////////////////////////////////////////////////////////////////////////////////
14164 // VmaDefragmentationContext
14165 
14166 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14167  VmaAllocator hAllocator,
14168  uint32_t currFrameIndex,
14169  uint32_t flags,
14170  VmaDefragmentationStats* pStats) :
14171  m_hAllocator(hAllocator),
14172  m_CurrFrameIndex(currFrameIndex),
14173  m_Flags(flags),
14174  m_pStats(pStats),
14175  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14176 {
14177  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14178 }
14179 
14180 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14181 {
14182  for(size_t i = m_CustomPoolContexts.size(); i--; )
14183  {
14184  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14185  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14186  vma_delete(m_hAllocator, pBlockVectorCtx);
14187  }
14188  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14189  {
14190  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14191  if(pBlockVectorCtx)
14192  {
14193  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14194  vma_delete(m_hAllocator, pBlockVectorCtx);
14195  }
14196  }
14197 }
14198 
14199 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
14200 {
14201  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14202  {
14203  VmaPool pool = pPools[poolIndex];
14204  VMA_ASSERT(pool);
14205  // Pools with algorithm other than default are not defragmented.
14206  if(pool->m_BlockVector.GetAlgorithm() == 0)
14207  {
14208  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14209 
14210  for(size_t i = m_CustomPoolContexts.size(); i--; )
14211  {
14212  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14213  {
14214  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14215  break;
14216  }
14217  }
14218 
14219  if(!pBlockVectorDefragCtx)
14220  {
14221  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14222  m_hAllocator,
14223  pool,
14224  &pool->m_BlockVector,
14225  m_CurrFrameIndex);
14226  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14227  }
14228 
14229  pBlockVectorDefragCtx->AddAll();
14230  }
14231  }
14232 }
14233 
14234 void VmaDefragmentationContext_T::AddAllocations(
14235  uint32_t allocationCount,
14236  VmaAllocation* pAllocations,
14237  VkBool32* pAllocationsChanged)
14238 {
14239  // Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
14240  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14241  {
14242  const VmaAllocation hAlloc = pAllocations[allocIndex];
14243  VMA_ASSERT(hAlloc);
14244  // DedicatedAlloc cannot be defragmented.
14245  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14246  // Lost allocation cannot be defragmented.
14247  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14248  {
14249  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14250 
14251  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14252  // This allocation belongs to custom pool.
14253  if(hAllocPool != VK_NULL_HANDLE)
14254  {
14255  // Pools with algorithm other than default are not defragmented.
14256  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14257  {
14258  for(size_t i = m_CustomPoolContexts.size(); i--; )
14259  {
14260  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14261  {
14262  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14263  break;
14264  }
14265  }
14266  if(!pBlockVectorDefragCtx)
14267  {
14268  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14269  m_hAllocator,
14270  hAllocPool,
14271  &hAllocPool->m_BlockVector,
14272  m_CurrFrameIndex);
14273  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14274  }
14275  }
14276  }
14277  // This allocation belongs to default pool.
14278  else
14279  {
14280  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14281  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14282  if(!pBlockVectorDefragCtx)
14283  {
14284  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14285  m_hAllocator,
14286  VMA_NULL, // hCustomPool
14287  m_hAllocator->m_pBlockVectors[memTypeIndex],
14288  m_CurrFrameIndex);
14289  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14290  }
14291  }
14292 
14293  if(pBlockVectorDefragCtx)
14294  {
14295  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14296  &pAllocationsChanged[allocIndex] : VMA_NULL;
14297  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14298  }
14299  }
14300  }
14301 }
14302 
14303 VkResult VmaDefragmentationContext_T::Defragment(
14304  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14305  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14306  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14307 {
14308  if(pStats)
14309  {
14310  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14311  }
14312 
14313  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14314  {
14315  // For incremental defragmentation we only record here how much we are
14316  // allowed to move; the real work happens in the defragmentation passes.
14317  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14318  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14319 
14320  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14321  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14322 
14323  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14324  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14325  return VK_SUCCESS;
14326 
14327  return VK_NOT_READY;
14328  }
14329 
14330  if(commandBuffer == VK_NULL_HANDLE)
14331  {
14332  maxGpuBytesToMove = 0;
14333  maxGpuAllocationsToMove = 0;
14334  }
14335 
14336  VkResult res = VK_SUCCESS;
14337 
14338  // Process default pools.
14339  for(uint32_t memTypeIndex = 0;
14340  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14341  ++memTypeIndex)
14342  {
14343  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14344  if(pBlockVectorCtx)
14345  {
14346  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14347  pBlockVectorCtx->GetBlockVector()->Defragment(
14348  pBlockVectorCtx,
14349  pStats, flags,
14350  maxCpuBytesToMove, maxCpuAllocationsToMove,
14351  maxGpuBytesToMove, maxGpuAllocationsToMove,
14352  commandBuffer);
14353  if(pBlockVectorCtx->res != VK_SUCCESS)
14354  {
14355  res = pBlockVectorCtx->res;
14356  }
14357  }
14358  }
14359 
14360  // Process custom pools.
14361  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14362  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14363  ++customCtxIndex)
14364  {
14365  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14366  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14367  pBlockVectorCtx->GetBlockVector()->Defragment(
14368  pBlockVectorCtx,
14369  pStats, flags,
14370  maxCpuBytesToMove, maxCpuAllocationsToMove,
14371  maxGpuBytesToMove, maxGpuAllocationsToMove,
14372  commandBuffer);
14373  if(pBlockVectorCtx->res != VK_SUCCESS)
14374  {
14375  res = pBlockVectorCtx->res;
14376  }
14377  }
14378 
14379  return res;
14380 }
14381 
14382 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14383 {
14384  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14385  uint32_t movesLeft = pInfo->moveCount;
14386 
14387  // Process default pools.
14388  for(uint32_t memTypeIndex = 0;
14389  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14390  ++memTypeIndex)
14391  {
14392  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14393  if(pBlockVectorCtx)
14394  {
14395  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14396 
14397  if(!pBlockVectorCtx->hasDefragmentationPlan)
14398  {
14399  pBlockVectorCtx->GetBlockVector()->Defragment(
14400  pBlockVectorCtx,
14401  m_pStats, m_Flags,
14402  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14403  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14404  VK_NULL_HANDLE);
14405 
14406  if(pBlockVectorCtx->res < VK_SUCCESS)
14407  continue;
14408 
14409  pBlockVectorCtx->hasDefragmentationPlan = true;
14410  }
14411 
14412  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14413  pBlockVectorCtx,
14414  pCurrentMove, movesLeft);
14415 
14416  movesLeft -= processed;
14417  pCurrentMove += processed;
14418  }
14419  }
14420 
14421  // Process custom pools.
14422  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14423  customCtxIndex < customCtxCount;
14424  ++customCtxIndex)
14425  {
14426  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14427  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14428 
14429  if(!pBlockVectorCtx->hasDefragmentationPlan)
14430  {
14431  pBlockVectorCtx->GetBlockVector()->Defragment(
14432  pBlockVectorCtx,
14433  m_pStats, m_Flags,
14434  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14435  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14436  VK_NULL_HANDLE);
14437 
14438  if(pBlockVectorCtx->res < VK_SUCCESS)
14439  continue;
14440 
14441  pBlockVectorCtx->hasDefragmentationPlan = true;
14442  }
14443 
14444  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14445  pBlockVectorCtx,
14446  pCurrentMove, movesLeft);
14447 
14448  movesLeft -= processed;
14449  pCurrentMove += processed;
14450  }
14451 
14452  pInfo->moveCount = pInfo->moveCount - movesLeft;
14453 
14454  return VK_SUCCESS;
14455 }
14456 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14457 {
14458  VkResult res = VK_SUCCESS;
14459 
14460  // Process default pools.
14461  for(uint32_t memTypeIndex = 0;
14462  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14463  ++memTypeIndex)
14464  {
14465  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14466  if(pBlockVectorCtx)
14467  {
14468  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14469 
14470  if(!pBlockVectorCtx->hasDefragmentationPlan)
14471  {
14472  res = VK_NOT_READY;
14473  continue;
14474  }
14475 
14476  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14477  pBlockVectorCtx, m_pStats);
14478 
14479  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14480  res = VK_NOT_READY;
14481  }
14482  }
14483 
14484  // Process custom pools.
14485  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14486  customCtxIndex < customCtxCount;
14487  ++customCtxIndex)
14488  {
14489  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14490  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14491 
14492  if(!pBlockVectorCtx->hasDefragmentationPlan)
14493  {
14494  res = VK_NOT_READY;
14495  continue;
14496  }
14497 
14498  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14499  pBlockVectorCtx, m_pStats);
14500 
14501  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14502  res = VK_NOT_READY;
14503  }
14504 
14505  return res;
14506 }
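// A minimal sketch of the incremental defragmentation loop these two functions
// implement, assuming the public wrappers vmaBeginDefragmentationPass() /
// vmaEndDefragmentationPass(); the local variable names are hypothetical:
//
//   VmaDefragmentationPassInfo passInfo = {};
//   VkResult res = VK_NOT_READY;
//   while(res == VK_NOT_READY)
//   {
//       passInfo.moveCount = myMoveCapacity;   // in: capacity of myMoves
//       passInfo.pMoves = myMoves;             // caller-provided array
//       vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
//       // passInfo.moveCount now holds the number of moves actually produced.
//       // Perform the data copy for each move, then commit the pass:
//       res = vmaEndDefragmentationPass(allocator, ctx);
//   }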
14507 
14508 ////////////////////////////////////////////////////////////////////////////////
14509 // VmaRecorder
14510 
14511 #if VMA_RECORDING_ENABLED
14512 
14513 VmaRecorder::VmaRecorder() :
14514  m_UseMutex(true),
14515  m_Flags(0),
14516  m_File(VMA_NULL),
14517  m_Freq(INT64_MAX),
14518  m_StartCounter(INT64_MAX)
14519 {
14520 }
14521 
14522 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
14523 {
14524  m_UseMutex = useMutex;
14525  m_Flags = settings.flags;
14526 
14527  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
14528  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
14529 
14530  // Open file for writing.
14531  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
14532  if(err != 0)
14533  {
14534  return VK_ERROR_INITIALIZATION_FAILED;
14535  }
14536 
14537  // Write header.
14538  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
14539  fprintf(m_File, "%s\n", "1,8");
14540 
14541  return VK_SUCCESS;
14542 }
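// Each Record* function below appends one CSV line of the form:
// threadId,time,frameIndex,functionName,arg0,arg1,...
// where time is seconds elapsed since Init(). The "1,8" written above is,
// presumably, the version of this recording file format.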
14543 
14544 VmaRecorder::~VmaRecorder()
14545 {
14546  if(m_File != VMA_NULL)
14547  {
14548  fclose(m_File);
14549  }
14550 }
14551 
14552 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14553 {
14554  CallParams callParams;
14555  GetBasicParams(callParams);
14556 
14557  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14558  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14559  Flush();
14560 }
14561 
14562 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14563 {
14564  CallParams callParams;
14565  GetBasicParams(callParams);
14566 
14567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14568  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14569  Flush();
14570 }
14571 
14572 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14573 {
14574  CallParams callParams;
14575  GetBasicParams(callParams);
14576 
14577  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14578  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14579  createInfo.memoryTypeIndex,
14580  createInfo.flags,
14581  createInfo.blockSize,
14582  (uint64_t)createInfo.minBlockCount,
14583  (uint64_t)createInfo.maxBlockCount,
14584  createInfo.frameInUseCount,
14585  pool);
14586  Flush();
14587 }
14588 
14589 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14590 {
14591  CallParams callParams;
14592  GetBasicParams(callParams);
14593 
14594  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14595  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14596  pool);
14597  Flush();
14598 }
14599 
14600 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14601  const VkMemoryRequirements& vkMemReq,
14602  const VmaAllocationCreateInfo& createInfo,
14603  VmaAllocation allocation)
14604 {
14605  CallParams callParams;
14606  GetBasicParams(callParams);
14607 
14608  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14609  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14610  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14611  vkMemReq.size,
14612  vkMemReq.alignment,
14613  vkMemReq.memoryTypeBits,
14614  createInfo.flags,
14615  createInfo.usage,
14616  createInfo.requiredFlags,
14617  createInfo.preferredFlags,
14618  createInfo.memoryTypeBits,
14619  createInfo.pool,
14620  allocation,
14621  userDataStr.GetString());
14622  Flush();
14623 }
14624 
14625 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14626  const VkMemoryRequirements& vkMemReq,
14627  const VmaAllocationCreateInfo& createInfo,
14628  uint64_t allocationCount,
14629  const VmaAllocation* pAllocations)
14630 {
14631  CallParams callParams;
14632  GetBasicParams(callParams);
14633 
14634  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14635  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14636  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14637  vkMemReq.size,
14638  vkMemReq.alignment,
14639  vkMemReq.memoryTypeBits,
14640  createInfo.flags,
14641  createInfo.usage,
14642  createInfo.requiredFlags,
14643  createInfo.preferredFlags,
14644  createInfo.memoryTypeBits,
14645  createInfo.pool);
14646  PrintPointerList(allocationCount, pAllocations);
14647  fprintf(m_File, ",%s\n", userDataStr.GetString());
14648  Flush();
14649 }
14650 
14651 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14652  const VkMemoryRequirements& vkMemReq,
14653  bool requiresDedicatedAllocation,
14654  bool prefersDedicatedAllocation,
14655  const VmaAllocationCreateInfo& createInfo,
14656  VmaAllocation allocation)
14657 {
14658  CallParams callParams;
14659  GetBasicParams(callParams);
14660 
14661  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14662  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14663  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14664  vkMemReq.size,
14665  vkMemReq.alignment,
14666  vkMemReq.memoryTypeBits,
14667  requiresDedicatedAllocation ? 1 : 0,
14668  prefersDedicatedAllocation ? 1 : 0,
14669  createInfo.flags,
14670  createInfo.usage,
14671  createInfo.requiredFlags,
14672  createInfo.preferredFlags,
14673  createInfo.memoryTypeBits,
14674  createInfo.pool,
14675  allocation,
14676  userDataStr.GetString());
14677  Flush();
14678 }
14679 
14680 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14681  const VkMemoryRequirements& vkMemReq,
14682  bool requiresDedicatedAllocation,
14683  bool prefersDedicatedAllocation,
14684  const VmaAllocationCreateInfo& createInfo,
14685  VmaAllocation allocation)
14686 {
14687  CallParams callParams;
14688  GetBasicParams(callParams);
14689 
14690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14691  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14692  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14693  vkMemReq.size,
14694  vkMemReq.alignment,
14695  vkMemReq.memoryTypeBits,
14696  requiresDedicatedAllocation ? 1 : 0,
14697  prefersDedicatedAllocation ? 1 : 0,
14698  createInfo.flags,
14699  createInfo.usage,
14700  createInfo.requiredFlags,
14701  createInfo.preferredFlags,
14702  createInfo.memoryTypeBits,
14703  createInfo.pool,
14704  allocation,
14705  userDataStr.GetString());
14706  Flush();
14707 }
14708 
14709 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14710  VmaAllocation allocation)
14711 {
14712  CallParams callParams;
14713  GetBasicParams(callParams);
14714 
14715  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14716  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14717  allocation);
14718  Flush();
14719 }
14720 
14721 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14722  uint64_t allocationCount,
14723  const VmaAllocation* pAllocations)
14724 {
14725  CallParams callParams;
14726  GetBasicParams(callParams);
14727 
14728  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14729  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14730  PrintPointerList(allocationCount, pAllocations);
14731  fprintf(m_File, "\n");
14732  Flush();
14733 }
14734 
14735 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14736  VmaAllocation allocation,
14737  const void* pUserData)
14738 {
14739  CallParams callParams;
14740  GetBasicParams(callParams);
14741 
14742  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14743  UserDataString userDataStr(
14744  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14745  pUserData);
14746  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14747  allocation,
14748  userDataStr.GetString());
14749  Flush();
14750 }
14751 
14752 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14753  VmaAllocation allocation)
14754 {
14755  CallParams callParams;
14756  GetBasicParams(callParams);
14757 
14758  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14759  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14760  allocation);
14761  Flush();
14762 }
14763 
14764 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14765  VmaAllocation allocation)
14766 {
14767  CallParams callParams;
14768  GetBasicParams(callParams);
14769 
14770  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14771  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14772  allocation);
14773  Flush();
14774 }
14775 
14776 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14777  VmaAllocation allocation)
14778 {
14779  CallParams callParams;
14780  GetBasicParams(callParams);
14781 
14782  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14783  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14784  allocation);
14785  Flush();
14786 }
14787 
14788 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14789  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14790 {
14791  CallParams callParams;
14792  GetBasicParams(callParams);
14793 
14794  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14795  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14796  allocation,
14797  offset,
14798  size);
14799  Flush();
14800 }
14801 
14802 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14803  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14804 {
14805  CallParams callParams;
14806  GetBasicParams(callParams);
14807 
14808  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14809  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14810  allocation,
14811  offset,
14812  size);
14813  Flush();
14814 }
14815 
14816 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14817  const VkBufferCreateInfo& bufCreateInfo,
14818  const VmaAllocationCreateInfo& allocCreateInfo,
14819  VmaAllocation allocation)
14820 {
14821  CallParams callParams;
14822  GetBasicParams(callParams);
14823 
14824  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14825  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14826  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14827  bufCreateInfo.flags,
14828  bufCreateInfo.size,
14829  bufCreateInfo.usage,
14830  bufCreateInfo.sharingMode,
14831  allocCreateInfo.flags,
14832  allocCreateInfo.usage,
14833  allocCreateInfo.requiredFlags,
14834  allocCreateInfo.preferredFlags,
14835  allocCreateInfo.memoryTypeBits,
14836  allocCreateInfo.pool,
14837  allocation,
14838  userDataStr.GetString());
14839  Flush();
14840 }
14841 
14842 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14843  const VkImageCreateInfo& imageCreateInfo,
14844  const VmaAllocationCreateInfo& allocCreateInfo,
14845  VmaAllocation allocation)
14846 {
14847  CallParams callParams;
14848  GetBasicParams(callParams);
14849 
14850  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14851  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14852  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14853  imageCreateInfo.flags,
14854  imageCreateInfo.imageType,
14855  imageCreateInfo.format,
14856  imageCreateInfo.extent.width,
14857  imageCreateInfo.extent.height,
14858  imageCreateInfo.extent.depth,
14859  imageCreateInfo.mipLevels,
14860  imageCreateInfo.arrayLayers,
14861  imageCreateInfo.samples,
14862  imageCreateInfo.tiling,
14863  imageCreateInfo.usage,
14864  imageCreateInfo.sharingMode,
14865  imageCreateInfo.initialLayout,
14866  allocCreateInfo.flags,
14867  allocCreateInfo.usage,
14868  allocCreateInfo.requiredFlags,
14869  allocCreateInfo.preferredFlags,
14870  allocCreateInfo.memoryTypeBits,
14871  allocCreateInfo.pool,
14872  allocation,
14873  userDataStr.GetString());
14874  Flush();
14875 }
14876 
14877 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14878  VmaAllocation allocation)
14879 {
14880  CallParams callParams;
14881  GetBasicParams(callParams);
14882 
14883  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14884  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14885  allocation);
14886  Flush();
14887 }
14888 
14889 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14890  VmaAllocation allocation)
14891 {
14892  CallParams callParams;
14893  GetBasicParams(callParams);
14894 
14895  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14896  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14897  allocation);
14898  Flush();
14899 }
14900 
14901 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14902  VmaAllocation allocation)
14903 {
14904  CallParams callParams;
14905  GetBasicParams(callParams);
14906 
14907  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14908  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14909  allocation);
14910  Flush();
14911 }
14912 
14913 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14914  VmaAllocation allocation)
14915 {
14916  CallParams callParams;
14917  GetBasicParams(callParams);
14918 
14919  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14920  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14921  allocation);
14922  Flush();
14923 }
14924 
14925 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14926  VmaPool pool)
14927 {
14928  CallParams callParams;
14929  GetBasicParams(callParams);
14930 
14931  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14932  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14933  pool);
14934  Flush();
14935 }
14936 
14937 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14938  const VmaDefragmentationInfo2& info,
14939  VmaDefragmentationContext ctx)
14940 {
14941  CallParams callParams;
14942  GetBasicParams(callParams);
14943 
14944  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14945  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14946  info.flags);
14947  PrintPointerList(info.allocationCount, info.pAllocations);
14948  fprintf(m_File, ",");
14949  PrintPointerList(info.poolCount, info.pPools);
14950  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14951  info.maxCpuBytesToMove,
14952  info.maxCpuAllocationsToMove,
14953  info.maxGpuBytesToMove,
14954  info.maxGpuAllocationsToMove,
14955  info.commandBuffer,
14956  ctx);
14957  Flush();
14958 }
14959 
14960 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14961  VmaDefragmentationContext ctx)
14962 {
14963  CallParams callParams;
14964  GetBasicParams(callParams);
14965 
14966  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14967  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14968  ctx);
14969  Flush();
14970 }
14971 
14972 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14973  VmaPool pool,
14974  const char* name)
14975 {
14976  CallParams callParams;
14977  GetBasicParams(callParams);
14978 
14979  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14980  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14981  pool, name != VMA_NULL ? name : "");
14982  Flush();
14983 }
14984 
14985 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14986 {
14987  if(pUserData != VMA_NULL)
14988  {
14989  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14990  {
14991  m_Str = (const char*)pUserData;
14992  }
14993  else
14994  {
14995  sprintf_s(m_PtrStr, "%p", pUserData);
14996  m_Str = m_PtrStr;
14997  }
14998  }
14999  else
15000  {
15001  m_Str = "";
15002  }
15003 }
15004 
15005 void VmaRecorder::WriteConfiguration(
15006  const VkPhysicalDeviceProperties& devProps,
15007  const VkPhysicalDeviceMemoryProperties& memProps,
15008  uint32_t vulkanApiVersion,
15009  bool dedicatedAllocationExtensionEnabled,
15010  bool bindMemory2ExtensionEnabled,
15011  bool memoryBudgetExtensionEnabled,
15012  bool deviceCoherentMemoryExtensionEnabled)
15013 {
15014  fprintf(m_File, "Config,Begin\n");
15015 
15016  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15017 
15018  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15019  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15020  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15021  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15022  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15023  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15024 
15025  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15026  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15027  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15028 
15029  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15030  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15031  {
15032  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15033  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15034  }
15035  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15036  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15037  {
15038  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15039  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15040  }
15041 
15042  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15043  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15044  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15045  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15046 
15047  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15048  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15049  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15050  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15051  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15052  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15053  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15054  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15055  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15056 
15057  fprintf(m_File, "Config,End\n");
15058 }
15059 
15060 void VmaRecorder::GetBasicParams(CallParams& outParams)
15061 {
15062  outParams.threadId = GetCurrentThreadId();
15063 
15064  LARGE_INTEGER counter;
15065  QueryPerformanceCounter(&counter);
15066  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
15067 }
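// outParams.time is seconds elapsed since Init(), computed from
// QueryPerformanceCounter ticks; the "%.3f" used by the Record* functions
// therefore logs it with millisecond resolution.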
15068 
15069 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15070 {
15071  if(count)
15072  {
15073  fprintf(m_File, "%p", pItems[0]);
15074  for(uint64_t i = 1; i < count; ++i)
15075  {
15076  fprintf(m_File, " %p", pItems[i]);
15077  }
15078  }
15079 }
15080 
15081 void VmaRecorder::Flush()
15082 {
15083  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15084  {
15085  fflush(m_File);
15086  }
15087 }
15088 
15089 #endif // #if VMA_RECORDING_ENABLED
15090 
15091 ////////////////////////////////////////////////////////////////////////////////
15092 // VmaAllocationObjectAllocator
15093 
15094 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15095  m_Allocator(pAllocationCallbacks, 1024)
15096 {
15097 }
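// The 1024 passed above is, presumably, the item capacity of the pool
// allocator's first block; VmaAllocation_T objects are pooled so that each
// VmaAllocation handle does not cost a separate heap allocation.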
15098 
15099 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15100 {
15101  VmaMutexLock mutexLock(m_Mutex);
15102  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15103 }
15104 
15105 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15106 {
15107  VmaMutexLock mutexLock(m_Mutex);
15108  m_Allocator.Free(hAlloc);
15109 }
15110 
15111 ////////////////////////////////////////////////////////////////////////////////
15112 // VmaAllocator_T
15113 
15114 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15115  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15116  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15117  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15118  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15119  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15120  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15121  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15122  m_hDevice(pCreateInfo->device),
15123  m_hInstance(pCreateInfo->instance),
15124  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15125  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15126  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15127  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15128  m_HeapSizeLimitMask(0),
15129  m_PreferredLargeHeapBlockSize(0),
15130  m_PhysicalDevice(pCreateInfo->physicalDevice),
15131  m_CurrentFrameIndex(0),
15132  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15133  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15134  m_NextPoolId(0),
15135  m_GlobalMemoryTypeBits(UINT32_MAX)
15136 #if VMA_RECORDING_ENABLED
15137  ,m_pRecorder(VMA_NULL)
15138 #endif
15139 {
15140  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15141  {
15142  m_UseKhrDedicatedAllocation = false;
15143  m_UseKhrBindMemory2 = false;
15144  }
15145 
15146  if(VMA_DEBUG_DETECT_CORRUPTION)
15147  {
15148  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15149  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15150  }
15151 
15152  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15153 
15154  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15155  {
15156 #if !(VMA_DEDICATED_ALLOCATION)
15157  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15158  {
15159  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15160  }
15161 #endif
15162 #if !(VMA_BIND_MEMORY2)
15163  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15164  {
15165  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15166  }
15167 #endif
15168  }
15169 #if !(VMA_MEMORY_BUDGET)
15170  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15171  {
15172  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15173  }
15174 #endif
15175 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15176  if(m_UseKhrBufferDeviceAddress)
15177  {
15178  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15179  }
15180 #endif
15181 #if VMA_VULKAN_VERSION < 1002000
15182  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15183  {
15184  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15185  }
15186 #endif
15187 #if VMA_VULKAN_VERSION < 1001000
15188  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15189  {
15190  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15191  }
15192 #endif
15193 
15194  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
15195  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15196  memset(&m_MemProps, 0, sizeof(m_MemProps));
15197 
15198  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15199  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15200  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15201 
15202  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15203  {
15204  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15205  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15206  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15207  }
15208 
15209  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15210 
15211  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15212  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15213 
15214  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15215  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15216  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15217  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15218 
15219  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15220  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15221 
15222  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15223 
15224  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15225  {
15226  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15227  {
15228  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15229  if(limit != VK_WHOLE_SIZE)
15230  {
15231  m_HeapSizeLimitMask |= 1u << heapIndex;
15232  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15233  {
15234  m_MemProps.memoryHeaps[heapIndex].size = limit;
15235  }
15236  }
15237  }
15238  }
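 // Example: with pHeapSizeLimit[heapIndex] = 256 MiB on an 8 GiB heap, the bit
 // for that heap is set in m_HeapSizeLimitMask and the reported heap size is
 // clamped to 256 MiB, so the allocator treats the heap as if it were that small.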
15239 
15240  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15241  {
15242  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15243 
15244  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15245  this,
15246  VK_NULL_HANDLE, // hParentPool
15247  memTypeIndex,
15248  preferredBlockSize,
15249  0,
15250  SIZE_MAX,
15251  GetBufferImageGranularity(),
15252  pCreateInfo->frameInUseCount,
15253  false, // explicitBlockSize
15254  false); // linearAlgorithm
15255  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15256  // because minBlockCount is 0.
15257  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15258 
15259  }
15260 }
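
// Example (a minimal sketch, not part of the library): creating an allocator
// through the public API with the options the constructor above consumes --
// a custom preferred block size and a per-heap size limit. A pHeapSizeLimit
// entry equal to VK_WHOLE_SIZE leaves that heap unlimited.
static VkResult ExampleCreateAllocator(
    VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device,
    VmaAllocator* pOutAllocator)
{
    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;                 // unlimited by default
    heapLimits[0] = 512ull * 1024 * 1024;              // cap heap 0 at 512 MiB

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.vulkanApiVersion = VK_API_VERSION_1_1;  // must also be enabled on the VkInstance/VkDevice
    createInfo.instance = instance;
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.preferredLargeHeapBlockSize = 128ull * 1024 * 1024; // 0 = library default
    createInfo.pHeapSizeLimit = heapLimits;
    return vmaCreateAllocator(&createInfo, pOutAllocator);
}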
15261 
15262 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15263 {
15264  VkResult res = VK_SUCCESS;
15265 
15266  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15267  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15268  {
15269 #if VMA_RECORDING_ENABLED
15270  m_pRecorder = vma_new(this, VmaRecorder)();
15271  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15272  if(res != VK_SUCCESS)
15273  {
15274  return res;
15275  }
15276  m_pRecorder->WriteConfiguration(
15277  m_PhysicalDeviceProperties,
15278  m_MemProps,
15279  m_VulkanApiVersion,
15280  m_UseKhrDedicatedAllocation,
15281  m_UseKhrBindMemory2,
15282  m_UseExtMemoryBudget,
15283  m_UseAmdDeviceCoherentMemory);
15284  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15285 #else
15286  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15287  return VK_ERROR_FEATURE_NOT_PRESENT;
15288 #endif
15289  }
15290 
15291 #if VMA_MEMORY_BUDGET
15292  if(m_UseExtMemoryBudget)
15293  {
15294  UpdateVulkanBudget();
15295  }
15296 #endif // #if VMA_MEMORY_BUDGET
15297 
15298  return res;
15299 }
15300 
15301 VmaAllocator_T::~VmaAllocator_T()
15302 {
15303 #if VMA_RECORDING_ENABLED
15304  if(m_pRecorder != VMA_NULL)
15305  {
15306  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15307  vma_delete(this, m_pRecorder);
15308  }
15309 #endif
15310 
15311  VMA_ASSERT(m_Pools.empty());
15312 
15313  for(size_t i = GetMemoryTypeCount(); i--; )
15314  {
15315  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15316  {
15317  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15318  }
15319 
15320  vma_delete(this, m_pDedicatedAllocations[i]);
15321  vma_delete(this, m_pBlockVectors[i]);
15322  }
15323 }
15324 
15325 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15326 {
15327 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15328  ImportVulkanFunctions_Static();
15329 #endif
15330 
15331  if(pVulkanFunctions != VMA_NULL)
15332  {
15333  ImportVulkanFunctions_Custom(pVulkanFunctions);
15334  }
15335 
15336 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15337  ImportVulkanFunctions_Dynamic();
15338 #endif
15339 
15340  ValidateVulkanFunctions();
15341 }
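
// Example (a sketch assuming the application loads Vulkan entry points itself):
// filling VmaVulkanFunctions by hand so that ImportVulkanFunctions_Custom()
// above picks the pointers up. Members left null are resolved later by the
// static or dynamic import paths, when those are enabled.
static void ExampleFillVulkanFunctions(
    VkInstance instance, VkDevice device, VmaVulkanFunctions* pOutFuncs)
{
    memset(pOutFuncs, 0, sizeof(*pOutFuncs));
    pOutFuncs->vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)
        vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
    pOutFuncs->vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)
        vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceMemoryProperties");
    pOutFuncs->vkAllocateMemory = (PFN_vkAllocateMemory)
        vkGetDeviceProcAddr(device, "vkAllocateMemory");
    pOutFuncs->vkFreeMemory = (PFN_vkFreeMemory)
        vkGetDeviceProcAddr(device, "vkFreeMemory");
    // ...the remaining members follow the same pattern. Pass the struct via
    // VmaAllocatorCreateInfo::pVulkanFunctions.
}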
15342 
15343 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15344 
15345 void VmaAllocator_T::ImportVulkanFunctions_Static()
15346 {
15347  // Vulkan 1.0
15348  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15349  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15350  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15351  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15352  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15353  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15354  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15355  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15356  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15357  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15358  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15359  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15360  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15361  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15362  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15363  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15364  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15365 
15366  // Vulkan 1.1
15367 #if VMA_VULKAN_VERSION >= 1001000
15368  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15369  {
15370  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
15371  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
15372  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
15373  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
15374  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
15375  }
15376 #endif
15377 }
15378 
15379 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15380 
15381 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
15382 {
15383  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
15384 
15385 #define VMA_COPY_IF_NOT_NULL(funcName) \
15386  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15387 
15388  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15389  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15390  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15391  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15392  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15393  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15394  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15395  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15396  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15397  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15398  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15399  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15400  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15401  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15402  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15403  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15404  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15405 
15406 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15407  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15408  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15409 #endif
15410 
15411 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15412  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15413  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15414 #endif
15415 
15416 #if VMA_MEMORY_BUDGET
15417  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15418 #endif
15419 
15420 #undef VMA_COPY_IF_NOT_NULL
15421 }
15422 
15423 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
15424 {
15425 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
15426  if(m_VulkanFunctions.memberName == VMA_NULL) \
15427  m_VulkanFunctions.memberName = \
15428  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
15429 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
15430  if(m_VulkanFunctions.memberName == VMA_NULL) \
15431  m_VulkanFunctions.memberName = \
15432  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
15433 
15434  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
15435  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
15436  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
15437  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
15438  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
15439  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
15440  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
15441  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
15442  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
15443  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
15444  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
15445  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
15446  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
15447  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
15448  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
15449  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
15450  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
15451 
15452 #if VMA_DEDICATED_ALLOCATION
15453  if(m_UseKhrDedicatedAllocation)
15454  {
15455  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
15456  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
15457  }
15458 #endif
15459 
15460 #if VMA_BIND_MEMORY2
15461  if(m_UseKhrBindMemory2)
15462  {
15463  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
15464  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
15465  }
15466 #endif // #if VMA_BIND_MEMORY2
15467 
15468 #if VMA_MEMORY_BUDGET
15469  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15470  {
15471  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
15472  }
15473 #endif // #if VMA_MEMORY_BUDGET
15474 
15475 #undef VMA_FETCH_DEVICE_FUNC
15476 #undef VMA_FETCH_INSTANCE_FUNC
15477 }
15478 
15479 void VmaAllocator_T::ValidateVulkanFunctions()
15480 {
15481  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15482  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15483  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15484  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15485  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15486  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15487  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15488  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15489  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15490  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15491  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15492  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15493  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15494  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15495  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15496  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15497  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15498 
15499 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15500  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15501  {
15502  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15503  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15504  }
15505 #endif
15506 
15507 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15508  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15509  {
15510  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15511  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15512  }
15513 #endif
15514 
15515 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15516  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15517  {
15518  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15519  }
15520 #endif
15521 }
15522 
15523 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15524 {
15525  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15526  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15527  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15528  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15529 }
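
// Worked example of the heuristic above, assuming the library defaults of
// VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB:
// - a 256 MiB heap counts as small, so its blocks prefer 256 MiB / 8 = 32 MiB;
// - an 8 GiB heap counts as large, so its blocks prefer the flat 256 MiB default;
// both results are then aligned up to a multiple of 32 bytes.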
15530 
15531 VkResult VmaAllocator_T::AllocateMemoryOfType(
15532  VkDeviceSize size,
15533  VkDeviceSize alignment,
15534  bool dedicatedAllocation,
15535  VkBuffer dedicatedBuffer,
15536  VkBufferUsageFlags dedicatedBufferUsage,
15537  VkImage dedicatedImage,
15538  const VmaAllocationCreateInfo& createInfo,
15539  uint32_t memTypeIndex,
15540  VmaSuballocationType suballocType,
15541  size_t allocationCount,
15542  VmaAllocation* pAllocations)
15543 {
15544  VMA_ASSERT(pAllocations != VMA_NULL);
15545  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
15546 
15547  VmaAllocationCreateInfo finalCreateInfo = createInfo;
15548 
15549  // If memory type is not HOST_VISIBLE, disable MAPPED.
15550  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15551  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15552  {
15553  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15554  }
15555  // If memory is lazily allocated, it should be always dedicated.
15556  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15557  {
15558  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15559  }
15560 
15561  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
15562  VMA_ASSERT(blockVector);
15563 
15564  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
15565  bool preferDedicatedMemory =
15566  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
15567  dedicatedAllocation ||
15568  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
15569  size > preferredBlockSize / 2;
15570 
15571  if(preferDedicatedMemory &&
15572  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
15573  finalCreateInfo.pool == VK_NULL_HANDLE)
15574  {
15575  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15576  }
15577 
15578  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15579  {
15580  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15581  {
15582  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15583  }
15584  else
15585  {
15586  return AllocateDedicatedMemory(
15587  size,
15588  suballocType,
15589  memTypeIndex,
15590  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15591  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15592  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15593  finalCreateInfo.pUserData,
15594  dedicatedBuffer,
15595  dedicatedBufferUsage,
15596  dedicatedImage,
15597  allocationCount,
15598  pAllocations);
15599  }
15600  }
15601  else
15602  {
15603  VkResult res = blockVector->Allocate(
15604  m_CurrentFrameIndex.load(),
15605  size,
15606  alignment,
15607  finalCreateInfo,
15608  suballocType,
15609  allocationCount,
15610  pAllocations);
15611  if(res == VK_SUCCESS)
15612  {
15613  return res;
15614  }
15615 
15616  // Block allocation failed. Try dedicated memory.
15617  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15618  {
15619  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15620  }
15621  else
15622  {
15623  res = AllocateDedicatedMemory(
15624  size,
15625  suballocType,
15626  memTypeIndex,
15627  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15628  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15629  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15630  finalCreateInfo.pUserData,
15631  dedicatedBuffer,
15632  dedicatedBufferUsage,
15633  dedicatedImage,
15634  allocationCount,
15635  pAllocations);
15636  if(res == VK_SUCCESS)
15637  {
15638  // Succeeded: AllocateDedicatedMemory has already filled pAllocations, nothing more to do here.
15639  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
15640  return VK_SUCCESS;
15641  }
15642  else
15643  {
15644  // Everything failed: Return error code.
15645  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15646  return res;
15647  }
15648  }
15649  }
15650 }
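
// Example (a public-API sketch): assuming the default 256 MiB preferred block
// size, the heuristic above already sends any request larger than 128 MiB to
// the dedicated path. An application can also force that path explicitly, as
// sketched here with a hypothetical storage buffer:
static VkResult ExampleCreateDedicatedBuffer(
    VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* pOutBuf, VmaAllocation* pOutAlloc)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = size;
    bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // skip block vectors

    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, pOutBuf, pOutAlloc, VMA_NULL);
}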
15651 
15652 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15653  VkDeviceSize size,
15654  VmaSuballocationType suballocType,
15655  uint32_t memTypeIndex,
15656  bool withinBudget,
15657  bool map,
15658  bool isUserDataString,
15659  void* pUserData,
15660  VkBuffer dedicatedBuffer,
15661  VkBufferUsageFlags dedicatedBufferUsage,
15662  VkImage dedicatedImage,
15663  size_t allocationCount,
15664  VmaAllocation* pAllocations)
15665 {
15666  VMA_ASSERT(allocationCount > 0 && pAllocations);
15667 
15668  if(withinBudget)
15669  {
15670  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15671  VmaBudget heapBudget = {};
15672  GetBudget(&heapBudget, heapIndex, 1);
15673  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
15674  {
15675  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15676  }
15677  }
15678 
15679  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
15680  allocInfo.memoryTypeIndex = memTypeIndex;
15681  allocInfo.allocationSize = size;
15682 
15683 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15684  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
15685  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15686  {
15687  if(dedicatedBuffer != VK_NULL_HANDLE)
15688  {
15689  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
15690  dedicatedAllocInfo.buffer = dedicatedBuffer;
15691  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
15692  }
15693  else if(dedicatedImage != VK_NULL_HANDLE)
15694  {
15695  dedicatedAllocInfo.image = dedicatedImage;
15696  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
15697  }
15698  }
15699 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15700 
15701 #if VMA_BUFFER_DEVICE_ADDRESS
15702  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
15703  if(m_UseKhrBufferDeviceAddress)
15704  {
15705  bool canContainBufferWithDeviceAddress = true;
15706  if(dedicatedBuffer != VK_NULL_HANDLE)
15707  {
15708  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
15709  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
15710  }
15711  else if(dedicatedImage != VK_NULL_HANDLE)
15712  {
15713  canContainBufferWithDeviceAddress = false;
15714  }
15715  if(canContainBufferWithDeviceAddress)
15716  {
15717  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
15718  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
15719  }
15720  }
15721 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
15722 
15723  size_t allocIndex;
15724  VkResult res = VK_SUCCESS;
15725  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15726  {
15727  res = AllocateDedicatedMemoryPage(
15728  size,
15729  suballocType,
15730  memTypeIndex,
15731  allocInfo,
15732  map,
15733  isUserDataString,
15734  pUserData,
15735  pAllocations + allocIndex);
15736  if(res != VK_SUCCESS)
15737  {
15738  break;
15739  }
15740  }
15741 
15742  if(res == VK_SUCCESS)
15743  {
15744  // Register them in m_pDedicatedAllocations.
15745  {
15746  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15747  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15748  VMA_ASSERT(pDedicatedAllocations);
15749  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15750  {
15751  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
15752  }
15753  }
15754 
15755  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
15756  }
15757  else
15758  {
15759  // Free all already created allocations.
15760  while(allocIndex--)
15761  {
15762  VmaAllocation currAlloc = pAllocations[allocIndex];
15763  VkDeviceMemory hMemory = currAlloc->GetMemory();
15764 
15765  /*
15766  There is no need to call this, because the Vulkan spec allows skipping
15767  vkUnmapMemory before vkFreeMemory.
15768 
15769  if(currAlloc->GetMappedData() != VMA_NULL)
15770  {
15771  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15772  }
15773  */
15774 
15775  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
15776  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
15777  currAlloc->SetUserData(this, VMA_NULL);
15778  m_AllocationObjectAllocator.Free(currAlloc);
15779  }
15780 
15781  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15782  }
15783 
15784  return res;
15785 }
15786 
15787 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15788  VkDeviceSize size,
15789  VmaSuballocationType suballocType,
15790  uint32_t memTypeIndex,
15791  const VkMemoryAllocateInfo& allocInfo,
15792  bool map,
15793  bool isUserDataString,
15794  void* pUserData,
15795  VmaAllocation* pAllocation)
15796 {
15797  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15798  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15799  if(res < 0)
15800  {
15801  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15802  return res;
15803  }
15804 
15805  void* pMappedData = VMA_NULL;
15806  if(map)
15807  {
15808  res = (*m_VulkanFunctions.vkMapMemory)(
15809  m_hDevice,
15810  hMemory,
15811  0,
15812  VK_WHOLE_SIZE,
15813  0,
15814  &pMappedData);
15815  if(res < 0)
15816  {
15817  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15818  FreeVulkanMemory(memTypeIndex, size, hMemory);
15819  return res;
15820  }
15821  }
15822 
15823  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
15824  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15825  (*pAllocation)->SetUserData(this, pUserData);
15826  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15827  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15828  {
15829  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15830  }
15831 
15832  return VK_SUCCESS;
15833 }
15834 
15835 void VmaAllocator_T::GetBufferMemoryRequirements(
15836  VkBuffer hBuffer,
15837  VkMemoryRequirements& memReq,
15838  bool& requiresDedicatedAllocation,
15839  bool& prefersDedicatedAllocation) const
15840 {
15841 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15842  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15843  {
15844  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15845  memReqInfo.buffer = hBuffer;
15846 
15847  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15848 
15849  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15850  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15851 
15852  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15853 
15854  memReq = memReq2.memoryRequirements;
15855  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15856  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15857  }
15858  else
15859 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15860  {
15861  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15862  requiresDedicatedAllocation = false;
15863  prefersDedicatedAllocation = false;
15864  }
15865 }
15866 
15867 void VmaAllocator_T::GetImageMemoryRequirements(
15868  VkImage hImage,
15869  VkMemoryRequirements& memReq,
15870  bool& requiresDedicatedAllocation,
15871  bool& prefersDedicatedAllocation) const
15872 {
15873 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15874  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15875  {
15876  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15877  memReqInfo.image = hImage;
15878 
15879  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15880 
15881  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15882  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15883 
15884  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15885 
15886  memReq = memReq2.memoryRequirements;
15887  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15888  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15889  }
15890  else
15891 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15892  {
15893  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15894  requiresDedicatedAllocation = false;
15895  prefersDedicatedAllocation = false;
15896  }
15897 }
15898 
15899 VkResult VmaAllocator_T::AllocateMemory(
15900  const VkMemoryRequirements& vkMemReq,
15901  bool requiresDedicatedAllocation,
15902  bool prefersDedicatedAllocation,
15903  VkBuffer dedicatedBuffer,
15904  VkBufferUsageFlags dedicatedBufferUsage,
15905  VkImage dedicatedImage,
15906  const VmaAllocationCreateInfo& createInfo,
15907  VmaSuballocationType suballocType,
15908  size_t allocationCount,
15909  VmaAllocation* pAllocations)
15910 {
15911  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15912 
15913  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15914 
15915  if(vkMemReq.size == 0)
15916  {
15917  return VK_ERROR_VALIDATION_FAILED_EXT;
15918  }
15919  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15920  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15921  {
15922  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15923  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15924  }
15925  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15926  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
15927  {
15928  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15929  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15930  }
15931  if(requiresDedicatedAllocation)
15932  {
15933  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15934  {
15935  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15936  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15937  }
15938  if(createInfo.pool != VK_NULL_HANDLE)
15939  {
15940  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15941  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15942  }
15943  }
15944  if((createInfo.pool != VK_NULL_HANDLE) &&
15945  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15946  {
15947  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15948  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15949  }
15950 
15951  if(createInfo.pool != VK_NULL_HANDLE)
15952  {
15953  const VkDeviceSize alignmentForPool = VMA_MAX(
15954  vkMemReq.alignment,
15955  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15956 
15957  VmaAllocationCreateInfo createInfoForPool = createInfo;
15958  // If memory type is not HOST_VISIBLE, disable MAPPED.
15959  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15960  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15961  {
15962  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15963  }
15964 
15965  return createInfo.pool->m_BlockVector.Allocate(
15966  m_CurrentFrameIndex.load(),
15967  vkMemReq.size,
15968  alignmentForPool,
15969  createInfoForPool,
15970  suballocType,
15971  allocationCount,
15972  pAllocations);
15973  }
15974  else
15975  {
15976  // Bit mask of Vulkan memory types acceptable for this allocation.
15977  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15978  uint32_t memTypeIndex = UINT32_MAX;
15979  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15980  if(res == VK_SUCCESS)
15981  {
15982  VkDeviceSize alignmentForMemType = VMA_MAX(
15983  vkMemReq.alignment,
15984  GetMemoryTypeMinAlignment(memTypeIndex));
15985 
15986  res = AllocateMemoryOfType(
15987  vkMemReq.size,
15988  alignmentForMemType,
15989  requiresDedicatedAllocation || prefersDedicatedAllocation,
15990  dedicatedBuffer,
15991  dedicatedBufferUsage,
15992  dedicatedImage,
15993  createInfo,
15994  memTypeIndex,
15995  suballocType,
15996  allocationCount,
15997  pAllocations);
15998  // Succeeded on first try.
15999  if(res == VK_SUCCESS)
16000  {
16001  return res;
16002  }
16003  // Allocation from this memory type failed. Try other compatible memory types.
16004  else
16005  {
16006  for(;;)
16007  {
16008  // Remove old memTypeIndex from list of possibilities.
16009  memoryTypeBits &= ~(1u << memTypeIndex);
16010  // Find alternative memTypeIndex.
16011  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16012  if(res == VK_SUCCESS)
16013  {
16014  alignmentForMemType = VMA_MAX(
16015  vkMemReq.alignment,
16016  GetMemoryTypeMinAlignment(memTypeIndex));
16017 
16018  res = AllocateMemoryOfType(
16019  vkMemReq.size,
16020  alignmentForMemType,
16021  requiresDedicatedAllocation || prefersDedicatedAllocation,
16022  dedicatedBuffer,
16023  dedicatedBufferUsage,
16024  dedicatedImage,
16025  createInfo,
16026  memTypeIndex,
16027  suballocType,
16028  allocationCount,
16029  pAllocations);
16030  // Allocation from this alternative memory type succeeded.
16031  if(res == VK_SUCCESS)
16032  {
16033  return res;
16034  }
16035  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16036  }
16037  // No other matching memory type index could be found.
16038  else
16039  {
16040  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16041  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16042  }
16043  }
16044  }
16045  }
16046  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16047  else
16048  return res;
16049  }
16050 }
16051 
16052 void VmaAllocator_T::FreeMemory(
16053  size_t allocationCount,
16054  const VmaAllocation* pAllocations)
16055 {
16056  VMA_ASSERT(pAllocations);
16057 
16058  for(size_t allocIndex = allocationCount; allocIndex--; )
16059  {
16060  VmaAllocation allocation = pAllocations[allocIndex];
16061 
16062  if(allocation != VK_NULL_HANDLE)
16063  {
16064  if(TouchAllocation(allocation))
16065  {
16066  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16067  {
16068  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16069  }
16070 
16071  switch(allocation->GetType())
16072  {
16073  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16074  {
16075  VmaBlockVector* pBlockVector = VMA_NULL;
16076  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16077  if(hPool != VK_NULL_HANDLE)
16078  {
16079  pBlockVector = &hPool->m_BlockVector;
16080  }
16081  else
16082  {
16083  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16084  pBlockVector = m_pBlockVectors[memTypeIndex];
16085  }
16086  pBlockVector->Free(allocation);
16087  }
16088  break;
16089  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16090  FreeDedicatedMemory(allocation);
16091  break;
16092  default:
16093  VMA_ASSERT(0);
16094  }
16095  }
16096 
16097  // Do this regardless of whether the allocation is lost. Lost allocations are still counted in m_Budget.m_AllocationBytes.
16098  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16099  allocation->SetUserData(this, VMA_NULL);
16100  m_AllocationObjectAllocator.Free(allocation);
16101  }
16102  }
16103 }
16104 
16105 VkResult VmaAllocator_T::ResizeAllocation(
16106  const VmaAllocation alloc,
16107  VkDeviceSize newSize)
16108 {
16109  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16110  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16111  {
16112  return VK_ERROR_VALIDATION_FAILED_EXT;
16113  }
16114  if(newSize == alloc->GetSize())
16115  {
16116  return VK_SUCCESS;
16117  }
16118  return VK_ERROR_OUT_OF_POOL_MEMORY;
16119 }
16120 
16121 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16122 {
16123  // Initialize.
16124  InitStatInfo(pStats->total);
16125  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16126  InitStatInfo(pStats->memoryType[i]);
16127  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16128  InitStatInfo(pStats->memoryHeap[i]);
16129 
16130  // Process default pools.
16131  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16132  {
16133  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16134  VMA_ASSERT(pBlockVector);
16135  pBlockVector->AddStats(pStats);
16136  }
16137 
16138  // Process custom pools.
16139  {
16140  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16141  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16142  {
16143  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16144  }
16145  }
16146 
16147  // Process dedicated allocations.
16148  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16149  {
16150  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16151  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16152  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16153  VMA_ASSERT(pDedicatedAllocVector);
16154  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16155  {
16156  VmaStatInfo allocationStatInfo;
16157  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16158  VmaAddStatInfo(pStats->total, allocationStatInfo);
16159  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16160  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16161  }
16162  }
16163 
16164  // Postprocess.
16165  VmaPostprocessCalcStatInfo(pStats->total);
16166  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16167  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16168  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16169  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16170 }
16171 
16172 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16173 {
16174 #if VMA_MEMORY_BUDGET
16175  if(m_UseExtMemoryBudget)
16176  {
16177  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16178  {
16179  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16180  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16181  {
16182  const uint32_t heapIndex = firstHeap + i;
16183 
16184  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16185  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16186 
16187  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16188  {
16189  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16190  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16191  }
16192  else
16193  {
16194  outBudget->usage = 0;
16195  }
16196 
16197  // Have to take MIN with the heap size, because an explicit HeapSizeLimit has already been applied to it.
16198  outBudget->budget = VMA_MIN(
16199  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16200  }
16201  }
16202  else
16203  {
16204  UpdateVulkanBudget(); // Outside of mutex lock
16205  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16206  }
16207  }
16208  else
16209 #endif
16210  {
16211  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16212  {
16213  const uint32_t heapIndex = firstHeap + i;
16214 
16215  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16216  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16217 
16218  outBudget->usage = outBudget->blockBytes;
16219  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16220  }
16221  }
16222 }
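
// Example (a public-API sketch): querying per-heap budgets before streaming in
// more data. With VK_EXT_memory_budget enabled this reflects the driver's
// numbers (refreshed at most every ~30 allocator operations, as above);
// otherwise it falls back to the 80%-of-heap-size estimate.
static bool ExampleHeapHasRoom(
    VmaAllocator allocator, uint32_t heapIndex, VkDeviceSize bytesNeeded)
{
    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets); // fills one VmaBudget per memory heap
    return budgets[heapIndex].usage + bytesNeeded <= budgets[heapIndex].budget;
}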
16223 
16224 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16225 
16226 VkResult VmaAllocator_T::DefragmentationBegin(
16227  const VmaDefragmentationInfo2& info,
16228  VmaDefragmentationStats* pStats,
16229  VmaDefragmentationContext* pContext)
16230 {
16231  if(info.pAllocationsChanged != VMA_NULL)
16232  {
16233  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16234  }
16235 
16236  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16237  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16238 
16239  (*pContext)->AddPools(info.poolCount, info.pPools);
16240  (*pContext)->AddAllocations(
16241  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16242 
16243  VkResult res = (*pContext)->Defragment(
16244  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16245  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16246  info.commandBuffer, pStats, info.flags);
16247 
16248  if(res != VK_NOT_READY)
16249  {
16250  vma_delete(this, *pContext);
16251  *pContext = VMA_NULL;
16252  }
16253 
16254  return res;
16255 }
16256 
16257 VkResult VmaAllocator_T::DefragmentationEnd(
16258  VmaDefragmentationContext context)
16259 {
16260  vma_delete(this, context);
16261  return VK_SUCCESS;
16262 }
16263 
16264 VkResult VmaAllocator_T::DefragmentationPassBegin(
16265  VmaDefragmentationPassInfo* pInfo,
16266  VmaDefragmentationContext context)
16267 {
16268  return context->DefragmentPassBegin(pInfo);
16269 }
16270 VkResult VmaAllocator_T::DefragmentationPassEnd(
16271  VmaDefragmentationContext context)
16272 {
16273  return context->DefragmentPassEnd();
16274 
16275 }
16276 
16277 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16278 {
16279  if(hAllocation->CanBecomeLost())
16280  {
16281  /*
16282  Warning: This is a carefully designed algorithm.
16283  Do not modify unless you really know what you're doing :)
16284  */
16285  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16286  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16287  for(;;)
16288  {
16289  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16290  {
16291  pAllocationInfo->memoryType = UINT32_MAX;
16292  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16293  pAllocationInfo->offset = 0;
16294  pAllocationInfo->size = hAllocation->GetSize();
16295  pAllocationInfo->pMappedData = VMA_NULL;
16296  pAllocationInfo->pUserData = hAllocation->GetUserData();
16297  return;
16298  }
16299  else if(localLastUseFrameIndex == localCurrFrameIndex)
16300  {
16301  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16302  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16303  pAllocationInfo->offset = hAllocation->GetOffset();
16304  pAllocationInfo->size = hAllocation->GetSize();
16305  pAllocationInfo->pMappedData = VMA_NULL;
16306  pAllocationInfo->pUserData = hAllocation->GetUserData();
16307  return;
16308  }
16309  else // Last use time earlier than current time.
16310  {
16311  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16312  {
16313  localLastUseFrameIndex = localCurrFrameIndex;
16314  }
16315  }
16316  }
16317  }
16318  else
16319  {
16320 #if VMA_STATS_STRING_ENABLED
16321  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16322  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16323  for(;;)
16324  {
16325  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16326  if(localLastUseFrameIndex == localCurrFrameIndex)
16327  {
16328  break;
16329  }
16330  else // Last use time earlier than current time.
16331  {
16332  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16333  {
16334  localLastUseFrameIndex = localCurrFrameIndex;
16335  }
16336  }
16337  }
16338 #endif
16339 
16340  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16341  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16342  pAllocationInfo->offset = hAllocation->GetOffset();
16343  pAllocationInfo->size = hAllocation->GetSize();
16344  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16345  pAllocationInfo->pUserData = hAllocation->GetUserData();
16346  }
16347 }
16348 
16349 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16350 {
16351  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16352  if(hAllocation->CanBecomeLost())
16353  {
16354  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16355  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16356  for(;;)
16357  {
16358  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16359  {
16360  return false;
16361  }
16362  else if(localLastUseFrameIndex == localCurrFrameIndex)
16363  {
16364  return true;
16365  }
16366  else // Last use time earlier than current time.
16367  {
16368  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16369  {
16370  localLastUseFrameIndex = localCurrFrameIndex;
16371  }
16372  }
16373  }
16374  }
16375  else
16376  {
16377 #if VMA_STATS_STRING_ENABLED
16378  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16379  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16380  for(;;)
16381  {
16382  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16383  if(localLastUseFrameIndex == localCurrFrameIndex)
16384  {
16385  break;
16386  }
16387  else // Last use time earlier than current time.
16388  {
16389  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16390  {
16391  localLastUseFrameIndex = localCurrFrameIndex;
16392  }
16393  }
16394  }
16395 #endif
16396 
16397  return true;
16398  }
16399 }
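
// Example (a public-API sketch): the per-frame pattern for allocations created
// with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT -- touch the allocation
// before use, and recreate the resource if it has been lost.
static bool ExampleEnsureNotLost(VmaAllocator allocator, VmaAllocation alloc)
{
    if(vmaTouchAllocation(allocator, alloc) == VK_TRUE)
    {
        return true; // still valid; last-use frame index was just updated
    }
    return false; // lost: free the allocation and recreate the resource
}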
16400 
16401 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16402 {
16403  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16404 
16405  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16406 
16407  if(newCreateInfo.maxBlockCount == 0)
16408  {
16409  newCreateInfo.maxBlockCount = SIZE_MAX;
16410  }
16411  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16412  {
16413  return VK_ERROR_INITIALIZATION_FAILED;
16414  }
16415  // Memory type index out of range or forbidden.
16416  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16417  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16418  {
16419  return VK_ERROR_FEATURE_NOT_PRESENT;
16420  }
16421 
16422  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16423 
16424  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16425 
16426  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16427  if(res != VK_SUCCESS)
16428  {
16429  vma_delete(this, *pPool);
16430  *pPool = VMA_NULL;
16431  return res;
16432  }
16433 
16434  // Add to m_Pools.
16435  {
16436  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16437  (*pPool)->SetId(m_NextPoolId++);
16438  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16439  }
16440 
16441  return VK_SUCCESS;
16442 }
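
// Example (a public-API sketch): creating a custom pool for one memory type,
// exercising the validation and CreateMinBlocks() path above.
static VkResult ExampleCreatePool(
    VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pOutPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024; // 0 would mean "use preferred size"
    poolInfo.minBlockCount = 1;               // preallocated by CreateMinBlocks()
    poolInfo.maxBlockCount = 0;               // 0 is normalized to SIZE_MAX above
    return vmaCreatePool(allocator, &poolInfo, pOutPool);
}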
16443 
16444 void VmaAllocator_T::DestroyPool(VmaPool pool)
16445 {
16446  // Remove from m_Pools.
16447  {
16448  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16449  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16450  VMA_ASSERT(success && "Pool not found in Allocator.");
16451  }
16452 
16453  vma_delete(this, pool);
16454 }
16455 
16456 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16457 {
16458  pool->m_BlockVector.GetPoolStats(pPoolStats);
16459 }
16460 
16461 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16462 {
16463  m_CurrentFrameIndex.store(frameIndex);
16464 
16465 #if VMA_MEMORY_BUDGET
16466  if(m_UseExtMemoryBudget)
16467  {
16468  UpdateVulkanBudget();
16469  }
16470 #endif // #if VMA_MEMORY_BUDGET
16471 }
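
// Example (a sketch): advancing the frame index once per rendered frame. This
// drives the lost-allocation bookkeeping and, as above, also refreshes the
// VK_EXT_memory_budget numbers when that extension is in use.
//
//   for(uint32_t frameIndex = 0; !quit; ++frameIndex)
//   {
//       vmaSetCurrentFrameIndex(allocator, frameIndex);
//       // ...record and submit the frame...
//   }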
16472 
16473 void VmaAllocator_T::MakePoolAllocationsLost(
16474  VmaPool hPool,
16475  size_t* pLostAllocationCount)
16476 {
16477  hPool->m_BlockVector.MakePoolAllocationsLost(
16478  m_CurrentFrameIndex.load(),
16479  pLostAllocationCount);
16480 }
16481 
16482 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
16483 {
16484  return hPool->m_BlockVector.CheckCorruption();
16485 }
16486 
16487 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16488 {
16489  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16490 
16491  // Process default pools.
16492  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16493  {
16494  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16495  {
16496  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16497  VMA_ASSERT(pBlockVector);
16498  VkResult localRes = pBlockVector->CheckCorruption();
16499  switch(localRes)
16500  {
16501  case VK_ERROR_FEATURE_NOT_PRESENT:
16502  break;
16503  case VK_SUCCESS:
16504  finalRes = VK_SUCCESS;
16505  break;
16506  default:
16507  return localRes;
16508  }
16509  }
16510  }
16511 
16512  // Process custom pools.
16513  {
16514  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16515  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16516  {
16517  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16518  {
16519  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16520  switch(localRes)
16521  {
16522  case VK_ERROR_FEATURE_NOT_PRESENT:
16523  break;
16524  case VK_SUCCESS:
16525  finalRes = VK_SUCCESS;
16526  break;
16527  default:
16528  return localRes;
16529  }
16530  }
16531  }
16532  }
16533 
16534  return finalRes;
16535 }
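
// Example (a sketch): checking every memory type for corruption. This only
// does real work when corruption detection is compiled in (VMA_DEBUG_MARGIN
// together with VMA_DEBUG_DETECT_CORRUPTION); otherwise each block vector
// reports VK_ERROR_FEATURE_NOT_PRESENT, as handled above.
//
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
//   VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);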
16536 
16537 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
16538 {
16539  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
16540  (*pAllocation)->InitLost();
16541 }
16542 
16543 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
16544 {
16545  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
16546 
16547  // HeapSizeLimit is in effect for this heap.
16548  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16549  {
16550  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16551  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
16552  for(;;)
16553  {
16554  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
16555  if(blockBytesAfterAllocation > heapSize)
16556  {
16557  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16558  }
16559  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
16560  {
16561  break;
16562  }
16563  }
16564  }
16565  else
16566  {
16567  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
16568  }
16569 
16570  // VULKAN CALL vkAllocateMemory.
16571  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
16572 
16573  if(res == VK_SUCCESS)
16574  {
16575 #if VMA_MEMORY_BUDGET
16576  ++m_Budget.m_OperationsSinceBudgetFetch;
16577 #endif
16578 
16579  // Informative callback.
16580  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
16581  {
16582  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
16583  }
16584  }
16585  else
16586  {
16587  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
16588  }
16589 
16590  return res;
16591 }
16592 
16593 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
16594 {
16595  // Informative callback.
16596  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
16597  {
16598  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
16599  }
16600 
16601  // VULKAN CALL vkFreeMemory.
16602  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
16603 
16604  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
16605 }
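
// Example (a sketch with hypothetical logging callbacks, assuming <cstdio> is
// available): the informative VmaDeviceMemoryCallbacks invoked by
// AllocateVulkanMemory() and FreeVulkanMemory() above, registered via
// VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
static void VKAPI_PTR ExampleOnAllocateDeviceMemory(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory,
    VkDeviceSize size, void* pUserData)
{
    printf("vkAllocateMemory: %llu bytes, memory type %u\n",
        (unsigned long long)size, memoryType);
}
static void VKAPI_PTR ExampleOnFreeDeviceMemory(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory,
    VkDeviceSize size, void* pUserData)
{
    printf("vkFreeMemory: %llu bytes, memory type %u\n",
        (unsigned long long)size, memoryType);
}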
16606 
16607 VkResult VmaAllocator_T::BindVulkanBuffer(
16608  VkDeviceMemory memory,
16609  VkDeviceSize memoryOffset,
16610  VkBuffer buffer,
16611  const void* pNext)
16612 {
16613  if(pNext != VMA_NULL)
16614  {
16615 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16616  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16617  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
16618  {
16619  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
16620  bindBufferMemoryInfo.pNext = pNext;
16621  bindBufferMemoryInfo.buffer = buffer;
16622  bindBufferMemoryInfo.memory = memory;
16623  bindBufferMemoryInfo.memoryOffset = memoryOffset;
16624  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
16625  }
16626  else
16627 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16628  {
16629  return VK_ERROR_EXTENSION_NOT_PRESENT;
16630  }
16631  }
16632  else
16633  {
16634  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
16635  }
16636 }
16637 
16638 VkResult VmaAllocator_T::BindVulkanImage(
16639  VkDeviceMemory memory,
16640  VkDeviceSize memoryOffset,
16641  VkImage image,
16642  const void* pNext)
16643 {
16644  if(pNext != VMA_NULL)
16645  {
16646 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16647  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16648  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
16649  {
16650  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
16651  bindImageMemoryInfo.pNext = pNext;
16652  bindImageMemoryInfo.image = image;
16653  bindImageMemoryInfo.memory = memory;
16654  bindImageMemoryInfo.memoryOffset = memoryOffset;
16655  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
16656  }
16657  else
16658 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16659  {
16660  return VK_ERROR_EXTENSION_NOT_PRESENT;
16661  }
16662  }
16663  else
16664  {
16665  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
16666  }
16667 }
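
// Example (a public-API sketch): binding with an extra pNext chain, which
// routes through the vkBindBufferMemory2KHR path above and fails with
// VK_ERROR_EXTENSION_NOT_PRESENT when neither Vulkan 1.1 nor
// VK_KHR_bind_memory2 is available. The device-group struct here is only an
// illustration of something an application might chain in.
//
//   VkBindBufferMemoryDeviceGroupInfo deviceGroupInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO };
//   // ...fill deviceGroupInfo...
//   VkResult res = vmaBindBufferMemory2(
//       allocator, alloc, 0 /*allocationLocalOffset*/, buffer, &deviceGroupInfo);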
16668 
16669 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
16670 {
16671  if(hAllocation->CanBecomeLost())
16672  {
16673  return VK_ERROR_MEMORY_MAP_FAILED;
16674  }
16675 
16676  switch(hAllocation->GetType())
16677  {
16678  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16679  {
16680  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16681  char *pBytes = VMA_NULL;
16682  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
16683  if(res == VK_SUCCESS)
16684  {
16685  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
16686  hAllocation->BlockAllocMap();
16687  }
16688  return res;
16689  }
16690  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16691  return hAllocation->DedicatedAllocMap(this, ppData);
16692  default:
16693  VMA_ASSERT(0);
16694  return VK_ERROR_MEMORY_MAP_FAILED;
16695  }
16696 }
16697 
16698 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16699 {
16700  switch(hAllocation->GetType())
16701  {
16702  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16703  {
16704  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16705  hAllocation->BlockAllocUnmap();
16706  pBlock->Unmap(this, 1);
16707  }
16708  break;
16709  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16710  hAllocation->DedicatedAllocUnmap(this);
16711  break;
16712  default:
16713  VMA_ASSERT(0);
16714  }
16715 }
16716 
16717 VkResult VmaAllocator_T::BindBufferMemory(
16718  VmaAllocation hAllocation,
16719  VkDeviceSize allocationLocalOffset,
16720  VkBuffer hBuffer,
16721  const void* pNext)
16722 {
16723  VkResult res = VK_SUCCESS;
16724  switch(hAllocation->GetType())
16725  {
16726  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16727  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16728  break;
16729  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16730  {
16731  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16732  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16733  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16734  break;
16735  }
16736  default:
16737  VMA_ASSERT(0);
16738  }
16739  return res;
16740 }
16741 
16742 VkResult VmaAllocator_T::BindImageMemory(
16743  VmaAllocation hAllocation,
16744  VkDeviceSize allocationLocalOffset,
16745  VkImage hImage,
16746  const void* pNext)
16747 {
16748  VkResult res = VK_SUCCESS;
16749  switch(hAllocation->GetType())
16750  {
16751  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16752  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16753  break;
16754  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16755  {
16756  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16757  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16758  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16759  break;
16760  }
16761  default:
16762  VMA_ASSERT(0);
16763  }
16764  return res;
16765 }
16766 
16767 void VmaAllocator_T::FlushOrInvalidateAllocation(
16768  VmaAllocation hAllocation,
16769  VkDeviceSize offset, VkDeviceSize size,
16770  VMA_CACHE_OPERATION op)
16771 {
16772  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
16773  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
16774  {
16775  const VkDeviceSize allocationSize = hAllocation->GetSize();
16776  VMA_ASSERT(offset <= allocationSize);
16777 
16778  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
16779 
16780  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
16781  memRange.memory = hAllocation->GetMemory();
16782 
16783  switch(hAllocation->GetType())
16784  {
16785  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16786  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16787  if(size == VK_WHOLE_SIZE)
16788  {
16789  memRange.size = allocationSize - memRange.offset;
16790  }
16791  else
16792  {
16793  VMA_ASSERT(offset + size <= allocationSize);
16794  memRange.size = VMA_MIN(
16795  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
16796  allocationSize - memRange.offset);
16797  }
16798  break;
16799 
16800  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16801  {
16802  // 1. Still within this allocation.
16803  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16804  if(size == VK_WHOLE_SIZE)
16805  {
16806  size = allocationSize - offset;
16807  }
16808  else
16809  {
16810  VMA_ASSERT(offset + size <= allocationSize);
16811  }
16812  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
16813 
16814  // 2. Adjust to whole block.
16815  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
16816  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
16817  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
16818  memRange.offset += allocationOffset;
16819  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
16820 
16821  break;
16822  }
16823 
16824  default:
16825  VMA_ASSERT(0);
16826  }
16827 
16828  switch(op)
16829  {
16830  case VMA_CACHE_FLUSH:
16831  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
16832  break;
16833  case VMA_CACHE_INVALIDATE:
16834  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
16835  break;
16836  default:
16837  VMA_ASSERT(0);
16838  }
16839  }
16840  // else: Just ignore this call.
16841 }
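
/*
A worked instance of the alignment math above (illustrative numbers only):
with nonCoherentAtomSize = 64, offset = 100, size = 200:

    memRange.offset = VmaAlignDown(100, 64)            = 64
    memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256

The resulting range [64, 320) covers the requested [100, 300) and both ends
are aligned to nonCoherentAtomSize, as vkFlushMappedMemoryRanges and
vkInvalidateMappedMemoryRanges require for non-coherent memory.
*/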
16842 
16843 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16844 {
16845  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16846 
16847  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16848  {
16849  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16850  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16851  VMA_ASSERT(pDedicatedAllocations);
16852  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16853  VMA_ASSERT(success);
16854  }
16855 
16856  VkDeviceMemory hMemory = allocation->GetMemory();
16857 
16858  /*
16859  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16860  before vkFreeMemory.
16861 
16862  if(allocation->GetMappedData() != VMA_NULL)
16863  {
16864  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16865  }
16866  */
16867 
16868  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16869 
16870  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16871 }
16872 
16873 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16874 {
16875  VkBufferCreateInfo dummyBufCreateInfo;
16876  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16877 
16878  uint32_t memoryTypeBits = 0;
16879 
16880  // Create buffer.
16881  VkBuffer buf = VK_NULL_HANDLE;
16882  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16883  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16884  if(res == VK_SUCCESS)
16885  {
16886  // Query for supported memory types.
16887  VkMemoryRequirements memReq;
16888  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16889  memoryTypeBits = memReq.memoryTypeBits;
16890 
16891  // Destroy buffer.
16892  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16893  }
16894 
16895  return memoryTypeBits;
16896 }
16897 
16898 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
16899 {
16900  // Make sure memory information is already fetched.
16901  VMA_ASSERT(GetMemoryTypeCount() > 0);
16902 
16903  uint32_t memoryTypeBits = UINT32_MAX;
16904 
16905  if(!m_UseAmdDeviceCoherentMemory)
16906  {
16907  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
16908  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16909  {
16910  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
16911  {
16912  memoryTypeBits &= ~(1u << memTypeIndex);
16913  }
16914  }
16915  }
16916 
16917  return memoryTypeBits;
16918 }
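
/*
The mask arithmetic above in isolation (a small sketch): clearing bit i
removes memory type i from every memoryTypeBits filter derived from this mask.

    uint32_t bits = UINT32_MAX; // 0xFFFFFFFF: all memory types allowed
    bits &= ~(1u << 3);         // 0xFFFFFFF7: memory type 3 excluded
*/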
16919 
16920 #if VMA_MEMORY_BUDGET
16921 
16922 void VmaAllocator_T::UpdateVulkanBudget()
16923 {
16924  VMA_ASSERT(m_UseExtMemoryBudget);
16925 
16926  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16927 
16928  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16929  VmaPnextChainPushFront(&memProps, &budgetProps);
16930 
16931  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16932 
16933  {
16934  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16935 
16936  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16937  {
16938  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16939  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16940  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16941 
16942  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
16943  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
16944  {
16945  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16946  }
16947  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
16948  {
16949  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
16950  }
16951  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
16952  {
16953  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16954  }
16955  }
16956  m_Budget.m_OperationsSinceBudgetFetch = 0;
16957  }
16958 }
16959 
16960 #endif // #if VMA_MEMORY_BUDGET
16961 
16962 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16963 {
16964  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16965  !hAllocation->CanBecomeLost() &&
16966  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16967  {
16968  void* pData = VMA_NULL;
16969  VkResult res = Map(hAllocation, &pData);
16970  if(res == VK_SUCCESS)
16971  {
16972  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16973  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16974  Unmap(hAllocation);
16975  }
16976  else
16977  {
16978  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16979  }
16980  }
16981 }
16982 
16983 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16984 {
16985  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16986  if(memoryTypeBits == UINT32_MAX)
16987  {
16988  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16989  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16990  }
16991  return memoryTypeBits;
16992 }
16993 
16994 #if VMA_STATS_STRING_ENABLED
16995 
16996 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16997 {
16998  bool dedicatedAllocationsStarted = false;
16999  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17000  {
17001  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17002  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
17003  VMA_ASSERT(pDedicatedAllocVector);
17004  if(pDedicatedAllocVector->empty() == false)
17005  {
17006  if(dedicatedAllocationsStarted == false)
17007  {
17008  dedicatedAllocationsStarted = true;
17009  json.WriteString("DedicatedAllocations");
17010  json.BeginObject();
17011  }
17012 
17013  json.BeginString("Type ");
17014  json.ContinueString(memTypeIndex);
17015  json.EndString();
17016 
17017  json.BeginArray();
17018 
17019  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
17020  {
17021  json.BeginObject(true);
17022  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
17023  hAlloc->PrintParameters(json);
17024  json.EndObject();
17025  }
17026 
17027  json.EndArray();
17028  }
17029  }
17030  if(dedicatedAllocationsStarted)
17031  {
17032  json.EndObject();
17033  }
17034 
17035  {
17036  bool allocationsStarted = false;
17037  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17038  {
17039  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
17040  {
17041  if(allocationsStarted == false)
17042  {
17043  allocationsStarted = true;
17044  json.WriteString("DefaultPools");
17045  json.BeginObject();
17046  }
17047 
17048  json.BeginString("Type ");
17049  json.ContinueString(memTypeIndex);
17050  json.EndString();
17051 
17052  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
17053  }
17054  }
17055  if(allocationsStarted)
17056  {
17057  json.EndObject();
17058  }
17059  }
17060 
17061  // Custom pools
17062  {
17063  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17064  const size_t poolCount = m_Pools.size();
17065  if(poolCount > 0)
17066  {
17067  json.WriteString("Pools");
17068  json.BeginObject();
17069  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
17070  {
17071  json.BeginString();
17072  json.ContinueString(m_Pools[poolIndex]->GetId());
17073  json.EndString();
17074 
17075  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
17076  }
17077  json.EndObject();
17078  }
17079  }
17080 }
17081 
17082 #endif // #if VMA_STATS_STRING_ENABLED
17083 
17084 //////////////////////////////////////////////////////////////////////////////
17085 // Public interface
17086 
17087 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17088  const VmaAllocatorCreateInfo* pCreateInfo,
17089  VmaAllocator* pAllocator)
17090 {
17091  VMA_ASSERT(pCreateInfo && pAllocator);
17092  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17093  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17094  VMA_DEBUG_LOG("vmaCreateAllocator");
17095  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17096  return (*pAllocator)->Init(pCreateInfo);
17097 }
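
/*
Typical call sequence (a minimal sketch; instance, physicalDevice and device
are assumed to be valid Vulkan handles created by the application beforehand):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/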
17098 
17099 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17100  VmaAllocator allocator)
17101 {
17102  if(allocator != VK_NULL_HANDLE)
17103  {
17104  VMA_DEBUG_LOG("vmaDestroyAllocator");
17105  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17106  vma_delete(&allocationCallbacks, allocator);
17107  }
17108 }
17109 
17110 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17111 {
17112  VMA_ASSERT(allocator && pAllocatorInfo);
17113  pAllocatorInfo->instance = allocator->m_hInstance;
17114  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17115  pAllocatorInfo->device = allocator->m_hDevice;
17116 }
17117 
17118 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17119  VmaAllocator allocator,
17120  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17121 {
17122  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17123  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17124 }
17125 
17126 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17127  VmaAllocator allocator,
17128  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17129 {
17130  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17131  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17132 }
17133 
17134 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17135  VmaAllocator allocator,
17136  uint32_t memoryTypeIndex,
17137  VkMemoryPropertyFlags* pFlags)
17138 {
17139  VMA_ASSERT(allocator && pFlags);
17140  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17141  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17142 }
17143 
17144 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17145  VmaAllocator allocator,
17146  uint32_t frameIndex)
17147 {
17148  VMA_ASSERT(allocator);
17149  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17150 
17151  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17152 
17153  allocator->SetCurrentFrameIndex(frameIndex);
17154 }
17155 
17156 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17157  VmaAllocator allocator,
17158  VmaStats* pStats)
17159 {
17160  VMA_ASSERT(allocator && pStats);
17161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17162  allocator->CalculateStats(pStats);
17163 }
17164 
17165 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17166  VmaAllocator allocator,
17167  VmaBudget* pBudget)
17168 {
17169  VMA_ASSERT(allocator && pBudget);
17170  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17171  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17172 }
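
/*
Example: checking the remaining budget of heap 0 before a large allocation
(a sketch; requiredBytes is a hypothetical size computed by the application):

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    if(budgets[0].usage + requiredBytes <= budgets[0].budget)
    {
        // Heap 0 likely has room; proceed with the allocation.
    }
*/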
17173 
17174 #if VMA_STATS_STRING_ENABLED
17175 
17176 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17177  VmaAllocator allocator,
17178  char** ppStatsString,
17179  VkBool32 detailedMap)
17180 {
17181  VMA_ASSERT(allocator && ppStatsString);
17182  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17183 
17184  VmaStringBuilder sb(allocator);
17185  {
17186  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17187  json.BeginObject();
17188 
17189  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17190  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17191 
17192  VmaStats stats;
17193  allocator->CalculateStats(&stats);
17194 
17195  json.WriteString("Total");
17196  VmaPrintStatInfo(json, stats.total);
17197 
17198  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17199  {
17200  json.BeginString("Heap ");
17201  json.ContinueString(heapIndex);
17202  json.EndString();
17203  json.BeginObject();
17204 
17205  json.WriteString("Size");
17206  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17207 
17208  json.WriteString("Flags");
17209  json.BeginArray(true);
17210  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17211  {
17212  json.WriteString("DEVICE_LOCAL");
17213  }
17214  json.EndArray();
17215 
17216  json.WriteString("Budget");
17217  json.BeginObject();
17218  {
17219  json.WriteString("BlockBytes");
17220  json.WriteNumber(budget[heapIndex].blockBytes);
17221  json.WriteString("AllocationBytes");
17222  json.WriteNumber(budget[heapIndex].allocationBytes);
17223  json.WriteString("Usage");
17224  json.WriteNumber(budget[heapIndex].usage);
17225  json.WriteString("Budget");
17226  json.WriteNumber(budget[heapIndex].budget);
17227  }
17228  json.EndObject();
17229 
17230  if(stats.memoryHeap[heapIndex].blockCount > 0)
17231  {
17232  json.WriteString("Stats");
17233  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17234  }
17235 
17236  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17237  {
17238  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17239  {
17240  json.BeginString("Type ");
17241  json.ContinueString(typeIndex);
17242  json.EndString();
17243 
17244  json.BeginObject();
17245 
17246  json.WriteString("Flags");
17247  json.BeginArray(true);
17248  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17249  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17250  {
17251  json.WriteString("DEVICE_LOCAL");
17252  }
17253  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17254  {
17255  json.WriteString("HOST_VISIBLE");
17256  }
17257  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17258  {
17259  json.WriteString("HOST_COHERENT");
17260  }
17261  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17262  {
17263  json.WriteString("HOST_CACHED");
17264  }
17265  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17266  {
17267  json.WriteString("LAZILY_ALLOCATED");
17268  }
17269  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17270  {
17271  json.WriteString(" PROTECTED");
17272  }
17273  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17274  {
17275  json.WriteString(" DEVICE_COHERENT");
17276  }
17277  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17278  {
17279  json.WriteString(" DEVICE_UNCACHED");
17280  }
17281  json.EndArray();
17282 
17283  if(stats.memoryType[typeIndex].blockCount > 0)
17284  {
17285  json.WriteString("Stats");
17286  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17287  }
17288 
17289  json.EndObject();
17290  }
17291  }
17292 
17293  json.EndObject();
17294  }
17295  if(detailedMap == VK_TRUE)
17296  {
17297  allocator->PrintDetailedMap(json);
17298  }
17299 
17300  json.EndObject();
17301  }
17302 
17303  const size_t len = sb.GetLength();
17304  char* const pChars = vma_new_array(allocator, char, len + 1);
17305  if(len > 0)
17306  {
17307  memcpy(pChars, sb.GetData(), len);
17308  }
17309  pChars[len] = '\0';
17310  *ppStatsString = pChars;
17311 }
17312 
17313 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17314  VmaAllocator allocator,
17315  char* pStatsString)
17316 {
17317  if(pStatsString != VMA_NULL)
17318  {
17319  VMA_ASSERT(allocator);
17320  size_t len = strlen(pStatsString);
17321  vma_delete_array(allocator, pStatsString, len + 1);
17322  }
17323 }
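
/*
Example: dumping the JSON statistics built above to a file (a sketch;
fopen error handling shortened):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: include detailed map
    FILE* file = fopen("vma_stats.json", "w"); // hypothetical output path
    if(file != NULL)
    {
        fputs(statsString, file);
        fclose(file);
    }
    vmaFreeStatsString(allocator, statsString);
*/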
17324 
17325 #endif // #if VMA_STATS_STRING_ENABLED
17326 
17327 /*
17328 This function is not protected by any mutex because it just reads immutable data.
17329 */
17330 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17331  VmaAllocator allocator,
17332  uint32_t memoryTypeBits,
17333  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17334  uint32_t* pMemoryTypeIndex)
17335 {
17336  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17337  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17338  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17339 
17340  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17341 
17342  if(pAllocationCreateInfo->memoryTypeBits != 0)
17343  {
17344  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17345  }
17346 
17347  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17348  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17349  uint32_t notPreferredFlags = 0;
17350 
17351  // Convert usage to requiredFlags and preferredFlags.
17352  switch(pAllocationCreateInfo->usage)
17353  {
17354  case VMA_MEMORY_USAGE_UNKNOWN:
17355  break;
17356  case VMA_MEMORY_USAGE_GPU_ONLY:
17357  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17358  {
17359  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17360  }
17361  break;
17362  case VMA_MEMORY_USAGE_CPU_ONLY:
17363  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17364  break;
17365  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17366  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17367  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17368  {
17369  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17370  }
17371  break;
17372  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17373  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17374  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17375  break;
17376  case VMA_MEMORY_USAGE_CPU_COPY:
17377  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17378  break;
17379  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17380  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17381  break;
17382  default:
17383  VMA_ASSERT(0);
17384  break;
17385  }
17386 
17387  // Avoid DEVICE_COHERENT unless explicitly requested.
17388  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17389  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17390  {
17391  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17392  }
17393 
17394  *pMemoryTypeIndex = UINT32_MAX;
17395  uint32_t minCost = UINT32_MAX;
17396  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17397  memTypeIndex < allocator->GetMemoryTypeCount();
17398  ++memTypeIndex, memTypeBit <<= 1)
17399  {
17400  // This memory type is acceptable according to memoryTypeBits bitmask.
17401  if((memTypeBit & memoryTypeBits) != 0)
17402  {
17403  const VkMemoryPropertyFlags currFlags =
17404  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17405  // This memory type contains requiredFlags.
17406  if((requiredFlags & ~currFlags) == 0)
17407  {
17408  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17409  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17410  VmaCountBitsSet(currFlags & notPreferredFlags);
17411  // Remember memory type with lowest cost.
17412  if(currCost < minCost)
17413  {
17414  *pMemoryTypeIndex = memTypeIndex;
17415  if(currCost == 0)
17416  {
17417  return VK_SUCCESS;
17418  }
17419  minCost = currCost;
17420  }
17421  }
17422  }
17423  }
17424  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17425 }
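
/*
Example: picking a memory type for a staging buffer (a sketch):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator,
        UINT32_MAX, // memoryTypeBits: no restriction from a particular resource
        &allocCreateInfo,
        &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
*/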
17426 
17427 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17428  VmaAllocator allocator,
17429  const VkBufferCreateInfo* pBufferCreateInfo,
17430  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17431  uint32_t* pMemoryTypeIndex)
17432 {
17433  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17434  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17435  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17436  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17437 
17438  const VkDevice hDev = allocator->m_hDevice;
17439  VkBuffer hBuffer = VK_NULL_HANDLE;
17440  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17441  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17442  if(res == VK_SUCCESS)
17443  {
17444  VkMemoryRequirements memReq = {};
17445  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17446  hDev, hBuffer, &memReq);
17447 
17448  res = vmaFindMemoryTypeIndex(
17449  allocator,
17450  memReq.memoryTypeBits,
17451  pAllocationCreateInfo,
17452  pMemoryTypeIndex);
17453 
17454  allocator->GetVulkanFunctions().vkDestroyBuffer(
17455  hDev, hBuffer, allocator->GetAllocationCallbacks());
17456  }
17457  return res;
17458 }
17459 
17460 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17461  VmaAllocator allocator,
17462  const VkImageCreateInfo* pImageCreateInfo,
17463  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17464  uint32_t* pMemoryTypeIndex)
17465 {
17466  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17467  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17468  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17469  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17470 
17471  const VkDevice hDev = allocator->m_hDevice;
17472  VkImage hImage = VK_NULL_HANDLE;
17473  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17474  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17475  if(res == VK_SUCCESS)
17476  {
17477  VkMemoryRequirements memReq = {};
17478  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17479  hDev, hImage, &memReq);
17480 
17481  res = vmaFindMemoryTypeIndex(
17482  allocator,
17483  memReq.memoryTypeBits,
17484  pAllocationCreateInfo,
17485  pMemoryTypeIndex);
17486 
17487  allocator->GetVulkanFunctions().vkDestroyImage(
17488  hDev, hImage, allocator->GetAllocationCallbacks());
17489  }
17490  return res;
17491 }
17492 
17493 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
17494  VmaAllocator allocator,
17495  const VmaPoolCreateInfo* pCreateInfo,
17496  VmaPool* pPool)
17497 {
17498  VMA_ASSERT(allocator && pCreateInfo && pPool);
17499 
17500  VMA_DEBUG_LOG("vmaCreatePool");
17501 
17502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17503 
17504  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17505 
17506 #if VMA_RECORDING_ENABLED
17507  if(allocator->GetRecorder() != VMA_NULL)
17508  {
17509  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17510  }
17511 #endif
17512 
17513  return res;
17514 }
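
/*
Example: creating a small custom pool (a sketch; memTypeIndex would typically
come from one of the vmaFindMemoryTypeIndex* functions above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/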
17515 
17516 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17517  VmaAllocator allocator,
17518  VmaPool pool)
17519 {
17520  VMA_ASSERT(allocator);
17521 
17522  if(pool == VK_NULL_HANDLE)
17523  {
17524  return;
17525  }
17526 
17527  VMA_DEBUG_LOG("vmaDestroyPool");
17528 
17529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17530 
17531 #if VMA_RECORDING_ENABLED
17532  if(allocator->GetRecorder() != VMA_NULL)
17533  {
17534  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
17535  }
17536 #endif
17537 
17538  allocator->DestroyPool(pool);
17539 }
17540 
17541 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
17542  VmaAllocator allocator,
17543  VmaPool pool,
17544  VmaPoolStats* pPoolStats)
17545 {
17546  VMA_ASSERT(allocator && pool && pPoolStats);
17547 
17548  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17549 
17550  allocator->GetPoolStats(pool, pPoolStats);
17551 }
17552 
17553 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
17554  VmaAllocator allocator,
17555  VmaPool pool,
17556  size_t* pLostAllocationCount)
17557 {
17558  VMA_ASSERT(allocator && pool);
17559 
17560  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17561 
17562 #if VMA_RECORDING_ENABLED
17563  if(allocator->GetRecorder() != VMA_NULL)
17564  {
17565  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
17566  }
17567 #endif
17568 
17569  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
17570 }
17571 
17572 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
17573 {
17574  VMA_ASSERT(allocator && pool);
17575 
17576  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17577 
17578  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
17579 
17580  return allocator->CheckPoolCorruption(pool);
17581 }
17582 
17583 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
17584  VmaAllocator allocator,
17585  VmaPool pool,
17586  const char** ppName)
17587 {
17588  VMA_ASSERT(allocator && pool);
17589 
17590  VMA_DEBUG_LOG("vmaGetPoolName");
17591 
17592  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17593 
17594  *ppName = pool->GetName();
17595 }
17596 
17597 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
17598  VmaAllocator allocator,
17599  VmaPool pool,
17600  const char* pName)
17601 {
17602  VMA_ASSERT(allocator && pool);
17603 
17604  VMA_DEBUG_LOG("vmaSetPoolName");
17605 
17606  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17607 
17608  pool->SetName(pName);
17609 
17610 #if VMA_RECORDING_ENABLED
17611  if(allocator->GetRecorder() != VMA_NULL)
17612  {
17613  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
17614  }
17615 #endif
17616 }
17617 
17618 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
17619  VmaAllocator allocator,
17620  const VkMemoryRequirements* pVkMemoryRequirements,
17621  const VmaAllocationCreateInfo* pCreateInfo,
17622  VmaAllocation* pAllocation,
17623  VmaAllocationInfo* pAllocationInfo)
17624 {
17625  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
17626 
17627  VMA_DEBUG_LOG("vmaAllocateMemory");
17628 
17629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17630 
17631  VkResult result = allocator->AllocateMemory(
17632  *pVkMemoryRequirements,
17633  false, // requiresDedicatedAllocation
17634  false, // prefersDedicatedAllocation
17635  VK_NULL_HANDLE, // dedicatedBuffer
17636  UINT32_MAX, // dedicatedBufferUsage
17637  VK_NULL_HANDLE, // dedicatedImage
17638  *pCreateInfo,
17639  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17640  1, // allocationCount
17641  pAllocation);
17642 
17643 #if VMA_RECORDING_ENABLED
17644  if(allocator->GetRecorder() != VMA_NULL)
17645  {
17646  allocator->GetRecorder()->RecordAllocateMemory(
17647  allocator->GetCurrentFrameIndex(),
17648  *pVkMemoryRequirements,
17649  *pCreateInfo,
17650  *pAllocation);
17651  }
17652 #endif
17653 
17654  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17655  {
17656  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17657  }
17658 
17659  return result;
17660 }
17661 
17662 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
17663  VmaAllocator allocator,
17664  const VkMemoryRequirements* pVkMemoryRequirements,
17665  const VmaAllocationCreateInfo* pCreateInfo,
17666  size_t allocationCount,
17667  VmaAllocation* pAllocations,
17668  VmaAllocationInfo* pAllocationInfo)
17669 {
17670  if(allocationCount == 0)
17671  {
17672  return VK_SUCCESS;
17673  }
17674 
17675  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
17676 
17677  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
17678 
17679  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17680 
17681  VkResult result = allocator->AllocateMemory(
17682  *pVkMemoryRequirements,
17683  false, // requiresDedicatedAllocation
17684  false, // prefersDedicatedAllocation
17685  VK_NULL_HANDLE, // dedicatedBuffer
17686  UINT32_MAX, // dedicatedBufferUsage
17687  VK_NULL_HANDLE, // dedicatedImage
17688  *pCreateInfo,
17689  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17690  allocationCount,
17691  pAllocations);
17692 
17693 #if VMA_RECORDING_ENABLED
17694  if(allocator->GetRecorder() != VMA_NULL)
17695  {
17696  allocator->GetRecorder()->RecordAllocateMemoryPages(
17697  allocator->GetCurrentFrameIndex(),
17698  *pVkMemoryRequirements,
17699  *pCreateInfo,
17700  (uint64_t)allocationCount,
17701  pAllocations);
17702  }
17703 #endif
17704 
17705  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17706  {
17707  for(size_t i = 0; i < allocationCount; ++i)
17708  {
17709  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
17710  }
17711  }
17712 
17713  return result;
17714 }
17715 
17716 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
17717  VmaAllocator allocator,
17718  VkBuffer buffer,
17719  const VmaAllocationCreateInfo* pCreateInfo,
17720  VmaAllocation* pAllocation,
17721  VmaAllocationInfo* pAllocationInfo)
17722 {
17723  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17724 
17725  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
17726 
17727  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17728 
17729  VkMemoryRequirements vkMemReq = {};
17730  bool requiresDedicatedAllocation = false;
17731  bool prefersDedicatedAllocation = false;
17732  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
17733  requiresDedicatedAllocation,
17734  prefersDedicatedAllocation);
17735 
17736  VkResult result = allocator->AllocateMemory(
17737  vkMemReq,
17738  requiresDedicatedAllocation,
17739  prefersDedicatedAllocation,
17740  buffer, // dedicatedBuffer
17741  UINT32_MAX, // dedicatedBufferUsage
17742  VK_NULL_HANDLE, // dedicatedImage
17743  *pCreateInfo,
17744  VMA_SUBALLOCATION_TYPE_BUFFER,
17745  1, // allocationCount
17746  pAllocation);
17747 
17748 #if VMA_RECORDING_ENABLED
17749  if(allocator->GetRecorder() != VMA_NULL)
17750  {
17751  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
17752  allocator->GetCurrentFrameIndex(),
17753  vkMemReq,
17754  requiresDedicatedAllocation,
17755  prefersDedicatedAllocation,
17756  *pCreateInfo,
17757  *pAllocation);
17758  }
17759 #endif
17760 
17761  if(pAllocationInfo && result == VK_SUCCESS)
17762  {
17763  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17764  }
17765 
17766  return result;
17767 }
17768 
17769 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17770  VmaAllocator allocator,
17771  VkImage image,
17772  const VmaAllocationCreateInfo* pCreateInfo,
17773  VmaAllocation* pAllocation,
17774  VmaAllocationInfo* pAllocationInfo)
17775 {
17776  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17777 
17778  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17779 
17780  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17781 
17782  VkMemoryRequirements vkMemReq = {};
17783  bool requiresDedicatedAllocation = false;
17784  bool prefersDedicatedAllocation = false;
17785  allocator->GetImageMemoryRequirements(image, vkMemReq,
17786  requiresDedicatedAllocation, prefersDedicatedAllocation);
17787 
17788  VkResult result = allocator->AllocateMemory(
17789  vkMemReq,
17790  requiresDedicatedAllocation,
17791  prefersDedicatedAllocation,
17792  VK_NULL_HANDLE, // dedicatedBuffer
17793  UINT32_MAX, // dedicatedBufferUsage
17794  image, // dedicatedImage
17795  *pCreateInfo,
17796  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17797  1, // allocationCount
17798  pAllocation);
17799 
17800 #if VMA_RECORDING_ENABLED
17801  if(allocator->GetRecorder() != VMA_NULL)
17802  {
17803  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17804  allocator->GetCurrentFrameIndex(),
17805  vkMemReq,
17806  requiresDedicatedAllocation,
17807  prefersDedicatedAllocation,
17808  *pCreateInfo,
17809  *pAllocation);
17810  }
17811 #endif
17812 
17813  if(pAllocationInfo && result == VK_SUCCESS)
17814  {
17815  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17816  }
17817 
17818  return result;
17819 }
17820 
17821 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17822  VmaAllocator allocator,
17823  VmaAllocation allocation)
17824 {
17825  VMA_ASSERT(allocator);
17826 
17827  if(allocation == VK_NULL_HANDLE)
17828  {
17829  return;
17830  }
17831 
17832  VMA_DEBUG_LOG("vmaFreeMemory");
17833 
17834  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17835 
17836 #if VMA_RECORDING_ENABLED
17837  if(allocator->GetRecorder() != VMA_NULL)
17838  {
17839  allocator->GetRecorder()->RecordFreeMemory(
17840  allocator->GetCurrentFrameIndex(),
17841  allocation);
17842  }
17843 #endif
17844 
17845  allocator->FreeMemory(
17846  1, // allocationCount
17847  &allocation);
17848 }
17849 
17850 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17851  VmaAllocator allocator,
17852  size_t allocationCount,
17853  VmaAllocation* pAllocations)
17854 {
17855  if(allocationCount == 0)
17856  {
17857  return;
17858  }
17859 
17860  VMA_ASSERT(allocator);
17861 
17862  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17863 
17864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17865 
17866 #if VMA_RECORDING_ENABLED
17867  if(allocator->GetRecorder() != VMA_NULL)
17868  {
17869  allocator->GetRecorder()->RecordFreeMemoryPages(
17870  allocator->GetCurrentFrameIndex(),
17871  (uint64_t)allocationCount,
17872  pAllocations);
17873  }
17874 #endif
17875 
17876  allocator->FreeMemory(allocationCount, pAllocations);
17877 }
17878 
17879 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17880  VmaAllocator allocator,
17881  VmaAllocation allocation,
17882  VkDeviceSize newSize)
17883 {
17884  VMA_ASSERT(allocator && allocation);
17885 
17886  VMA_DEBUG_LOG("vmaResizeAllocation");
17887 
17888  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17889 
17890  return allocator->ResizeAllocation(allocation, newSize);
17891 }
17892 
17893 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17894  VmaAllocator allocator,
17895  VmaAllocation allocation,
17896  VmaAllocationInfo* pAllocationInfo)
17897 {
17898  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17899 
17900  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17901 
17902 #if VMA_RECORDING_ENABLED
17903  if(allocator->GetRecorder() != VMA_NULL)
17904  {
17905  allocator->GetRecorder()->RecordGetAllocationInfo(
17906  allocator->GetCurrentFrameIndex(),
17907  allocation);
17908  }
17909 #endif
17910 
17911  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17912 }
17913 
17914 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17915  VmaAllocator allocator,
17916  VmaAllocation allocation)
17917 {
17918  VMA_ASSERT(allocator && allocation);
17919 
17920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17921 
17922 #if VMA_RECORDING_ENABLED
17923  if(allocator->GetRecorder() != VMA_NULL)
17924  {
17925  allocator->GetRecorder()->RecordTouchAllocation(
17926  allocator->GetCurrentFrameIndex(),
17927  allocation);
17928  }
17929 #endif
17930 
17931  return allocator->TouchAllocation(allocation);
17932 }
17933 
17934 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17935  VmaAllocator allocator,
17936  VmaAllocation allocation,
17937  void* pUserData)
17938 {
17939  VMA_ASSERT(allocator && allocation);
17940 
17941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17942 
17943  allocation->SetUserData(allocator, pUserData);
17944 
17945 #if VMA_RECORDING_ENABLED
17946  if(allocator->GetRecorder() != VMA_NULL)
17947  {
17948  allocator->GetRecorder()->RecordSetAllocationUserData(
17949  allocator->GetCurrentFrameIndex(),
17950  allocation,
17951  pUserData);
17952  }
17953 #endif
17954 }
17955 
17956 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17957  VmaAllocator allocator,
17958  VmaAllocation* pAllocation)
17959 {
17960  VMA_ASSERT(allocator && pAllocation);
17961 
17962  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17963 
17964  allocator->CreateLostAllocation(pAllocation);
17965 
17966 #if VMA_RECORDING_ENABLED
17967  if(allocator->GetRecorder() != VMA_NULL)
17968  {
17969  allocator->GetRecorder()->RecordCreateLostAllocation(
17970  allocator->GetCurrentFrameIndex(),
17971  *pAllocation);
17972  }
17973 #endif
17974 }
17975 
17976 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17977  VmaAllocator allocator,
17978  VmaAllocation allocation,
17979  void** ppData)
17980 {
17981  VMA_ASSERT(allocator && allocation && ppData);
17982 
17983  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17984 
17985  VkResult res = allocator->Map(allocation, ppData);
17986 
17987 #if VMA_RECORDING_ENABLED
17988  if(allocator->GetRecorder() != VMA_NULL)
17989  {
17990  allocator->GetRecorder()->RecordMapMemory(
17991  allocator->GetCurrentFrameIndex(),
17992  allocation);
17993  }
17994 #endif
17995 
17996  return res;
17997 }
17998 
17999 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
18000  VmaAllocator allocator,
18001  VmaAllocation allocation)
18002 {
18003  VMA_ASSERT(allocator && allocation);
18004 
18005  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18006 
18007 #if VMA_RECORDING_ENABLED
18008  if(allocator->GetRecorder() != VMA_NULL)
18009  {
18010  allocator->GetRecorder()->RecordUnmapMemory(
18011  allocator->GetCurrentFrameIndex(),
18012  allocation);
18013  }
18014 #endif
18015 
18016  allocator->Unmap(allocation);
18017 }
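
/*
Example: writing to a host-visible allocation (a sketch; srcData/srcDataSize
are hypothetical application data; the flush is only strictly required for
non-HOST_COHERENT memory types, but is always safe to call):

    void* mapped = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, srcDataSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/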
18018 
18019 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18020 {
18021  VMA_ASSERT(allocator && allocation);
18022 
18023  VMA_DEBUG_LOG("vmaFlushAllocation");
18024 
18025  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18026 
18027  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
18028 
18029 #if VMA_RECORDING_ENABLED
18030  if(allocator->GetRecorder() != VMA_NULL)
18031  {
18032  allocator->GetRecorder()->RecordFlushAllocation(
18033  allocator->GetCurrentFrameIndex(),
18034  allocation, offset, size);
18035  }
18036 #endif
18037 }
18038 
18039 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18040 {
18041  VMA_ASSERT(allocator && allocation);
18042 
18043  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18044 
18045  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18046 
18047  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
18048 
18049 #if VMA_RECORDING_ENABLED
18050  if(allocator->GetRecorder() != VMA_NULL)
18051  {
18052  allocator->GetRecorder()->RecordInvalidateAllocation(
18053  allocator->GetCurrentFrameIndex(),
18054  allocation, offset, size);
18055  }
18056 #endif
18057 }
18058 
18059 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18060 {
18061  VMA_ASSERT(allocator);
18062 
18063  VMA_DEBUG_LOG("vmaCheckCorruption");
18064 
18065  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18066 
18067  return allocator->CheckCorruption(memoryTypeBits);
18068 }
18069 
18070 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18071  VmaAllocator allocator,
18072  VmaAllocation* pAllocations,
18073  size_t allocationCount,
18074  VkBool32* pAllocationsChanged,
18075  const VmaDefragmentationInfo *pDefragmentationInfo,
18076  VmaDefragmentationStats* pDefragmentationStats)
18077 {
18078  // Deprecated interface, reimplemented using the new one.
18079 
18080  VmaDefragmentationInfo2 info2 = {};
18081  info2.allocationCount = (uint32_t)allocationCount;
18082  info2.pAllocations = pAllocations;
18083  info2.pAllocationsChanged = pAllocationsChanged;
18084  if(pDefragmentationInfo != VMA_NULL)
18085  {
18086  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18087  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18088  }
18089  else
18090  {
18091  info2.maxCpuAllocationsToMove = UINT32_MAX;
18092  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18093  }
18094  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18095 
18096  VmaDefragmentationContext ctx;
18097  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18098  if(res == VK_NOT_READY)
18099  {
18100  res = vmaDefragmentationEnd(allocator, ctx);
18101  }
18102  return res;
18103 }
18104 
18105 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18106  VmaAllocator allocator,
18107  const VmaDefragmentationInfo2* pInfo,
18108  VmaDefragmentationStats* pStats,
18109  VmaDefragmentationContext *pContext)
18110 {
18111  VMA_ASSERT(allocator && pInfo && pContext);
18112 
18113  // Degenerate case: Nothing to defragment.
18114  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18115  {
18116  return VK_SUCCESS;
18117  }
18118 
18119  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18120  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18121  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18122  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18123 
18124  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18125 
18126  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18127 
18128  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18129 
18130 #if VMA_RECORDING_ENABLED
18131  if(allocator->GetRecorder() != VMA_NULL)
18132  {
18133  allocator->GetRecorder()->RecordDefragmentationBegin(
18134  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18135  }
18136 #endif
18137 
18138  return res;
18139 }
18140 
18141 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18142  VmaAllocator allocator,
18143  VmaDefragmentationContext context)
18144 {
18145  VMA_ASSERT(allocator);
18146 
18147  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18148 
18149  if(context != VK_NULL_HANDLE)
18150  {
18151  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18152 
18153 #if VMA_RECORDING_ENABLED
18154  if(allocator->GetRecorder() != VMA_NULL)
18155  {
18156  allocator->GetRecorder()->RecordDefragmentationEnd(
18157  allocator->GetCurrentFrameIndex(), context);
18158  }
18159 #endif
18160 
18161  return allocator->DefragmentationEnd(context);
18162  }
18163  else
18164  {
18165  return VK_SUCCESS;
18166  }
18167 }
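
/*
Example: CPU-side defragmentation of a set of allocations (a sketch;
allocs/allocCount are hypothetical application arrays; GPU-side moves would
additionally require filling commandBuffer and the maxGpu* limits):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to allocations that were moved must afterwards be
    // recreated and rebound by the application.
*/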
18168 
18169 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18170  VmaAllocator allocator,
18171  VmaDefragmentationContext context,
18172  VmaDefragmentationPassInfo* pInfo
18173  )
18174 {
18175  VMA_ASSERT(allocator);
18176  VMA_ASSERT(pInfo);
18177  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
18178 
18179  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18180 
18181  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18182 
18183  if(context == VK_NULL_HANDLE)
18184  {
18185  pInfo->moveCount = 0;
18186  return VK_SUCCESS;
18187  }
18188 
18189  return allocator->DefragmentationPassBegin(pInfo, context);
18190 }
18191 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18192  VmaAllocator allocator,
18193  VmaDefragmentationContext context)
18194 {
18195  VMA_ASSERT(allocator);
18196 
18197  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18198  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18199 
18200  if(context == VK_NULL_HANDLE)
18201  return VK_SUCCESS;
18202 
18203  return allocator->DefragmentationPassEnd(context);
18204 }
18205 
18206 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18207  VmaAllocator allocator,
18208  VmaAllocation allocation,
18209  VkBuffer buffer)
18210 {
18211  VMA_ASSERT(allocator && allocation && buffer);
18212 
18213  VMA_DEBUG_LOG("vmaBindBufferMemory");
18214 
18215  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18216 
18217  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18218 }
18219 
18220 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18221  VmaAllocator allocator,
18222  VmaAllocation allocation,
18223  VkDeviceSize allocationLocalOffset,
18224  VkBuffer buffer,
18225  const void* pNext)
18226 {
18227  VMA_ASSERT(allocator && allocation && buffer);
18228 
18229  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18230 
18231  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18232 
18233  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18234 }
18235 
18236 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18237  VmaAllocator allocator,
18238  VmaAllocation allocation,
18239  VkImage image)
18240 {
18241  VMA_ASSERT(allocator && allocation && image);
18242 
18243  VMA_DEBUG_LOG("vmaBindImageMemory");
18244 
18245  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18246 
18247  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18248 }
18249 
18250 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18251  VmaAllocator allocator,
18252  VmaAllocation allocation,
18253  VkDeviceSize allocationLocalOffset,
18254  VkImage image,
18255  const void* pNext)
18256 {
18257  VMA_ASSERT(allocator && allocation && image);
18258 
18259  VMA_DEBUG_LOG("vmaBindImageMemory2");
18260 
18261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18262 
18263  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18264 }
18265 
18266 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18267  VmaAllocator allocator,
18268  const VkBufferCreateInfo* pBufferCreateInfo,
18269  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18270  VkBuffer* pBuffer,
18271  VmaAllocation* pAllocation,
18272  VmaAllocationInfo* pAllocationInfo)
18273 {
18274  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18275 
18276  if(pBufferCreateInfo->size == 0)
18277  {
18278  return VK_ERROR_VALIDATION_FAILED_EXT;
18279  }
18280  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18281  !allocator->m_UseKhrBufferDeviceAddress)
18282  {
18283  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18284  return VK_ERROR_VALIDATION_FAILED_EXT;
18285  }
18286 
18287  VMA_DEBUG_LOG("vmaCreateBuffer");
18288 
18289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18290 
18291  *pBuffer = VK_NULL_HANDLE;
18292  *pAllocation = VK_NULL_HANDLE;
18293 
18294  // 1. Create VkBuffer.
18295  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18296  allocator->m_hDevice,
18297  pBufferCreateInfo,
18298  allocator->GetAllocationCallbacks(),
18299  pBuffer);
18300  if(res >= 0)
18301  {
18302  // 2. vkGetBufferMemoryRequirements.
18303  VkMemoryRequirements vkMemReq = {};
18304  bool requiresDedicatedAllocation = false;
18305  bool prefersDedicatedAllocation = false;
18306  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18307  requiresDedicatedAllocation, prefersDedicatedAllocation);
18308 
18309  // 3. Allocate memory using allocator.
18310  res = allocator->AllocateMemory(
18311  vkMemReq,
18312  requiresDedicatedAllocation,
18313  prefersDedicatedAllocation,
18314  *pBuffer, // dedicatedBuffer
18315  pBufferCreateInfo->usage, // dedicatedBufferUsage
18316  VK_NULL_HANDLE, // dedicatedImage
18317  *pAllocationCreateInfo,
18318  VMA_SUBALLOCATION_TYPE_BUFFER,
18319  1, // allocationCount
18320  pAllocation);
18321 
18322 #if VMA_RECORDING_ENABLED
18323  if(allocator->GetRecorder() != VMA_NULL)
18324  {
18325  allocator->GetRecorder()->RecordCreateBuffer(
18326  allocator->GetCurrentFrameIndex(),
18327  *pBufferCreateInfo,
18328  *pAllocationCreateInfo,
18329  *pAllocation);
18330  }
18331 #endif
18332 
18333  if(res >= 0)
18334  {
18335  // 4. Bind buffer with memory.
18336  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18337  {
18338  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18339  }
18340  if(res >= 0)
18341  {
18342  // All steps succeeded.
18343  #if VMA_STATS_STRING_ENABLED
18344  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
18345  #endif
18346  if(pAllocationInfo != VMA_NULL)
18347  {
18348  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18349  }
18350 
18351  return VK_SUCCESS;
18352  }
18353  allocator->FreeMemory(
18354  1, // allocationCount
18355  pAllocation);
18356  *pAllocation = VK_NULL_HANDLE;
18357  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18358  *pBuffer = VK_NULL_HANDLE;
18359  return res;
18360  }
18361  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18362  *pBuffer = VK_NULL_HANDLE;
18363  return res;
18364  }
18365  return res;
18366 }
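
/*
Example: creating a buffer together with its memory in one call (a sketch):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL); // pAllocationInfo is optional
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/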
18367 
18368 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18369  VmaAllocator allocator,
18370  VkBuffer buffer,
18371  VmaAllocation allocation)
18372 {
18373  VMA_ASSERT(allocator);
18374 
18375  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18376  {
18377  return;
18378  }
18379 
18380  VMA_DEBUG_LOG("vmaDestroyBuffer");
18381 
18382  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18383 
18384 #if VMA_RECORDING_ENABLED
18385  if(allocator->GetRecorder() != VMA_NULL)
18386  {
18387  allocator->GetRecorder()->RecordDestroyBuffer(
18388  allocator->GetCurrentFrameIndex(),
18389  allocation);
18390  }
18391 #endif
18392 
18393  if(buffer != VK_NULL_HANDLE)
18394  {
18395  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18396  }
18397 
18398  if(allocation != VK_NULL_HANDLE)
18399  {
18400  allocator->FreeMemory(
18401  1, // allocationCount
18402  &allocation);
18403  }
18404 }
18405 
18406 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
18407  VmaAllocator allocator,
18408  const VkImageCreateInfo* pImageCreateInfo,
18409  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18410  VkImage* pImage,
18411  VmaAllocation* pAllocation,
18412  VmaAllocationInfo* pAllocationInfo)
18413 {
18414  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
18415 
18416  if(pImageCreateInfo->extent.width == 0 ||
18417  pImageCreateInfo->extent.height == 0 ||
18418  pImageCreateInfo->extent.depth == 0 ||
18419  pImageCreateInfo->mipLevels == 0 ||
18420  pImageCreateInfo->arrayLayers == 0)
18421  {
18422  return VK_ERROR_VALIDATION_FAILED_EXT;
18423  }
18424 
18425  VMA_DEBUG_LOG("vmaCreateImage");
18426 
18427  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18428 
18429  *pImage = VK_NULL_HANDLE;
18430  *pAllocation = VK_NULL_HANDLE;
18431 
18432  // 1. Create VkImage.
18433  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
18434  allocator->m_hDevice,
18435  pImageCreateInfo,
18436  allocator->GetAllocationCallbacks(),
18437  pImage);
18438  if(res >= 0)
18439  {
18440  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
18441  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
18442  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
18443 
18444  // 2. Allocate memory using allocator.
18445  VkMemoryRequirements vkMemReq = {};
18446  bool requiresDedicatedAllocation = false;
18447  bool prefersDedicatedAllocation = false;
18448  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
18449  requiresDedicatedAllocation, prefersDedicatedAllocation);
18450 
18451  res = allocator->AllocateMemory(
18452  vkMemReq,
18453  requiresDedicatedAllocation,
18454  prefersDedicatedAllocation,
18455  VK_NULL_HANDLE, // dedicatedBuffer
18456  UINT32_MAX, // dedicatedBufferUsage
18457  *pImage, // dedicatedImage
18458  *pAllocationCreateInfo,
18459  suballocType,
18460  1, // allocationCount
18461  pAllocation);
18462 
18463 #if VMA_RECORDING_ENABLED
18464  if(allocator->GetRecorder() != VMA_NULL)
18465  {
18466  allocator->GetRecorder()->RecordCreateImage(
18467  allocator->GetCurrentFrameIndex(),
18468  *pImageCreateInfo,
18469  *pAllocationCreateInfo,
18470  *pAllocation);
18471  }
18472 #endif
18473 
18474  if(res >= 0)
18475  {
18476  // 3. Bind image with memory.
18477  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18478  {
18479  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
18480  }
18481  if(res >= 0)
18482  {
18483  // All steps succeeded.
18484  #if VMA_STATS_STRING_ENABLED
18485  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
18486  #endif
18487  if(pAllocationInfo != VMA_NULL)
18488  {
18489  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18490  }
18491 
18492  return VK_SUCCESS;
18493  }
18494  allocator->FreeMemory(
18495  1, // allocationCount
18496  pAllocation);
18497  *pAllocation = VK_NULL_HANDLE;
18498  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18499  *pImage = VK_NULL_HANDLE;
18500  return res;
18501  }
18502  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18503  *pImage = VK_NULL_HANDLE;
18504  return res;
18505  }
18506  return res;
18507 }
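
/*
Example: creating a sampled 2D image analogously (a sketch):

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/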
18508 
18509 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
18510  VmaAllocator allocator,
18511  VkImage image,
18512  VmaAllocation allocation)
18513 {
18514  VMA_ASSERT(allocator);
18515 
18516  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18517  {
18518  return;
18519  }
18520 
18521  VMA_DEBUG_LOG("vmaDestroyImage");
18522 
18523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18524 
18525 #if VMA_RECORDING_ENABLED
18526  if(allocator->GetRecorder() != VMA_NULL)
18527  {
18528  allocator->GetRecorder()->RecordDestroyImage(
18529  allocator->GetCurrentFrameIndex(),
18530  allocation);
18531  }
18532 #endif
18533 
18534  if(image != VK_NULL_HANDLE)
18535  {
18536  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
18537  }
18538  if(allocation != VK_NULL_HANDLE)
18539  {
18540  allocator->FreeMemory(
18541  1, // allocationCount
18542  &allocation);
18543  }
18544 }
18545 
18546 #endif // #ifdef VMA_IMPLEMENTATION
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3312
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2866
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2608
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2040
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3421
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2599
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2459
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2340
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2803
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2815
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3344
VmaDefragmentationPassMoveInfo
Definition: vk_mem_alloc.h:3392
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2856
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2107
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2105
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3310
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2995
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2527
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2115
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2796
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2671
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2358
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2534
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2055
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3323
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2123
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3375
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3425
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2850
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2117
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2155
VMA_MEMORY_USAGE_CPU_ONLY
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2491
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3360
VmaAllocation
Represents single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2513
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3311
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2242
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2122
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2124
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3389
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2354
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2820
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2256
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2337
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3429
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationPassInfo::moveCount
uint32_t moveCount
Definition: vk_mem_alloc.h:3403
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2481
vmaBeginDefragmentationPass
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2348
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2103
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2346
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2188
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:2010
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2828
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2800
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2002
VmaPool
Represents custom memory pool.
VMA_MEMORY_USAGE_GPU_TO_CPU
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2507
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2578
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2806
VMA_MEMORY_USAGE_MAX_ENUM
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2523
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2342
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2121
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3320
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3416
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2380
VmaAllocatorInfo
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2270
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2794
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2652
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2625
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2349
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2191
VMA_ALLOCATION_CREATE_STRATEGY_MASK
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2629
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2182
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2230
VmaDefragmentationPassMoveInfo::allocation
VmaAllocation allocation
Definition: vk_mem_alloc.h:3393
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2412
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VmaDefragmentationPassInfo
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2113
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3009
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2176
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3314
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2128
VmaDeviceMemoryCallbacks::pUserData
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2006
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2350
VMA_MEMORY_USAGE_CPU_TO_GPU
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2498
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2622
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2619
VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2085
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2647
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2349
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:3000
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2161
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2120
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3014
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2758
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2357
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2235
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2869
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3402
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2636
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2356
VmaAllocatorCreateInfo::instance
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2247
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VMA_MEMORY_USAGE_UNKNOWN
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2464
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3380
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2127
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2842
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2129
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3365
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2981
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2558
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2126
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1979
vmaGetAllocatorInfo
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2859
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2740
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3427
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2350
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2589
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1986
VmaDefragmentationPassMoveInfo
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2641
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2125
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2638
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2657
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
vmaEndDefragmentationPass
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3341
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3431
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2665
VmaAllocatorInfo::physicalDevice
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2281
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3433
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2169
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2349
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2976
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2786
VmaAllocatorInfo
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2391
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2634
VmaDefragmentationContext
Represents Opaque object that represents started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3335
VMA_POOL_CREATE_ALGORITHM_MASK
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2790
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3370
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3411
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2595
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.