Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1870 /*
1871 Define this macro to 0/1 to disable/enable support for recording functionality,
1872 available through VmaAllocatorCreateInfo::pRecordSettings.
1873 */
1874 #ifndef VMA_RECORDING_ENABLED
1875  #define VMA_RECORDING_ENABLED 0
1876 #endif
1877 
1878 #ifndef NOMINMAX
1879  #define NOMINMAX // For windows.h
1880 #endif
1881 
1882 #ifndef VULKAN_H_
1883  #include <vulkan/vulkan.h>
1884 #endif
1885 
1886 #if VMA_RECORDING_ENABLED
1887  #include <windows.h>
1888 #endif
1889 
1890 // Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
1891 // where AAA = major, BBB = minor, CCC = patch.
1892 // If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1893 #if !defined(VMA_VULKAN_VERSION)
1894  #if defined(VK_VERSION_1_2)
1895  #define VMA_VULKAN_VERSION 1002000
1896  #elif defined(VK_VERSION_1_1)
1897  #define VMA_VULKAN_VERSION 1001000
1898  #else
1899  #define VMA_VULKAN_VERSION 1000000
1900  #endif
1901 #endif
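/*
Example (a hedged sketch, not part of the library): Vulkan 1.2 usage must also
be requested at allocator creation time; the handles are assumed to be created
by the application elsewhere.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2; // must not exceed VMA_VULKAN_VERSION
    // ... also fill physicalDevice, device, instance ...
*/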
1902 
1903 #if !defined(VMA_DEDICATED_ALLOCATION)
1904  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1905  #define VMA_DEDICATED_ALLOCATION 1
1906  #else
1907  #define VMA_DEDICATED_ALLOCATION 0
1908  #endif
1909 #endif
1910 
1911 #if !defined(VMA_BIND_MEMORY2)
1912  #if VK_KHR_bind_memory2
1913  #define VMA_BIND_MEMORY2 1
1914  #else
1915  #define VMA_BIND_MEMORY2 0
1916  #endif
1917 #endif
1918 
1919 #if !defined(VMA_MEMORY_BUDGET)
1920  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1921  #define VMA_MEMORY_BUDGET 1
1922  #else
1923  #define VMA_MEMORY_BUDGET 0
1924  #endif
1925 #endif
1926 
1927 // Defined to 1 when the VK_KHR_buffer_device_address device extension or the equivalent core Vulkan 1.2 feature is defined in the Vulkan headers.
1928 #if !defined(VMA_BUFFER_DEVICE_ADDRESS)
1929  #if VK_KHR_buffer_device_address || VK_EXT_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
1930  #define VMA_BUFFER_DEVICE_ADDRESS 1
1931  #else
1932  #define VMA_BUFFER_DEVICE_ADDRESS 0
1933  #endif
1934 #endif
1935 
1936 // Define these macros to decorate all public functions with additional code,
1937 // before and after the returned type, respectively. This may be useful for
1938 // exporting the functions when compiling VMA as a separate library. Example:
1939 // #define VMA_CALL_PRE __declspec(dllexport)
1940 // #define VMA_CALL_POST __cdecl
1941 #ifndef VMA_CALL_PRE
1942  #define VMA_CALL_PRE
1943 #endif
1944 #ifndef VMA_CALL_POST
1945  #define VMA_CALL_POST
1946 #endif
1947 
1957 VK_DEFINE_HANDLE(VmaAllocator)
1958 
1959 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1961  VmaAllocator allocator,
1962  uint32_t memoryType,
1963  VkDeviceMemory memory,
1964  VkDeviceSize size);
1966 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1967  VmaAllocator allocator,
1968  uint32_t memoryType,
1969  VkDeviceMemory memory,
1970  VkDeviceSize size);
1971 
1977 typedef struct VmaDeviceMemoryCallbacks {
1979  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1981  PFN_vmaFreeDeviceMemoryFunction pfnFree;
1982 } VmaDeviceMemoryCallbacks;
1985 
2081 
2084 typedef VkFlags VmaAllocatorCreateFlags;
2085 
2090 typedef struct VmaVulkanFunctions {
2091  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
2092  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
2093  PFN_vkAllocateMemory vkAllocateMemory;
2094  PFN_vkFreeMemory vkFreeMemory;
2095  PFN_vkMapMemory vkMapMemory;
2096  PFN_vkUnmapMemory vkUnmapMemory;
2097  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
2098  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
2099  PFN_vkBindBufferMemory vkBindBufferMemory;
2100  PFN_vkBindImageMemory vkBindImageMemory;
2101  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
2102  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
2103  PFN_vkCreateBuffer vkCreateBuffer;
2104  PFN_vkDestroyBuffer vkDestroyBuffer;
2105  PFN_vkCreateImage vkCreateImage;
2106  PFN_vkDestroyImage vkDestroyImage;
2107  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
2108 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
2109  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
2110  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
2111 #endif
2112 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
2113  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
2114  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
2115 #endif
2116 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
2117  PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
2118 #endif
2119 } VmaVulkanFunctions;
2120 
2122 typedef enum VmaRecordFlagBits {
2126  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
2127  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2128 } VmaRecordFlagBits;
2129 
2132 typedef VkFlags VmaRecordFlags;
2133 
2135 typedef struct VmaRecordSettings
2136 {
2141  VmaRecordFlags flags;
2146  const char* pFilePath;
2147 } VmaRecordSettings;
2148 
2150 typedef struct VmaAllocatorCreateInfo
2151 {
2155 
2156  VkPhysicalDevice physicalDevice;
2158 
2159  VkDevice device;
2161 
2164 
2165  const VkAllocationCallbacks* pAllocationCallbacks;
2167 
2207  const VkDeviceSize* pHeapSizeLimit;
2232  VkInstance instance;
2242 } VmaAllocatorCreateInfo;
2243 
2245 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2246  const VmaAllocatorCreateInfo* pCreateInfo,
2247  VmaAllocator* pAllocator);
2248 
2250 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2251  VmaAllocator allocator);
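/*
Typical usage (a minimal sketch; physicalDevice, device and instance are
assumed to be valid handles created by the application):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/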
2252 
2255 typedef struct VmaAllocatorInfo
2256 {
2261  VkInstance instance;
2266  VkPhysicalDevice physicalDevice;
2271  VkDevice device;
2272 } VmaAllocatorInfo;
2273 
2279 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo);
2280 
2285 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2286  VmaAllocator allocator,
2287  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
2288 
2293 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2294  VmaAllocator allocator,
2295  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
2296 
2303 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2304  VmaAllocator allocator,
2305  uint32_t memoryTypeIndex,
2306  VkMemoryPropertyFlags* pFlags);
2307 
2316 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2317  VmaAllocator allocator,
2318  uint32_t frameIndex);
2319 
2322 typedef struct VmaStatInfo
2323 {
2325  uint32_t blockCount;
2331  VkDeviceSize usedBytes;
2333  VkDeviceSize unusedBytes;
2336 } VmaStatInfo;
2337 
2339 typedef struct VmaStats
2340 {
2341  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2342  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2344 } VmaStats;
2345 
2355 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2356  VmaAllocator allocator,
2357  VmaStats* pStats);
2358 
2361 typedef struct VmaBudget
2362 {
2365  VkDeviceSize blockBytes;
2366 
2376  VkDeviceSize allocationBytes;
2377 
2386  VkDeviceSize usage;
2387 
2397  VkDeviceSize budget;
2398 } VmaBudget;
2399 
2410 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2411  VmaAllocator allocator,
2412  VmaBudget* pBudget);
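/*
Example (a sketch; heapIndex and requiredSize are assumed application
variables): checking how close a heap is to its budget before making a large
allocation. pBudget is expected to point to an array with one element per
memory heap.

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    if(budgets[heapIndex].usage + requiredSize >= budgets[heapIndex].budget)
    {
        // Over budget - free something or allocate in a different heap.
    }
*/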
2413 
2414 #ifndef VMA_STATS_STRING_ENABLED
2415 #define VMA_STATS_STRING_ENABLED 1
2416 #endif
2417 
2418 #if VMA_STATS_STRING_ENABLED
2419 
2421 
2423 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2424  VmaAllocator allocator,
2425  char** ppStatsString,
2426  VkBool32 detailedMap);
2427 
2428 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2429  VmaAllocator allocator,
2430  char* pStatsString);
2431 
2432 #endif // #if VMA_STATS_STRING_ENABLED
2433 
2442 VK_DEFINE_HANDLE(VmaPool)
2443 
2444 typedef enum VmaMemoryUsage
2445 {
2507 
2509 } VmaMemoryUsage;
2510 
2520 
2585 
2601 
2611 
2618 
2622 
2623 typedef struct VmaAllocationCreateInfo
2624 {
2637  VkMemoryPropertyFlags requiredFlags;
2642  VkMemoryPropertyFlags preferredFlags;
2650  uint32_t memoryTypeBits;
2663  void* pUserData;
2664 } VmaAllocationCreateInfo;
2665 
2682 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2683  VmaAllocator allocator,
2684  uint32_t memoryTypeBits,
2685  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2686  uint32_t* pMemoryTypeIndex);
2687 
2700 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2701  VmaAllocator allocator,
2702  const VkBufferCreateInfo* pBufferCreateInfo,
2703  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2704  uint32_t* pMemoryTypeIndex);
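/*
Example (a sketch): finding a host-visible, host-coherent memory type suitable
for a staging buffer, e.g. to fill VmaPoolCreateInfo::memoryTypeIndex.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/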
2705 
2718 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2719  VmaAllocator allocator,
2720  const VkImageCreateInfo* pImageCreateInfo,
2721  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2722  uint32_t* pMemoryTypeIndex);
2723 
2744 
2761 
2772 
2778 
2781 typedef VkFlags VmaPoolCreateFlags;
2782 
2785 typedef struct VmaPoolCreateInfo {
2800  VkDeviceSize blockSize;
2828 } VmaPoolCreateInfo;
2829 
2832 typedef struct VmaPoolStats {
2835  VkDeviceSize size;
2838  VkDeviceSize unusedSize;
2851  VkDeviceSize unusedRangeSizeMax;
2854  size_t blockCount;
2855 } VmaPoolStats;
2856 
2863 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2864  VmaAllocator allocator,
2865  const VmaPoolCreateInfo* pCreateInfo,
2866  VmaPool* pPool);
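/*
Example (a sketch; memTypeIndex is assumed to come from one of the
vmaFindMemoryTypeIndex* functions above; memoryTypeIndex and the
VmaAllocationCreateInfo::pool member used for routing allocations are part of
the full header but collapsed from this listing):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 0 means default block size

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from the pool ...
    vmaDestroyPool(allocator, pool);
*/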
2867 
2870 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2871  VmaAllocator allocator,
2872  VmaPool pool);
2873 
2880 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2881  VmaAllocator allocator,
2882  VmaPool pool,
2883  VmaPoolStats* pPoolStats);
2884 
2891 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
2892  VmaAllocator allocator,
2893  VmaPool pool,
2894  size_t* pLostAllocationCount);
2895 
2910 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2911 
2918 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
2919  VmaAllocator allocator,
2920  VmaPool pool,
2921  const char** ppName);
2922 
2928 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
2929  VmaAllocator allocator,
2930  VmaPool pool,
2931  const char* pName);
2932 
2957 VK_DEFINE_HANDLE(VmaAllocation)
2958 
2959 
2961 typedef struct VmaAllocationInfo {
2966  uint32_t memoryType;
2975  VkDeviceMemory deviceMemory;
2980  VkDeviceSize offset;
2985  VkDeviceSize size;
2999  void* pUserData;
3000 } VmaAllocationInfo;
3001 
3012 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
3013  VmaAllocator allocator,
3014  const VkMemoryRequirements* pVkMemoryRequirements,
3015  const VmaAllocationCreateInfo* pCreateInfo,
3016  VmaAllocation* pAllocation,
3017  VmaAllocationInfo* pAllocationInfo);
3018 
3038 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
3039  VmaAllocator allocator,
3040  const VkMemoryRequirements* pVkMemoryRequirements,
3041  const VmaAllocationCreateInfo* pCreateInfo,
3042  size_t allocationCount,
3043  VmaAllocation* pAllocations,
3044  VmaAllocationInfo* pAllocationInfo);
3045 
3052 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
3053  VmaAllocator allocator,
3054  VkBuffer buffer,
3055  const VmaAllocationCreateInfo* pCreateInfo,
3056  VmaAllocation* pAllocation,
3057  VmaAllocationInfo* pAllocationInfo);
3058 
3060 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
3061  VmaAllocator allocator,
3062  VkImage image,
3063  const VmaAllocationCreateInfo* pCreateInfo,
3064  VmaAllocation* pAllocation,
3065  VmaAllocationInfo* pAllocationInfo);
3066 
3071 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
3072  VmaAllocator allocator,
3073  VmaAllocation allocation);
3074 
3085 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
3086  VmaAllocator allocator,
3087  size_t allocationCount,
3088  VmaAllocation* pAllocations);
3089 
3097 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
3098  VmaAllocator allocator,
3099  VmaAllocation allocation,
3100  VkDeviceSize newSize);
3101 
3118 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
3119  VmaAllocator allocator,
3120  VmaAllocation allocation,
3121  VmaAllocationInfo* pAllocationInfo);
3122 
3137 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
3138  VmaAllocator allocator,
3139  VmaAllocation allocation);
3140 
3154 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
3155  VmaAllocator allocator,
3156  VmaAllocation allocation,
3157  void* pUserData);
3158 
3169 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
3170  VmaAllocator allocator,
3171  VmaAllocation* pAllocation);
3172 
3211 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3212  VmaAllocator allocator,
3213  VmaAllocation allocation,
3214  void** ppData);
3215 
3224 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3225  VmaAllocator allocator,
3226  VmaAllocation allocation);
3227 
3246 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3247 
3266 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
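/*
Example (a sketch; the allocation is assumed to be in HOST_VISIBLE memory and
srcData/srcSize to be the application's data):

    void* mapped = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mapped);
    memcpy(mapped, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, allocation);
    // Needed only when the memory type is not HOST_COHERENT:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/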
3267 
3284 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
3285 
3292 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3293 
3294 typedef enum VmaDefragmentationFlagBits {
3296  VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
3297  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
3298 } VmaDefragmentationFlagBits;
3299 typedef VkFlags VmaDefragmentationFlags;
3300 
3305 typedef struct VmaDefragmentationInfo2 {
3329  uint32_t poolCount;
3350  VkDeviceSize maxCpuBytesToMove;
3360  VkDeviceSize maxGpuBytesToMove;
3374  VkCommandBuffer commandBuffer;
3375 } VmaDefragmentationInfo2;
3376 
3377 typedef struct VmaDefragmentationPassMoveInfo {
3378  VmaAllocation allocation;
3379  VkDeviceMemory memory;
3380  VkDeviceSize offset;
3381 } VmaDefragmentationPassMoveInfo;
3382 
3387 typedef struct VmaDefragmentationPassInfo {
3388  uint32_t moveCount;
3389  VmaDefragmentationPassMoveInfo* pMoves;
3390 } VmaDefragmentationPassInfo;
3391 
3396 typedef struct VmaDefragmentationInfo {
3401  VkDeviceSize maxBytesToMove;
3402  uint32_t maxAllocationsToMove;
3407 } VmaDefragmentationInfo;
3408 
3410 typedef struct VmaDefragmentationStats {
3412  VkDeviceSize bytesMoved;
3414  VkDeviceSize bytesFreed;
3416  uint32_t allocationsMoved;
3418  uint32_t deviceMemoryBlocksFreed;
3419 } VmaDefragmentationStats;
3420 
3450 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3451  VmaAllocator allocator,
3452  const VmaDefragmentationInfo2* pInfo,
3453  VmaDefragmentationStats* pStats,
3454  VmaDefragmentationContext *pContext);
3455 
3461 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3462  VmaAllocator allocator,
3463  VmaDefragmentationContext context);
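/*
Example (a rough sketch; the lists of allocations/pools to defragment are
members of the full VmaDefragmentationInfo2, collapsed from this listing, and
are assumed to be filled by the application):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // no limit
    defragInfo.commandBuffer = VK_NULL_HANDLE;    // CPU-side defragmentation only
    // ... fill the allocation/pool lists ...

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &ctx);
    vmaDefragmentationEnd(allocator, ctx);
*/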
3464 
3465 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
3466  VmaAllocator allocator,
3467  VmaDefragmentationContext context,
3468  VmaDefragmentationPassInfo* pInfo
3469 );
3470 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
3471  VmaAllocator allocator,
3472  VmaDefragmentationContext context
3473 );
3474 
3515 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3516  VmaAllocator allocator,
3517  VmaAllocation* pAllocations,
3518  size_t allocationCount,
3519  VkBool32* pAllocationsChanged,
3520  const VmaDefragmentationInfo *pDefragmentationInfo,
3521  VmaDefragmentationStats* pDefragmentationStats);
3522 
3535 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3536  VmaAllocator allocator,
3537  VmaAllocation allocation,
3538  VkBuffer buffer);
3539 
3550 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3551  VmaAllocator allocator,
3552  VmaAllocation allocation,
3553  VkDeviceSize allocationLocalOffset,
3554  VkBuffer buffer,
3555  const void* pNext);
3556 
3569 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3570  VmaAllocator allocator,
3571  VmaAllocation allocation,
3572  VkImage image);
3573 
3584 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3585  VmaAllocator allocator,
3586  VmaAllocation allocation,
3587  VkDeviceSize allocationLocalOffset,
3588  VkImage image,
3589  const void* pNext);
3590 
3617 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3618  VmaAllocator allocator,
3619  const VkBufferCreateInfo* pBufferCreateInfo,
3620  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3621  VkBuffer* pBuffer,
3622  VmaAllocation* pAllocation,
3623  VmaAllocationInfo* pAllocationInfo);
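/*
Example (a sketch): creating a device-local vertex buffer together with its
memory in one call. VMA_MEMORY_USAGE_GPU_ONLY and the `usage` member belong to
the full header but are collapsed from this listing; vertexDataSize is an
assumed application variable.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = vertexDataSize;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/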
3624 
3636 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3637  VmaAllocator allocator,
3638  VkBuffer buffer,
3639  VmaAllocation allocation);
3640 
3642 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3643  VmaAllocator allocator,
3644  const VkImageCreateInfo* pImageCreateInfo,
3645  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3646  VkImage* pImage,
3647  VmaAllocation* pAllocation,
3648  VmaAllocationInfo* pAllocationInfo);
3649 
3661 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3662  VmaAllocator allocator,
3663  VkImage image,
3664  VmaAllocation allocation);
3665 
3666 #ifdef __cplusplus
3667 }
3668 #endif
3669 
3670 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3671 
3672 // For Visual Studio IntelliSense.
3673 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3674 #define VMA_IMPLEMENTATION
3675 #endif
3676 
3677 #ifdef VMA_IMPLEMENTATION
3678 #undef VMA_IMPLEMENTATION
3679 
3680 #include <cstdint>
3681 #include <cstdlib>
3682 #include <cstring>
3683 #include <utility>
3684 
3685 /*******************************************************************************
3686 CONFIGURATION SECTION
3687 
3688 Define some of these macros before each #include of this header or change them
3689 here if you need other than the default behavior, depending on your environment.
3690 */
3691 
3692 /*
3693 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3694 internally, like:
3695 
3696  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3697 
3698 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3699 VmaAllocatorCreateInfo::pVulkanFunctions.
3700 */
3701 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3702 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3703 #endif
3704 
3705 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3706 //#define VMA_USE_STL_CONTAINERS 1
3707 
3708 /* Set this macro to 1 to make the library include and use STL containers:
3709 std::pair, std::vector, std::list, std::unordered_map.
3710 
3711 Set it to 0 or leave it undefined to make the library use its own
3712 implementation of the containers.
3713 */
3714 #if VMA_USE_STL_CONTAINERS
3715  #define VMA_USE_STL_VECTOR 1
3716  #define VMA_USE_STL_UNORDERED_MAP 1
3717  #define VMA_USE_STL_LIST 1
3718 #endif
3719 
3720 #ifndef VMA_USE_STL_SHARED_MUTEX
3721  // Compiler conforms to C++17.
3722  #if __cplusplus >= 201703L
3723  #define VMA_USE_STL_SHARED_MUTEX 1
3724  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3725  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3726  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3727  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3728  #define VMA_USE_STL_SHARED_MUTEX 1
3729  #else
3730  #define VMA_USE_STL_SHARED_MUTEX 0
3731  #endif
3732 #endif
3733 
3734 /*
3735 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3736 The library has its own container implementation.
3737 */
3738 #if VMA_USE_STL_VECTOR
3739  #include <vector>
3740 #endif
3741 
3742 #if VMA_USE_STL_UNORDERED_MAP
3743  #include <unordered_map>
3744 #endif
3745 
3746 #if VMA_USE_STL_LIST
3747  #include <list>
3748 #endif
3749 
3750 /*
3751 The following headers are used only in this CONFIGURATION section, so feel
3752 free to remove them if they are not needed.
3753 */
3754 #include <cassert> // for assert
3755 #include <algorithm> // for min, max
3756 #include <mutex>
3757 
3758 #ifndef VMA_NULL
3759  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3760  #define VMA_NULL nullptr
3761 #endif
3762 
3763 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3764 #include <cstdlib>
3765 void *aligned_alloc(size_t alignment, size_t size)
3766 {
3767  // alignment must be >= sizeof(void*)
3768  if(alignment < sizeof(void*))
3769  {
3770  alignment = sizeof(void*);
3771  }
3772 
3773  return memalign(alignment, size);
3774 }
3775 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3776 #include <cstdlib>
3777 void *aligned_alloc(size_t alignment, size_t size)
3778 {
3779  // alignment must be >= sizeof(void*)
3780  if(alignment < sizeof(void*))
3781  {
3782  alignment = sizeof(void*);
3783  }
3784 
3785  void *pointer;
3786  if(posix_memalign(&pointer, alignment, size) == 0)
3787  return pointer;
3788  return VMA_NULL;
3789 }
3790 #endif
3791 
3792 // If your compiler is not C++11-compatible and the definition of the
3793 // aligned_alloc() function is missing, uncommenting the following line may help:
3794 
3795 //#include <malloc.h>
3796 
3797 // Normal assert to check for programmer's errors, especially in Debug configuration.
3798 #ifndef VMA_ASSERT
3799  #ifdef NDEBUG
3800  #define VMA_ASSERT(expr)
3801  #else
3802  #define VMA_ASSERT(expr) assert(expr)
3803  #endif
3804 #endif
3805 
3806 // Assert that is called very often, e.g. inside data structures such as operator[].
3807 // Making it non-empty can slow the program down.
3808 #ifndef VMA_HEAVY_ASSERT
3809  #ifdef NDEBUG
3810  #define VMA_HEAVY_ASSERT(expr)
3811  #else
3812  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3813  #endif
3814 #endif
3815 
3816 #ifndef VMA_ALIGN_OF
3817  #define VMA_ALIGN_OF(type) (__alignof(type))
3818 #endif
3819 
3820 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3821  #if defined(_WIN32)
3822  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3823  #else
3824  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3825  #endif
3826 #endif
3827 
3828 #ifndef VMA_SYSTEM_FREE
3829  #if defined(_WIN32)
3830  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3831  #else
3832  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3833  #endif
3834 #endif
3835 
3836 #ifndef VMA_MIN
3837  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3838 #endif
3839 
3840 #ifndef VMA_MAX
3841  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3842 #endif
3843 
3844 #ifndef VMA_SWAP
3845  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3846 #endif
3847 
3848 #ifndef VMA_SORT
3849  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3850 #endif
3851 
3852 #ifndef VMA_DEBUG_LOG
3853  #define VMA_DEBUG_LOG(format, ...)
3854  /*
3855  #define VMA_DEBUG_LOG(format, ...) do { \
3856  printf(format, __VA_ARGS__); \
3857  printf("\n"); \
3858  } while(false)
3859  */
3860 #endif
3861 
3862 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3863 #if VMA_STATS_STRING_ENABLED
3864  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3865  {
3866  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3867  }
3868  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3869  {
3870  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3871  }
3872  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3873  {
3874  snprintf(outStr, strLen, "%p", ptr);
3875  }
3876 #endif
3877 
3878 #ifndef VMA_MUTEX
3879  class VmaMutex
3880  {
3881  public:
3882  void Lock() { m_Mutex.lock(); }
3883  void Unlock() { m_Mutex.unlock(); }
3884  bool TryLock() { return m_Mutex.try_lock(); }
3885  private:
3886  std::mutex m_Mutex;
3887  };
3888  #define VMA_MUTEX VmaMutex
3889 #endif
3890 
3891 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3892 #ifndef VMA_RW_MUTEX
3893  #if VMA_USE_STL_SHARED_MUTEX
3894  // Use std::shared_mutex from C++17.
3895  #include <shared_mutex>
3896  class VmaRWMutex
3897  {
3898  public:
3899  void LockRead() { m_Mutex.lock_shared(); }
3900  void UnlockRead() { m_Mutex.unlock_shared(); }
3901  bool TryLockRead() { return m_Mutex.try_lock_shared(); }
3902  void LockWrite() { m_Mutex.lock(); }
3903  void UnlockWrite() { m_Mutex.unlock(); }
3904  bool TryLockWrite() { return m_Mutex.try_lock(); }
3905  private:
3906  std::shared_mutex m_Mutex;
3907  };
3908  #define VMA_RW_MUTEX VmaRWMutex
3909  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3910  // Use SRWLOCK from WinAPI.
3911  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3912  class VmaRWMutex
3913  {
3914  public:
3915  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3916  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3917  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3918  bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
3919  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3920  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3921  bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
3922  private:
3923  SRWLOCK m_Lock;
3924  };
3925  #define VMA_RW_MUTEX VmaRWMutex
3926  #else
3927  // Less efficient fallback: Use normal mutex.
3928  class VmaRWMutex
3929  {
3930  public:
3931  void LockRead() { m_Mutex.Lock(); }
3932  void UnlockRead() { m_Mutex.Unlock(); }
3933  bool TryLockRead() { return m_Mutex.TryLock(); }
3934  void LockWrite() { m_Mutex.Lock(); }
3935  void UnlockWrite() { m_Mutex.Unlock(); }
3936  bool TryLockWrite() { return m_Mutex.TryLock(); }
3937  private:
3938  VMA_MUTEX m_Mutex;
3939  };
3940  #define VMA_RW_MUTEX VmaRWMutex
3941  #endif // #if VMA_USE_STL_SHARED_MUTEX
3942 #endif // #ifndef VMA_RW_MUTEX
3943 
3944 /*
3945 If providing your own implementation, you need to implement a subset of std::atomic.
3946 */
3947 #ifndef VMA_ATOMIC_UINT32
3948  #include <atomic>
3949  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3950 #endif
3951 
3952 #ifndef VMA_ATOMIC_UINT64
3953  #include <atomic>
3954  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3955 #endif
3956 
3957 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3958 
3962  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3963 #endif
3964 
3965 #ifndef VMA_DEBUG_ALIGNMENT
3966 
3970  #define VMA_DEBUG_ALIGNMENT (1)
3971 #endif
3972 
3973 #ifndef VMA_DEBUG_MARGIN
3974 
3978  #define VMA_DEBUG_MARGIN (0)
3979 #endif
3980 
3981 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3982 
3986  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3987 #endif
3988 
3989 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3990 
3995  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3996 #endif
3997 
3998 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3999 
4003  #define VMA_DEBUG_GLOBAL_MUTEX (0)
4004 #endif
4005 
4006 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
4007 
4011  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
4012 #endif
4013 
4014 #ifndef VMA_SMALL_HEAP_MAX_SIZE
4015  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
4017 #endif
4018 
4019 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
4020  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
4022 #endif
4023 
4024 #ifndef VMA_CLASS_NO_COPY
4025  #define VMA_CLASS_NO_COPY(className) \
4026  private: \
4027  className(const className&) = delete; \
4028  className& operator=(const className&) = delete;
4029 #endif
4030 
4031 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
4032 
4033 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
4034 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
4035 
4036 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
4037 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
4038 
4039 /*******************************************************************************
4040 END OF CONFIGURATION
4041 */
4042 
4043 // # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
4044 
4045 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
4046 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
4047 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
4048 
4049 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
4050 
4051 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
4052  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
4053 
4054 // Returns number of bits set to 1 in (v).
4055 static inline uint32_t VmaCountBitsSet(uint32_t v)
4056 {
4057  uint32_t c = v - ((v >> 1) & 0x55555555);
4058  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
4059  c = ((c >> 4) + c) & 0x0F0F0F0F;
4060  c = ((c >> 8) + c) & 0x00FF00FF;
4061  c = ((c >> 16) + c) & 0x0000FFFF;
4062  return c;
4063 }
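// Worked example: VmaCountBitsSet(0b1011) == 3.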
4064 
4065 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
4066 // Use types like uint32_t, uint64_t as T.
4067 template <typename T>
4068 static inline T VmaAlignUp(T val, T align)
4069 {
4070  return (val + align - 1) / align * align;
4071 }
4072 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
4073 // Use types like uint32_t, uint64_t as T.
4074 template <typename T>
4075 static inline T VmaAlignDown(T val, T align)
4076 {
4077  return val / align * align;
4078 }
4079 
4080 // Division with mathematical rounding to the nearest integer.
4081 template <typename T>
4082 static inline T VmaRoundDiv(T x, T y)
4083 {
4084  return (x + (y / (T)2)) / y;
4085 }
4086 
4087 /*
4088 Returns true if given number is a power of two.
4089 T must be an unsigned integer type, or a signed integer holding a nonnegative value.
4090 For 0 returns true.
4091 */
4092 template <typename T>
4093 inline bool VmaIsPow2(T x)
4094 {
4095  return (x & (x-1)) == 0;
4096 }
4097 
4098 // Returns smallest power of 2 greater or equal to v.
4099 static inline uint32_t VmaNextPow2(uint32_t v)
4100 {
4101  v--;
4102  v |= v >> 1;
4103  v |= v >> 2;
4104  v |= v >> 4;
4105  v |= v >> 8;
4106  v |= v >> 16;
4107  v++;
4108  return v;
4109 }
4110 static inline uint64_t VmaNextPow2(uint64_t v)
4111 {
4112  v--;
4113  v |= v >> 1;
4114  v |= v >> 2;
4115  v |= v >> 4;
4116  v |= v >> 8;
4117  v |= v >> 16;
4118  v |= v >> 32;
4119  v++;
4120  return v;
4121 }
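// Worked example: VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32.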
4122 
4123 // Returns largest power of 2 less or equal to v.
4124 static inline uint32_t VmaPrevPow2(uint32_t v)
4125 {
4126  v |= v >> 1;
4127  v |= v >> 2;
4128  v |= v >> 4;
4129  v |= v >> 8;
4130  v |= v >> 16;
4131  v = v ^ (v >> 1);
4132  return v;
4133 }
4134 static inline uint64_t VmaPrevPow2(uint64_t v)
4135 {
4136  v |= v >> 1;
4137  v |= v >> 2;
4138  v |= v >> 4;
4139  v |= v >> 8;
4140  v |= v >> 16;
4141  v |= v >> 32;
4142  v = v ^ (v >> 1);
4143  return v;
4144 }
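// Worked example: VmaPrevPow2(17u) == 16, VmaPrevPow2(16u) == 16.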
4145 
4146 static inline bool VmaStrIsEmpty(const char* pStr)
4147 {
4148  return pStr == VMA_NULL || *pStr == '\0';
4149 }
4150 
4151 #if VMA_STATS_STRING_ENABLED
4152 
4153 static const char* VmaAlgorithmToStr(uint32_t algorithm)
4154 {
4155  switch(algorithm)
4156  {
4157  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
4158  return "Linear";
4159  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
4160  return "Buddy";
4161  case 0:
4162  return "Default";
4163  default:
4164  VMA_ASSERT(0);
4165  return "";
4166  }
4167 }
4168 
4169 #endif // #if VMA_STATS_STRING_ENABLED
4170 
4171 #ifndef VMA_SORT
4172 
4173 template<typename Iterator, typename Compare>
4174 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
4175 {
4176  Iterator centerValue = end; --centerValue;
4177  Iterator insertIndex = beg;
4178  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
4179  {
4180  if(cmp(*memTypeIndex, *centerValue))
4181  {
4182  if(insertIndex != memTypeIndex)
4183  {
4184  VMA_SWAP(*memTypeIndex, *insertIndex);
4185  }
4186  ++insertIndex;
4187  }
4188  }
4189  if(insertIndex != centerValue)
4190  {
4191  VMA_SWAP(*insertIndex, *centerValue);
4192  }
4193  return insertIndex;
4194 }
4195 
4196 template<typename Iterator, typename Compare>
4197 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
4198 {
4199  if(beg < end)
4200  {
4201  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
4202  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
4203  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
4204  }
4205 }
4206 
4207 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
4208 
4209 #endif // #ifndef VMA_SORT
4210 
4211 /*
4212 Returns true if two memory blocks occupy overlapping pages.
4213 ResourceA must be at a lower memory offset than ResourceB.
4214 
4215 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
4216 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
4217 */
4218 static inline bool VmaBlocksOnSamePage(
4219  VkDeviceSize resourceAOffset,
4220  VkDeviceSize resourceASize,
4221  VkDeviceSize resourceBOffset,
4222  VkDeviceSize pageSize)
4223 {
4224  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
4225  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
4226  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
4227  VkDeviceSize resourceBStart = resourceBOffset;
4228  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
4229  return resourceAEndPage == resourceBStartPage;
4230 }
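// Worked example: with pageSize = 4096, resource A at offset 0 with size 100
// ends on page 0, while resource B starting at offset 4096 begins on page 1,
// so the function returns false. Had B started at offset 1000 instead, both
// would share page 0 and the function would return true.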
4231 
4232 enum VmaSuballocationType
4233 {
4234  VMA_SUBALLOCATION_TYPE_FREE = 0,
4235  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
4236  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
4237  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
4238  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
4239  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
4240  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
4241 };
4242 
4243 /*
4244 Returns true if given suballocation types could conflict and must respect
4245 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4246 or linear image and another one is optimal image. If type is unknown, behave
4247 conservatively.
4248 */
4249 static inline bool VmaIsBufferImageGranularityConflict(
4250  VmaSuballocationType suballocType1,
4251  VmaSuballocationType suballocType2)
4252 {
4253  if(suballocType1 > suballocType2)
4254  {
4255  VMA_SWAP(suballocType1, suballocType2);
4256  }
4257 
4258  switch(suballocType1)
4259  {
4260  case VMA_SUBALLOCATION_TYPE_FREE:
4261  return false;
4262  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4263  return true;
4264  case VMA_SUBALLOCATION_TYPE_BUFFER:
4265  return
4266  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4267  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4268  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4269  return
4270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4272  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4273  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4274  return
4275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4276  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4277  return false;
4278  default:
4279  VMA_ASSERT(0);
4280  return true;
4281  }
4282 }
4283 
4284 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
4285 {
4286 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4287  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
4288  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4289  for(size_t i = 0; i < numberCount; ++i, ++pDst)
4290  {
4291  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
4292  }
4293 #else
4294  // no-op
4295 #endif
4296 }
4297 
4298 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
4299 {
4300 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4301  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
4302  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4303  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4304  {
4305  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4306  {
4307  return false;
4308  }
4309  }
4310 #endif
4311  return true;
4312 }
4313 
4314 /*
4315 Fills structure with parameters of an example buffer to be used for transfers
4316 during GPU memory defragmentation.
4317 */
4318 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4319 {
4320  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4321  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4322  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4323  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4324 }
4325 
4326 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4327 struct VmaMutexLock
4328 {
4329  VMA_CLASS_NO_COPY(VmaMutexLock)
4330 public:
4331  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4332  m_pMutex(useMutex ? &mutex : VMA_NULL)
4333  { if(m_pMutex) { m_pMutex->Lock(); } }
4334  ~VmaMutexLock()
4335  { if(m_pMutex) { m_pMutex->Unlock(); } }
4336 private:
4337  VMA_MUTEX* m_pMutex;
4338 };
4339 
4340 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4341 struct VmaMutexLockRead
4342 {
4343  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4344 public:
4345  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4346  m_pMutex(useMutex ? &mutex : VMA_NULL)
4347  { if(m_pMutex) { m_pMutex->LockRead(); } }
4348  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4349 private:
4350  VMA_RW_MUTEX* m_pMutex;
4351 };
4352 
4353 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4354 struct VmaMutexLockWrite
4355 {
4356  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4357 public:
4358  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4359  m_pMutex(useMutex ? &mutex : VMA_NULL)
4360  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4361  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4362 private:
4363  VMA_RW_MUTEX* m_pMutex;
4364 };
4365 
4366 #if VMA_DEBUG_GLOBAL_MUTEX
4367  static VMA_MUTEX gDebugGlobalMutex;
4368  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4369 #else
4370  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4371 #endif
4372 
4373 // Minimum size of a free suballocation to register it in the free suballocation collection.
4374 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4375 
4376 /*
4377 Performs binary search and returns iterator to first element that is greater or
4378 equal to (key), according to comparison (cmp).
4379 
4380 Cmp should return true if first argument is less than second argument.
4381 
4382 The returned iterator points to the found element if it is present in the
4383 collection, or otherwise to the place where a new element with value (key) should be inserted.
4384 */
4385 template <typename CmpLess, typename IterT, typename KeyT>
4386 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
4387 {
4388  size_t down = 0, up = (end - beg);
4389  while(down < up)
4390  {
4391  const size_t mid = (down + up) / 2;
4392  if(cmp(*(beg+mid), key))
4393  {
4394  down = mid + 1;
4395  }
4396  else
4397  {
4398  up = mid;
4399  }
4400  }
4401  return beg + down;
4402 }
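// Worked example (hypothetical data): for a sorted array {1, 3, 3, 7} and
// cmp = operator<, key 3 yields an iterator to the first 3 (index 1), and
// key 4 yields an iterator to 7 (index 3) - the position where 4 would be
// inserted.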
4403 
4404 template<typename CmpLess, typename IterT, typename KeyT>
4405 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4406 {
4407  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4408  beg, end, value, cmp);
4409  if(it == end ||
4410  (!cmp(*it, value) && !cmp(value, *it)))
4411  {
4412  return it;
4413  }
4414  return end;
4415 }
4416 
4417 /*
4418 Returns true if all pointers in the array are non-null and unique.
4419 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4420 T must be pointer type, e.g. VmaAllocation, VmaPool.
4421 */
4422 template<typename T>
4423 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4424 {
4425  for(uint32_t i = 0; i < count; ++i)
4426  {
4427  const T iPtr = arr[i];
4428  if(iPtr == VMA_NULL)
4429  {
4430  return false;
4431  }
4432  for(uint32_t j = i + 1; j < count; ++j)
4433  {
4434  if(iPtr == arr[j])
4435  {
4436  return false;
4437  }
4438  }
4439  }
4440  return true;
4441 }
4442 
4443 template<typename MainT, typename NewT>
4444 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
4445 {
4446  newStruct->pNext = mainStruct->pNext;
4447  mainStruct->pNext = newStruct;
4448 }
4449 
4451 // Memory allocation
4452 
4453 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4454 {
4455  if((pAllocationCallbacks != VMA_NULL) &&
4456  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4457  {
4458  return (*pAllocationCallbacks->pfnAllocation)(
4459  pAllocationCallbacks->pUserData,
4460  size,
4461  alignment,
4462  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4463  }
4464  else
4465  {
4466  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4467  }
4468 }
4469 
4470 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4471 {
4472  if((pAllocationCallbacks != VMA_NULL) &&
4473  (pAllocationCallbacks->pfnFree != VMA_NULL))
4474  {
4475  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4476  }
4477  else
4478  {
4479  VMA_SYSTEM_FREE(ptr);
4480  }
4481 }
4482 
4483 template<typename T>
4484 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4485 {
4486  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4487 }
4488 
4489 template<typename T>
4490 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4491 {
4492  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4493 }
4494 
4495 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4496 
4497 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4498 
4499 template<typename T>
4500 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4501 {
4502  ptr->~T();
4503  VmaFree(pAllocationCallbacks, ptr);
4504 }
4505 
4506 template<typename T>
4507 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4508 {
4509  if(ptr != VMA_NULL)
4510  {
4511  for(size_t i = count; i--; )
4512  {
4513  ptr[i].~T();
4514  }
4515  VmaFree(pAllocationCallbacks, ptr);
4516  }
4517 }
4518 
4519 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4520 {
4521  if(srcStr != VMA_NULL)
4522  {
4523  const size_t len = strlen(srcStr);
4524  char* const result = vma_new_array(allocs, char, len + 1);
4525  memcpy(result, srcStr, len + 1);
4526  return result;
4527  }
4528  else
4529  {
4530  return VMA_NULL;
4531  }
4532 }
4533 
4534 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4535 {
4536  if(str != VMA_NULL)
4537  {
4538  const size_t len = strlen(str);
4539  vma_delete_array(allocs, str, len + 1);
4540  }
4541 }
4542 
4543 // STL-compatible allocator.
4544 template<typename T>
4545 class VmaStlAllocator
4546 {
4547 public:
4548  const VkAllocationCallbacks* const m_pCallbacks;
4549  typedef T value_type;
4550 
4551  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4552  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4553 
4554  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4555  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4556 
4557  template<typename U>
4558  bool operator==(const VmaStlAllocator<U>& rhs) const
4559  {
4560  return m_pCallbacks == rhs.m_pCallbacks;
4561  }
4562  template<typename U>
4563  bool operator!=(const VmaStlAllocator<U>& rhs) const
4564  {
4565  return m_pCallbacks != rhs.m_pCallbacks;
4566  }
4567 
4568  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4569 };
4570 
4571 #if VMA_USE_STL_VECTOR
4572 
4573 #define VmaVector std::vector
4574 
4575 template<typename T, typename allocatorT>
4576 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4577 {
4578  vec.insert(vec.begin() + index, item);
4579 }
4580 
4581 template<typename T, typename allocatorT>
4582 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4583 {
4584  vec.erase(vec.begin() + index);
4585 }
4586 
4587 #else // #if VMA_USE_STL_VECTOR
4588 
4589 /* Class with an interface compatible with a subset of std::vector.
4590 T must be POD because constructors and destructors are not called and memcpy is
4591 used for these objects. */
4592 template<typename T, typename AllocatorT>
4593 class VmaVector
4594 {
4595 public:
4596  typedef T value_type;
4597 
4598  VmaVector(const AllocatorT& allocator) :
4599  m_Allocator(allocator),
4600  m_pArray(VMA_NULL),
4601  m_Count(0),
4602  m_Capacity(0)
4603  {
4604  }
4605 
4606  VmaVector(size_t count, const AllocatorT& allocator) :
4607  m_Allocator(allocator),
4608  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4609  m_Count(count),
4610  m_Capacity(count)
4611  {
4612  }
4613 
4614  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4615  // value is unused.
4616  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4617  : VmaVector(count, allocator) {}
4618 
4619  VmaVector(const VmaVector<T, AllocatorT>& src) :
4620  m_Allocator(src.m_Allocator),
4621  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4622  m_Count(src.m_Count),
4623  m_Capacity(src.m_Count)
4624  {
4625  if(m_Count != 0)
4626  {
4627  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4628  }
4629  }
4630 
4631  ~VmaVector()
4632  {
4633  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4634  }
4635 
4636  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4637  {
4638  if(&rhs != this)
4639  {
4640  resize(rhs.m_Count);
4641  if(m_Count != 0)
4642  {
4643  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4644  }
4645  }
4646  return *this;
4647  }
4648 
4649  bool empty() const { return m_Count == 0; }
4650  size_t size() const { return m_Count; }
4651  T* data() { return m_pArray; }
4652  const T* data() const { return m_pArray; }
4653 
4654  T& operator[](size_t index)
4655  {
4656  VMA_HEAVY_ASSERT(index < m_Count);
4657  return m_pArray[index];
4658  }
4659  const T& operator[](size_t index) const
4660  {
4661  VMA_HEAVY_ASSERT(index < m_Count);
4662  return m_pArray[index];
4663  }
4664 
4665  T& front()
4666  {
4667  VMA_HEAVY_ASSERT(m_Count > 0);
4668  return m_pArray[0];
4669  }
4670  const T& front() const
4671  {
4672  VMA_HEAVY_ASSERT(m_Count > 0);
4673  return m_pArray[0];
4674  }
4675  T& back()
4676  {
4677  VMA_HEAVY_ASSERT(m_Count > 0);
4678  return m_pArray[m_Count - 1];
4679  }
4680  const T& back() const
4681  {
4682  VMA_HEAVY_ASSERT(m_Count > 0);
4683  return m_pArray[m_Count - 1];
4684  }
4685 
4686  void reserve(size_t newCapacity, bool freeMemory = false)
4687  {
4688  newCapacity = VMA_MAX(newCapacity, m_Count);
4689 
4690  if((newCapacity < m_Capacity) && !freeMemory)
4691  {
4692  newCapacity = m_Capacity;
4693  }
4694 
4695  if(newCapacity != m_Capacity)
4696  {
4697  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4698  if(m_Count != 0)
4699  {
4700  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4701  }
4702  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4703  m_Capacity = newCapacity;
4704  m_pArray = newArray;
4705  }
4706  }
4707 
4708  void resize(size_t newCount, bool freeMemory = false)
4709  {
4710  size_t newCapacity = m_Capacity;
4711  if(newCount > m_Capacity)
4712  {
4713  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4714  }
4715  else if(freeMemory)
4716  {
4717  newCapacity = newCount;
4718  }
4719 
4720  if(newCapacity != m_Capacity)
4721  {
4722  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4723  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4724  if(elementsToCopy != 0)
4725  {
4726  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4727  }
4728  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4729  m_Capacity = newCapacity;
4730  m_pArray = newArray;
4731  }
4732 
4733  m_Count = newCount;
4734  }
4735 
4736  void clear(bool freeMemory = false)
4737  {
4738  resize(0, freeMemory);
4739  }
4740 
4741  void insert(size_t index, const T& src)
4742  {
4743  VMA_HEAVY_ASSERT(index <= m_Count);
4744  const size_t oldCount = size();
4745  resize(oldCount + 1);
4746  if(index < oldCount)
4747  {
4748  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4749  }
4750  m_pArray[index] = src;
4751  }
4752 
4753  void remove(size_t index)
4754  {
4755  VMA_HEAVY_ASSERT(index < m_Count);
4756  const size_t oldCount = size();
4757  if(index < oldCount - 1)
4758  {
4759  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4760  }
4761  resize(oldCount - 1);
4762  }
4763 
4764  void push_back(const T& src)
4765  {
4766  const size_t newIndex = size();
4767  resize(newIndex + 1);
4768  m_pArray[newIndex] = src;
4769  }
4770 
4771  void pop_back()
4772  {
4773  VMA_HEAVY_ASSERT(m_Count > 0);
4774  resize(size() - 1);
4775  }
4776 
4777  void push_front(const T& src)
4778  {
4779  insert(0, src);
4780  }
4781 
4782  void pop_front()
4783  {
4784  VMA_HEAVY_ASSERT(m_Count > 0);
4785  remove(0);
4786  }
4787 
4788  typedef T* iterator;
4789 
4790  iterator begin() { return m_pArray; }
4791  iterator end() { return m_pArray + m_Count; }
4792 
4793 private:
4794  AllocatorT m_Allocator;
4795  T* m_pArray;
4796  size_t m_Count;
4797  size_t m_Capacity;
4798 };
4799 
4800 template<typename T, typename allocatorT>
4801 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4802 {
4803  vec.insert(index, item);
4804 }
4805 
4806 template<typename T, typename allocatorT>
4807 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4808 {
4809  vec.remove(index);
4810 }
4811 
4812 #endif // #if VMA_USE_STL_VECTOR
4813 
4814 template<typename CmpLess, typename VectorT>
4815 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4816 {
4817  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4818  vector.data(),
4819  vector.data() + vector.size(),
4820  value,
4821  CmpLess()) - vector.data();
4822  VmaVectorInsert(vector, indexToInsert, value);
4823  return indexToInsert;
4824 }
4825 
4826 template<typename CmpLess, typename VectorT>
4827 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4828 {
4829  CmpLess comparator;
4830  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4831  vector.begin(),
4832  vector.end(),
4833  value,
4834  comparator);
4835  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4836  {
4837  size_t indexToRemove = it - vector.begin();
4838  VmaVectorRemove(vector, indexToRemove);
4839  return true;
4840  }
4841  return false;
4842 }
4843 
4845 // class VmaPoolAllocator
4846 
4847 /*
4848 Allocator for objects of type T using a list of arrays (pools) to speed up
4849 allocation. The number of elements that can be allocated is not bounded, because
4850 the allocator can create multiple blocks.
4851 */
4852 template<typename T>
4853 class VmaPoolAllocator
4854 {
4855  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4856 public:
4857  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4858  ~VmaPoolAllocator();
4859  template<typename... Types> T* Alloc(Types... args);
4860  void Free(T* ptr);
4861 
4862 private:
4863  union Item
4864  {
4865  uint32_t NextFreeIndex;
4866  alignas(T) char Value[sizeof(T)];
4867  };
4868 
4869  struct ItemBlock
4870  {
4871  Item* pItems;
4872  uint32_t Capacity;
4873  uint32_t FirstFreeIndex;
4874  };
4875 
4876  const VkAllocationCallbacks* m_pAllocationCallbacks;
4877  const uint32_t m_FirstBlockCapacity;
4878  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4879 
4880  ItemBlock& CreateNewBlock();
4881 };
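// Usage sketch (hypothetical, not code from the library): objects come out
// constructed and their slots are recycled on Free().
//
//     VmaPoolAllocator<MyStruct> pool(pAllocationCallbacks, 32 /*firstBlockCapacity*/);
//     MyStruct* obj = pool.Alloc(/*constructor args*/);
//     pool.Free(obj); // calls ~MyStruct() and returns the slot to the free list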
4882 
4883 template<typename T>
4884 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4885  m_pAllocationCallbacks(pAllocationCallbacks),
4886  m_FirstBlockCapacity(firstBlockCapacity),
4887  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4888 {
4889  VMA_ASSERT(m_FirstBlockCapacity > 1);
4890 }
4891 
4892 template<typename T>
4893 VmaPoolAllocator<T>::~VmaPoolAllocator()
4894 {
4895  for(size_t i = m_ItemBlocks.size(); i--; )
4896  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4897  m_ItemBlocks.clear();
4898 }
4899 
4900 template<typename T>
4901 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
4902 {
4903  for(size_t i = m_ItemBlocks.size(); i--; )
4904  {
4905  ItemBlock& block = m_ItemBlocks[i];
4906  // This block has some free items: Use first one.
4907  if(block.FirstFreeIndex != UINT32_MAX)
4908  {
4909  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4910  block.FirstFreeIndex = pItem->NextFreeIndex;
4911  T* result = (T*)&pItem->Value;
4912  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
4913  return result;
4914  }
4915  }
4916 
4917  // No block has free item: Create new one and use it.
4918  ItemBlock& newBlock = CreateNewBlock();
4919  Item* const pItem = &newBlock.pItems[0];
4920  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4921  T* result = (T*)&pItem->Value;
4922  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
4923  return result;
4924 }
4925 
4926 template<typename T>
4927 void VmaPoolAllocator<T>::Free(T* ptr)
4928 {
4929  // Search all memory blocks to find ptr.
4930  for(size_t i = m_ItemBlocks.size(); i--; )
4931  {
4932  ItemBlock& block = m_ItemBlocks[i];
4933 
4934  // Casting to union.
4935  Item* pItemPtr;
4936  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4937 
4938  // Check if pItemPtr is in address range of this block.
4939  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4940  {
4941  ptr->~T(); // Explicit destructor call.
4942  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4943  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4944  block.FirstFreeIndex = index;
4945  return;
4946  }
4947  }
4948  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4949 }
4950 
4951 template<typename T>
4952 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4953 {
4954  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4955  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4956 
4957  const ItemBlock newBlock = {
4958  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4959  newBlockCapacity,
4960  0 };
4961 
4962  m_ItemBlocks.push_back(newBlock);
4963 
4964  // Set up a singly-linked list of all free items in this block.
4965  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4966  newBlock.pItems[i].NextFreeIndex = i + 1;
4967  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4968  return m_ItemBlocks.back();
4969 }
4970 
4972 // class VmaRawList, VmaList
4973 
4974 #if VMA_USE_STL_LIST
4975 
4976 #define VmaList std::list
4977 
4978 #else // #if VMA_USE_STL_LIST
4979 
4980 template<typename T>
4981 struct VmaListItem
4982 {
4983  VmaListItem* pPrev;
4984  VmaListItem* pNext;
4985  T Value;
4986 };
4987 
4988 // Doubly linked list.
4989 template<typename T>
4990 class VmaRawList
4991 {
4992  VMA_CLASS_NO_COPY(VmaRawList)
4993 public:
4994  typedef VmaListItem<T> ItemType;
4995 
4996  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4997  ~VmaRawList();
4998  void Clear();
4999 
5000  size_t GetCount() const { return m_Count; }
5001  bool IsEmpty() const { return m_Count == 0; }
5002 
5003  ItemType* Front() { return m_pFront; }
5004  const ItemType* Front() const { return m_pFront; }
5005  ItemType* Back() { return m_pBack; }
5006  const ItemType* Back() const { return m_pBack; }
5007 
5008  ItemType* PushBack();
5009  ItemType* PushFront();
5010  ItemType* PushBack(const T& value);
5011  ItemType* PushFront(const T& value);
5012  void PopBack();
5013  void PopFront();
5014 
5015  // Item can be null - it means PushBack.
5016  ItemType* InsertBefore(ItemType* pItem);
5017  // Item can be null - it means PushFront.
5018  ItemType* InsertAfter(ItemType* pItem);
5019 
5020  ItemType* InsertBefore(ItemType* pItem, const T& value);
5021  ItemType* InsertAfter(ItemType* pItem, const T& value);
5022 
5023  void Remove(ItemType* pItem);
5024 
5025 private:
5026  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5027  VmaPoolAllocator<ItemType> m_ItemAllocator;
5028  ItemType* m_pFront;
5029  ItemType* m_pBack;
5030  size_t m_Count;
5031 };
5032 
5033 template<typename T>
5034 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5035  m_pAllocationCallbacks(pAllocationCallbacks),
5036  m_ItemAllocator(pAllocationCallbacks, 128),
5037  m_pFront(VMA_NULL),
5038  m_pBack(VMA_NULL),
5039  m_Count(0)
5040 {
5041 }
5042 
5043 template<typename T>
5044 VmaRawList<T>::~VmaRawList()
5045 {
5046  // Intentionally not calling Clear, because that would waste computation
5047  // returning all items to m_ItemAllocator as free.
5048 }
5049 
5050 template<typename T>
5051 void VmaRawList<T>::Clear()
5052 {
5053  if(IsEmpty() == false)
5054  {
5055  ItemType* pItem = m_pBack;
5056  while(pItem != VMA_NULL)
5057  {
5058  ItemType* const pPrevItem = pItem->pPrev;
5059  m_ItemAllocator.Free(pItem);
5060  pItem = pPrevItem;
5061  }
5062  m_pFront = VMA_NULL;
5063  m_pBack = VMA_NULL;
5064  m_Count = 0;
5065  }
5066 }
5067 
5068 template<typename T>
5069 VmaListItem<T>* VmaRawList<T>::PushBack()
5070 {
5071  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5072  pNewItem->pNext = VMA_NULL;
5073  if(IsEmpty())
5074  {
5075  pNewItem->pPrev = VMA_NULL;
5076  m_pFront = pNewItem;
5077  m_pBack = pNewItem;
5078  m_Count = 1;
5079  }
5080  else
5081  {
5082  pNewItem->pPrev = m_pBack;
5083  m_pBack->pNext = pNewItem;
5084  m_pBack = pNewItem;
5085  ++m_Count;
5086  }
5087  return pNewItem;
5088 }
5089 
5090 template<typename T>
5091 VmaListItem<T>* VmaRawList<T>::PushFront()
5092 {
5093  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5094  pNewItem->pPrev = VMA_NULL;
5095  if(IsEmpty())
5096  {
5097  pNewItem->pNext = VMA_NULL;
5098  m_pFront = pNewItem;
5099  m_pBack = pNewItem;
5100  m_Count = 1;
5101  }
5102  else
5103  {
5104  pNewItem->pNext = m_pFront;
5105  m_pFront->pPrev = pNewItem;
5106  m_pFront = pNewItem;
5107  ++m_Count;
5108  }
5109  return pNewItem;
5110 }
5111 
5112 template<typename T>
5113 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5114 {
5115  ItemType* const pNewItem = PushBack();
5116  pNewItem->Value = value;
5117  return pNewItem;
5118 }
5119 
5120 template<typename T>
5121 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5122 {
5123  ItemType* const pNewItem = PushFront();
5124  pNewItem->Value = value;
5125  return pNewItem;
5126 }
5127 
5128 template<typename T>
5129 void VmaRawList<T>::PopBack()
5130 {
5131  VMA_HEAVY_ASSERT(m_Count > 0);
5132  ItemType* const pBackItem = m_pBack;
5133  ItemType* const pPrevItem = pBackItem->pPrev;
5134  if(pPrevItem != VMA_NULL)
5135  {
5136  pPrevItem->pNext = VMA_NULL;
5137  }
5138  m_pBack = pPrevItem;
5139  m_ItemAllocator.Free(pBackItem);
5140  --m_Count;
5141 }
5142 
5143 template<typename T>
5144 void VmaRawList<T>::PopFront()
5145 {
5146  VMA_HEAVY_ASSERT(m_Count > 0);
5147  ItemType* const pFrontItem = m_pFront;
5148  ItemType* const pNextItem = pFrontItem->pNext;
5149  if(pNextItem != VMA_NULL)
5150  {
5151  pNextItem->pPrev = VMA_NULL;
5152  }
5153  m_pFront = pNextItem;
5154  m_ItemAllocator.Free(pFrontItem);
5155  --m_Count;
5156 }
5157 
5158 template<typename T>
5159 void VmaRawList<T>::Remove(ItemType* pItem)
5160 {
5161  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5162  VMA_HEAVY_ASSERT(m_Count > 0);
5163 
5164  if(pItem->pPrev != VMA_NULL)
5165  {
5166  pItem->pPrev->pNext = pItem->pNext;
5167  }
5168  else
5169  {
5170  VMA_HEAVY_ASSERT(m_pFront == pItem);
5171  m_pFront = pItem->pNext;
5172  }
5173 
5174  if(pItem->pNext != VMA_NULL)
5175  {
5176  pItem->pNext->pPrev = pItem->pPrev;
5177  }
5178  else
5179  {
5180  VMA_HEAVY_ASSERT(m_pBack == pItem);
5181  m_pBack = pItem->pPrev;
5182  }
5183 
5184  m_ItemAllocator.Free(pItem);
5185  --m_Count;
5186 }
5187 
5188 template<typename T>
5189 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5190 {
5191  if(pItem != VMA_NULL)
5192  {
5193  ItemType* const prevItem = pItem->pPrev;
5194  ItemType* const newItem = m_ItemAllocator.Alloc();
5195  newItem->pPrev = prevItem;
5196  newItem->pNext = pItem;
5197  pItem->pPrev = newItem;
5198  if(prevItem != VMA_NULL)
5199  {
5200  prevItem->pNext = newItem;
5201  }
5202  else
5203  {
5204  VMA_HEAVY_ASSERT(m_pFront == pItem);
5205  m_pFront = newItem;
5206  }
5207  ++m_Count;
5208  return newItem;
5209  }
5210  else
5211  return PushBack();
5212 }
5213 
5214 template<typename T>
5215 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5216 {
5217  if(pItem != VMA_NULL)
5218  {
5219  ItemType* const nextItem = pItem->pNext;
5220  ItemType* const newItem = m_ItemAllocator.Alloc();
5221  newItem->pNext = nextItem;
5222  newItem->pPrev = pItem;
5223  pItem->pNext = newItem;
5224  if(nextItem != VMA_NULL)
5225  {
5226  nextItem->pPrev = newItem;
5227  }
5228  else
5229  {
5230  VMA_HEAVY_ASSERT(m_pBack == pItem);
5231  m_pBack = newItem;
5232  }
5233  ++m_Count;
5234  return newItem;
5235  }
5236  else
5237  return PushFront();
5238 }
5239 
5240 template<typename T>
5241 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5242 {
5243  ItemType* const newItem = InsertBefore(pItem);
5244  newItem->Value = value;
5245  return newItem;
5246 }
5247 
5248 template<typename T>
5249 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5250 {
5251  ItemType* const newItem = InsertAfter(pItem);
5252  newItem->Value = value;
5253  return newItem;
5254 }
5255 
5256 template<typename T, typename AllocatorT>
5257 class VmaList
5258 {
5259  VMA_CLASS_NO_COPY(VmaList)
5260 public:
5261  class iterator
5262  {
5263  public:
5264  iterator() :
5265  m_pList(VMA_NULL),
5266  m_pItem(VMA_NULL)
5267  {
5268  }
5269 
5270  T& operator*() const
5271  {
5272  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5273  return m_pItem->Value;
5274  }
5275  T* operator->() const
5276  {
5277  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5278  return &m_pItem->Value;
5279  }
5280 
5281  iterator& operator++()
5282  {
5283  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5284  m_pItem = m_pItem->pNext;
5285  return *this;
5286  }
5287  iterator& operator--()
5288  {
5289  if(m_pItem != VMA_NULL)
5290  {
5291  m_pItem = m_pItem->pPrev;
5292  }
5293  else
5294  {
5295  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5296  m_pItem = m_pList->Back();
5297  }
5298  return *this;
5299  }
5300 
5301  iterator operator++(int)
5302  {
5303  iterator result = *this;
5304  ++*this;
5305  return result;
5306  }
5307  iterator operator--(int)
5308  {
5309  iterator result = *this;
5310  --*this;
5311  return result;
5312  }
5313 
5314  bool operator==(const iterator& rhs) const
5315  {
5316  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5317  return m_pItem == rhs.m_pItem;
5318  }
5319  bool operator!=(const iterator& rhs) const
5320  {
5321  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5322  return m_pItem != rhs.m_pItem;
5323  }
5324 
5325  private:
5326  VmaRawList<T>* m_pList;
5327  VmaListItem<T>* m_pItem;
5328 
5329  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5330  m_pList(pList),
5331  m_pItem(pItem)
5332  {
5333  }
5334 
5335  friend class VmaList<T, AllocatorT>;
5336  };
5337 
5338  class const_iterator
5339  {
5340  public:
5341  const_iterator() :
5342  m_pList(VMA_NULL),
5343  m_pItem(VMA_NULL)
5344  {
5345  }
5346 
5347  const_iterator(const iterator& src) :
5348  m_pList(src.m_pList),
5349  m_pItem(src.m_pItem)
5350  {
5351  }
5352 
5353  const T& operator*() const
5354  {
5355  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5356  return m_pItem->Value;
5357  }
5358  const T* operator->() const
5359  {
5360  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5361  return &m_pItem->Value;
5362  }
5363 
5364  const_iterator& operator++()
5365  {
5366  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5367  m_pItem = m_pItem->pNext;
5368  return *this;
5369  }
5370  const_iterator& operator--()
5371  {
5372  if(m_pItem != VMA_NULL)
5373  {
5374  m_pItem = m_pItem->pPrev;
5375  }
5376  else
5377  {
5378  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5379  m_pItem = m_pList->Back();
5380  }
5381  return *this;
5382  }
5383 
5384  const_iterator operator++(int)
5385  {
5386  const_iterator result = *this;
5387  ++*this;
5388  return result;
5389  }
5390  const_iterator operator--(int)
5391  {
5392  const_iterator result = *this;
5393  --*this;
5394  return result;
5395  }
5396 
5397  bool operator==(const const_iterator& rhs) const
5398  {
5399  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5400  return m_pItem == rhs.m_pItem;
5401  }
5402  bool operator!=(const const_iterator& rhs) const
5403  {
5404  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5405  return m_pItem != rhs.m_pItem;
5406  }
5407 
5408  private:
5409  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5410  m_pList(pList),
5411  m_pItem(pItem)
5412  {
5413  }
5414 
5415  const VmaRawList<T>* m_pList;
5416  const VmaListItem<T>* m_pItem;
5417 
5418  friend class VmaList<T, AllocatorT>;
5419  };
5420 
5421  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5422 
5423  bool empty() const { return m_RawList.IsEmpty(); }
5424  size_t size() const { return m_RawList.GetCount(); }
5425 
5426  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5427  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5428 
5429  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5430  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5431 
5432  void clear() { m_RawList.Clear(); }
5433  void push_back(const T& value) { m_RawList.PushBack(value); }
5434  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5435  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5436 
5437 private:
5438  VmaRawList<T> m_RawList;
5439 };
5440 
5441 #endif // #if VMA_USE_STL_LIST
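
/*
Usage sketch (illustrative only): VmaList mirrors the subset of std::list that the
allocator needs - push_back, insert, erase, and bidirectional iteration. AllocatorT
must expose m_pCallbacks, as VmaStlAllocator does.

    VmaStlAllocator<int> a(pAllocationCallbacks);
    VmaList<int, VmaStlAllocator<int> > numbers(a);
    numbers.push_back(1);
    numbers.push_back(2);
    for(VmaList<int, VmaStlAllocator<int> >::iterator it = numbers.begin();
        it != numbers.end(); ++it)
    {
        // *it yields int& here; erase(it) would unlink and recycle the node.
    }
*/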
5442 
5444 // class VmaMap
5445 
5446 // Unused in this version.
5447 #if 0
5448 
5449 #if VMA_USE_STL_UNORDERED_MAP
5450 
5451 #define VmaPair std::pair
5452 
5453 #define VMA_MAP_TYPE(KeyT, ValueT) \
5454  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5455 
5456 #else // #if VMA_USE_STL_UNORDERED_MAP
5457 
5458 template<typename T1, typename T2>
5459 struct VmaPair
5460 {
5461  T1 first;
5462  T2 second;
5463 
5464  VmaPair() : first(), second() { }
5465  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5466 };
5467 
5468 /* Class compatible with a subset of the interface of std::unordered_map.
5469 KeyT and ValueT must be POD because they will be stored in VmaVector.
5470 */
5471 template<typename KeyT, typename ValueT>
5472 class VmaMap
5473 {
5474 public:
5475  typedef VmaPair<KeyT, ValueT> PairType;
5476  typedef PairType* iterator;
5477 
5478  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5479 
5480  iterator begin() { return m_Vector.begin(); }
5481  iterator end() { return m_Vector.end(); }
5482 
5483  void insert(const PairType& pair);
5484  iterator find(const KeyT& key);
5485  void erase(iterator it);
5486 
5487 private:
5488  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5489 };
5490 
5491 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5492 
5493 template<typename FirstT, typename SecondT>
5494 struct VmaPairFirstLess
5495 {
5496  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5497  {
5498  return lhs.first < rhs.first;
5499  }
5500  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5501  {
5502  return lhs.first < rhsFirst;
5503  }
5504 };
5505 
5506 template<typename KeyT, typename ValueT>
5507 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5508 {
5509  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5510  m_Vector.data(),
5511  m_Vector.data() + m_Vector.size(),
5512  pair,
5513  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5514  VmaVectorInsert(m_Vector, indexToInsert, pair);
5515 }
5516 
5517 template<typename KeyT, typename ValueT>
5518 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5519 {
5520  PairType* it = VmaBinaryFindFirstNotLess(
5521  m_Vector.data(),
5522  m_Vector.data() + m_Vector.size(),
5523  key,
5524  VmaPairFirstLess<KeyT, ValueT>());
5525  if((it != m_Vector.end()) && (it->first == key))
5526  {
5527  return it;
5528  }
5529  else
5530  {
5531  return m_Vector.end();
5532  }
5533 }
5534 
5535 template<typename KeyT, typename ValueT>
5536 void VmaMap<KeyT, ValueT>::erase(iterator it)
5537 {
5538  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5539 }
5540 
5541 #endif // #if VMA_USE_STL_UNORDERED_MAP
5542 
5543 #endif // #if 0
5544 
5546 
5547 class VmaDeviceMemoryBlock;
5548 
5549 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5550 
5551 struct VmaAllocation_T
5552 {
5553 private:
5554  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5555 
5556  enum FLAGS
5557  {
5558  FLAG_USER_DATA_STRING = 0x01,
5559  };
5560 
5561 public:
5562  enum ALLOCATION_TYPE
5563  {
5564  ALLOCATION_TYPE_NONE,
5565  ALLOCATION_TYPE_BLOCK,
5566  ALLOCATION_TYPE_DEDICATED,
5567  };
5568 
5569  /*
5570  This struct is allocated using VmaPoolAllocator.
5571  */
5572 
5573  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
5574  m_Alignment{1},
5575  m_Size{0},
5576  m_pUserData{VMA_NULL},
5577  m_LastUseFrameIndex{currentFrameIndex},
5578  m_MemoryTypeIndex{0},
5579  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
5580  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
5581  m_MapCount{0},
5582  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
5583  {
5584 #if VMA_STATS_STRING_ENABLED
5585  m_CreationFrameIndex = currentFrameIndex;
5586  m_BufferImageUsage = 0;
5587 #endif
5588  }
5589 
5590  ~VmaAllocation_T()
5591  {
5592  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5593 
5594  // Check if owned string was freed.
5595  VMA_ASSERT(m_pUserData == VMA_NULL);
5596  }
5597 
5598  void InitBlockAllocation(
5599  VmaDeviceMemoryBlock* block,
5600  VkDeviceSize offset,
5601  VkDeviceSize alignment,
5602  VkDeviceSize size,
5603  uint32_t memoryTypeIndex,
5604  VmaSuballocationType suballocationType,
5605  bool mapped,
5606  bool canBecomeLost)
5607  {
5608  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5609  VMA_ASSERT(block != VMA_NULL);
5610  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5611  m_Alignment = alignment;
5612  m_Size = size;
5613  m_MemoryTypeIndex = memoryTypeIndex;
5614  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5615  m_SuballocationType = (uint8_t)suballocationType;
5616  m_BlockAllocation.m_Block = block;
5617  m_BlockAllocation.m_Offset = offset;
5618  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5619  }
5620 
5621  void InitLost()
5622  {
5623  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5624  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5625  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5626  m_MemoryTypeIndex = 0;
5627  m_BlockAllocation.m_Block = VMA_NULL;
5628  m_BlockAllocation.m_Offset = 0;
5629  m_BlockAllocation.m_CanBecomeLost = true;
5630  }
5631 
5632  void ChangeBlockAllocation(
5633  VmaAllocator hAllocator,
5634  VmaDeviceMemoryBlock* block,
5635  VkDeviceSize offset);
5636 
5637  void ChangeOffset(VkDeviceSize newOffset);
5638 
5639  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5640  void InitDedicatedAllocation(
5641  uint32_t memoryTypeIndex,
5642  VkDeviceMemory hMemory,
5643  VmaSuballocationType suballocationType,
5644  void* pMappedData,
5645  VkDeviceSize size)
5646  {
5647  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5648  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5649  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5650  m_Alignment = 0;
5651  m_Size = size;
5652  m_MemoryTypeIndex = memoryTypeIndex;
5653  m_SuballocationType = (uint8_t)suballocationType;
5654  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5655  m_DedicatedAllocation.m_hMemory = hMemory;
5656  m_DedicatedAllocation.m_pMappedData = pMappedData;
5657  }
5658 
5659  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5660  VkDeviceSize GetAlignment() const { return m_Alignment; }
5661  VkDeviceSize GetSize() const { return m_Size; }
5662  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5663  void* GetUserData() const { return m_pUserData; }
5664  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5665  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5666 
5667  VmaDeviceMemoryBlock* GetBlock() const
5668  {
5669  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5670  return m_BlockAllocation.m_Block;
5671  }
5672  VkDeviceSize GetOffset() const;
5673  VkDeviceMemory GetMemory() const;
5674  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5675  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5676  void* GetMappedData() const;
5677  bool CanBecomeLost() const;
5678 
5679  uint32_t GetLastUseFrameIndex() const
5680  {
5681  return m_LastUseFrameIndex.load();
5682  }
5683  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5684  {
5685  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5686  }
5687  /*
5688  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5689  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5690  - Otherwise, returns false.
5691 
5692  If hAllocation is already lost, this function asserts - you should not call it in that case.
5693  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
5694  */
5695  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5696 
5697  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5698  {
5699  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5700  outInfo.blockCount = 1;
5701  outInfo.allocationCount = 1;
5702  outInfo.unusedRangeCount = 0;
5703  outInfo.usedBytes = m_Size;
5704  outInfo.unusedBytes = 0;
5705  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5706  outInfo.unusedRangeSizeMin = UINT64_MAX;
5707  outInfo.unusedRangeSizeMax = 0;
5708  }
5709 
5710  void BlockAllocMap();
5711  void BlockAllocUnmap();
5712  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5713  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5714 
5715 #if VMA_STATS_STRING_ENABLED
5716  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5717  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5718 
5719  void InitBufferImageUsage(uint32_t bufferImageUsage)
5720  {
5721  VMA_ASSERT(m_BufferImageUsage == 0);
5722  m_BufferImageUsage = bufferImageUsage;
5723  }
5724 
5725  void PrintParameters(class VmaJsonWriter& json) const;
5726 #endif
5727 
5728 private:
5729  VkDeviceSize m_Alignment;
5730  VkDeviceSize m_Size;
5731  void* m_pUserData;
5732  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5733  uint32_t m_MemoryTypeIndex;
5734  uint8_t m_Type; // ALLOCATION_TYPE
5735  uint8_t m_SuballocationType; // VmaSuballocationType
5736  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5737  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5738  uint8_t m_MapCount;
5739  uint8_t m_Flags; // enum FLAGS
5740 
5741  // Allocation out of VmaDeviceMemoryBlock.
5742  struct BlockAllocation
5743  {
5744  VmaDeviceMemoryBlock* m_Block;
5745  VkDeviceSize m_Offset;
5746  bool m_CanBecomeLost;
5747  };
5748 
5749  // Allocation for an object that has its own private VkDeviceMemory.
5750  struct DedicatedAllocation
5751  {
5752  VkDeviceMemory m_hMemory;
5753  void* m_pMappedData; // Not null means memory is mapped.
5754  };
5755 
5756  union
5757  {
5758  // Allocation out of VmaDeviceMemoryBlock.
5759  BlockAllocation m_BlockAllocation;
5760  // Allocation for an object that has its own private VkDeviceMemory.
5761  DedicatedAllocation m_DedicatedAllocation;
5762  };
5763 
5764 #if VMA_STATS_STRING_ENABLED
5765  uint32_t m_CreationFrameIndex;
5766  uint32_t m_BufferImageUsage; // 0 if unknown.
5767 #endif
5768 
5769  void FreeUserDataString(VmaAllocator hAllocator);
5770 };
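
/*
Worked example of the m_MapCount encoding above (illustrative):
- created with VMA_ALLOCATION_CREATE_MAPPED_BIT, no vmaMapMemory() calls: m_MapCount == 0x80
- mapped twice via vmaMapMemory(): m_MapCount == 0x02
- both: m_MapCount == 0x82
IsPersistentMap() tests bit 0x80, and the destructor asserts (m_MapCount & ~0x80) == 0,
i.e. that every vmaMapMemory() was balanced by vmaUnmapMemory().
*/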
5771 
5772 /*
5773 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned to
5774 the user as an allocation, or free.
5775 */
5776 struct VmaSuballocation
5777 {
5778  VkDeviceSize offset;
5779  VkDeviceSize size;
5780  VmaAllocation hAllocation;
5781  VmaSuballocationType type;
5782 };
5783 
5784 // Comparator for offsets.
5785 struct VmaSuballocationOffsetLess
5786 {
5787  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5788  {
5789  return lhs.offset < rhs.offset;
5790  }
5791 };
5792 struct VmaSuballocationOffsetGreater
5793 {
5794  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5795  {
5796  return lhs.offset > rhs.offset;
5797  }
5798 };
5799 
5800 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5801 
5802 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5803 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5804 
5805 enum class VmaAllocationRequestType
5806 {
5807  Normal,
5808  // Used by "Linear" algorithm.
5809  UpperAddress,
5810  EndOf1st,
5811  EndOf2nd,
5812 };
5813 
5814 /*
5815 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5816 
5817 If canMakeOtherLost was false:
5818 - item points to a FREE suballocation.
5819 - itemsToMakeLostCount is 0.
5820 
5821 If canMakeOtherLost was true:
5822 - item points to first of sequence of suballocations, which are either FREE,
5823  or point to VmaAllocations that can become lost.
5824 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5825  the requested allocation to succeed.
5826 */
5827 struct VmaAllocationRequest
5828 {
5829  VkDeviceSize offset;
5830  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5831  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5832  VmaSuballocationList::iterator item;
5833  size_t itemsToMakeLostCount;
5834  void* customData;
5835  VmaAllocationRequestType type;
5836 
5837  VkDeviceSize CalcCost() const
5838  {
5839  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5840  }
5841 };
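
/*
Worked example (illustrative): a candidate request that overlaps 262144 bytes of
allocations that would have to be made lost (sumItemSize) and has
itemsToMakeLostCount == 2 costs 262144 + 2 * 1048576 = 2359296 equivalent bytes.
Candidates with lower CalcCost() are preferred when several placements are possible.
*/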
5842 
5843 /*
5844 Data structure used for bookkeeping of allocations and unused ranges of memory
5845 in a single VkDeviceMemory block.
5846 */
5847 class VmaBlockMetadata
5848 {
5849 public:
5850  VmaBlockMetadata(VmaAllocator hAllocator);
5851  virtual ~VmaBlockMetadata() { }
5852  virtual void Init(VkDeviceSize size) { m_Size = size; }
5853 
5854  // Validates all data structures inside this object. If not valid, returns false.
5855  virtual bool Validate() const = 0;
5856  VkDeviceSize GetSize() const { return m_Size; }
5857  virtual size_t GetAllocationCount() const = 0;
5858  virtual VkDeviceSize GetSumFreeSize() const = 0;
5859  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5860  // Returns true if this block is empty - contains only a single free suballocation.
5861  virtual bool IsEmpty() const = 0;
5862 
5863  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5864  // Shouldn't modify blockCount.
5865  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5866 
5867 #if VMA_STATS_STRING_ENABLED
5868  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5869 #endif
5870 
5871  // Tries to find a place for suballocation with given parameters inside this block.
5872  // If succeeded, fills pAllocationRequest and returns true.
5873  // If failed, returns false.
5874  virtual bool CreateAllocationRequest(
5875  uint32_t currentFrameIndex,
5876  uint32_t frameInUseCount,
5877  VkDeviceSize bufferImageGranularity,
5878  VkDeviceSize allocSize,
5879  VkDeviceSize allocAlignment,
5880  bool upperAddress,
5881  VmaSuballocationType allocType,
5882  bool canMakeOtherLost,
5883  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5884  uint32_t strategy,
5885  VmaAllocationRequest* pAllocationRequest) = 0;
5886 
5887  virtual bool MakeRequestedAllocationsLost(
5888  uint32_t currentFrameIndex,
5889  uint32_t frameInUseCount,
5890  VmaAllocationRequest* pAllocationRequest) = 0;
5891 
5892  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5893 
5894  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5895 
5896  // Makes actual allocation based on request. Request must already be checked and valid.
5897  virtual void Alloc(
5898  const VmaAllocationRequest& request,
5899  VmaSuballocationType type,
5900  VkDeviceSize allocSize,
5901  VmaAllocation hAllocation) = 0;
5902 
5903  // Frees suballocation assigned to given memory region.
5904  virtual void Free(const VmaAllocation allocation) = 0;
5905  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5906 
5907 protected:
5908  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5909 
5910 #if VMA_STATS_STRING_ENABLED
5911  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5912  VkDeviceSize unusedBytes,
5913  size_t allocationCount,
5914  size_t unusedRangeCount) const;
5915  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5916  VkDeviceSize offset,
5917  VmaAllocation hAllocation) const;
5918  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5919  VkDeviceSize offset,
5920  VkDeviceSize size) const;
5921  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5922 #endif
5923 
5924 private:
5925  VkDeviceSize m_Size;
5926  const VkAllocationCallbacks* m_pAllocationCallbacks;
5927 };
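
/*
Typical call sequence against this interface (a simplified sketch of the block vector
logic that appears later in this file; the local variables here are hypothetical):

    VmaAllocationRequest request = {};
    if(pMetadata->CreateAllocationRequest(
        currentFrameIndex, frameInUseCount, bufferImageGranularity,
        allocSize, allocAlignment,
        false, // upperAddress
        suballocType,
        false, // canMakeOtherLost
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
        &request))
    {
        pMetadata->Alloc(request, suballocType, allocSize, hAllocation);
    }
*/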
5928 
5929 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5930  VMA_ASSERT(0 && "Validation failed: " #cond); \
5931  return false; \
5932  } } while(false)
5933 
5934 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5935 {
5936  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5937 public:
5938  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5939  virtual ~VmaBlockMetadata_Generic();
5940  virtual void Init(VkDeviceSize size);
5941 
5942  virtual bool Validate() const;
5943  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5944  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5945  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5946  virtual bool IsEmpty() const;
5947 
5948  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5949  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5950 
5951 #if VMA_STATS_STRING_ENABLED
5952  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5953 #endif
5954 
5955  virtual bool CreateAllocationRequest(
5956  uint32_t currentFrameIndex,
5957  uint32_t frameInUseCount,
5958  VkDeviceSize bufferImageGranularity,
5959  VkDeviceSize allocSize,
5960  VkDeviceSize allocAlignment,
5961  bool upperAddress,
5962  VmaSuballocationType allocType,
5963  bool canMakeOtherLost,
5964  uint32_t strategy,
5965  VmaAllocationRequest* pAllocationRequest);
5966 
5967  virtual bool MakeRequestedAllocationsLost(
5968  uint32_t currentFrameIndex,
5969  uint32_t frameInUseCount,
5970  VmaAllocationRequest* pAllocationRequest);
5971 
5972  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5973 
5974  virtual VkResult CheckCorruption(const void* pBlockData);
5975 
5976  virtual void Alloc(
5977  const VmaAllocationRequest& request,
5978  VmaSuballocationType type,
5979  VkDeviceSize allocSize,
5980  VmaAllocation hAllocation);
5981 
5982  virtual void Free(const VmaAllocation allocation);
5983  virtual void FreeAtOffset(VkDeviceSize offset);
5984 
5986  // For defragmentation
5987 
5988  bool IsBufferImageGranularityConflictPossible(
5989  VkDeviceSize bufferImageGranularity,
5990  VmaSuballocationType& inOutPrevSuballocType) const;
5991 
5992 private:
5993  friend class VmaDefragmentationAlgorithm_Generic;
5994  friend class VmaDefragmentationAlgorithm_Fast;
5995 
5996  uint32_t m_FreeCount;
5997  VkDeviceSize m_SumFreeSize;
5998  VmaSuballocationList m_Suballocations;
5999  // Suballocations that are free and have size greater than a certain threshold.
6000  // Sorted by size, ascending.
6001  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6002 
6003  bool ValidateFreeSuballocationList() const;
6004 
6005  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
6006  // If yes, fills pOffset and returns true. If no, returns false.
6007  bool CheckAllocation(
6008  uint32_t currentFrameIndex,
6009  uint32_t frameInUseCount,
6010  VkDeviceSize bufferImageGranularity,
6011  VkDeviceSize allocSize,
6012  VkDeviceSize allocAlignment,
6013  VmaSuballocationType allocType,
6014  VmaSuballocationList::const_iterator suballocItem,
6015  bool canMakeOtherLost,
6016  VkDeviceSize* pOffset,
6017  size_t* itemsToMakeLostCount,
6018  VkDeviceSize* pSumFreeSize,
6019  VkDeviceSize* pSumItemSize) const;
6020  // Given a free suballocation, merges it with the following one, which must also be free.
6021  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6022  // Releases the given suballocation, making it free.
6023  // Merges it with adjacent free suballocations if applicable.
6024  // Returns an iterator to the new free suballocation at this place.
6025  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6026  // Given a free suballocation, inserts it into the sorted list
6027  // m_FreeSuballocationsBySize if it is large enough to qualify.
6028  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6029  // Given a free suballocation, removes it from the sorted list
6030  // m_FreeSuballocationsBySize if it was registered there.
6031  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6032 };
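
/*
Note on m_FreeSuballocationsBySize (illustrative): because the vector is sorted by
size ascending, finding the smallest free suballocation that can hold a request is a
binary search (in the spirit of VmaBinaryFindFirstNotLess) over sizes rather than a
linear scan of the whole suballocation list.
*/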
6033 
6034 /*
6035 Allocations and their references in the internal data structure look like this:
6036 
6037 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6038 
6039  0 +-------+
6040  | |
6041  | |
6042  | |
6043  +-------+
6044  | Alloc | 1st[m_1stNullItemsBeginCount]
6045  +-------+
6046  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6047  +-------+
6048  | ... |
6049  +-------+
6050  | Alloc | 1st[1st.size() - 1]
6051  +-------+
6052  | |
6053  | |
6054  | |
6055 GetSize() +-------+
6056 
6057 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6058 
6059  0 +-------+
6060  | Alloc | 2nd[0]
6061  +-------+
6062  | Alloc | 2nd[1]
6063  +-------+
6064  | ... |
6065  +-------+
6066  | Alloc | 2nd[2nd.size() - 1]
6067  +-------+
6068  | |
6069  | |
6070  | |
6071  +-------+
6072  | Alloc | 1st[m_1stNullItemsBeginCount]
6073  +-------+
6074  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6075  +-------+
6076  | ... |
6077  +-------+
6078  | Alloc | 1st[1st.size() - 1]
6079  +-------+
6080  | |
6081 GetSize() +-------+
6082 
6083 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6084 
6085  0 +-------+
6086  | |
6087  | |
6088  | |
6089  +-------+
6090  | Alloc | 1st[m_1stNullItemsBeginCount]
6091  +-------+
6092  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6093  +-------+
6094  | ... |
6095  +-------+
6096  | Alloc | 1st[1st.size() - 1]
6097  +-------+
6098  | |
6099  | |
6100  | |
6101  +-------+
6102  | Alloc | 2nd[2nd.size() - 1]
6103  +-------+
6104  | ... |
6105  +-------+
6106  | Alloc | 2nd[1]
6107  +-------+
6108  | Alloc | 2nd[0]
6109 GetSize() +-------+
6110 
6111 */
6112 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6113 {
6114  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6115 public:
6116  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6117  virtual ~VmaBlockMetadata_Linear();
6118  virtual void Init(VkDeviceSize size);
6119 
6120  virtual bool Validate() const;
6121  virtual size_t GetAllocationCount() const;
6122  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6123  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6124  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6125 
6126  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6127  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6128 
6129 #if VMA_STATS_STRING_ENABLED
6130  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6131 #endif
6132 
6133  virtual bool CreateAllocationRequest(
6134  uint32_t currentFrameIndex,
6135  uint32_t frameInUseCount,
6136  VkDeviceSize bufferImageGranularity,
6137  VkDeviceSize allocSize,
6138  VkDeviceSize allocAlignment,
6139  bool upperAddress,
6140  VmaSuballocationType allocType,
6141  bool canMakeOtherLost,
6142  uint32_t strategy,
6143  VmaAllocationRequest* pAllocationRequest);
6144 
6145  virtual bool MakeRequestedAllocationsLost(
6146  uint32_t currentFrameIndex,
6147  uint32_t frameInUseCount,
6148  VmaAllocationRequest* pAllocationRequest);
6149 
6150  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6151 
6152  virtual VkResult CheckCorruption(const void* pBlockData);
6153 
6154  virtual void Alloc(
6155  const VmaAllocationRequest& request,
6156  VmaSuballocationType type,
6157  VkDeviceSize allocSize,
6158  VmaAllocation hAllocation);
6159 
6160  virtual void Free(const VmaAllocation allocation);
6161  virtual void FreeAtOffset(VkDeviceSize offset);
6162 
6163 private:
6164  /*
6165  There are two suballocation vectors, used in a ping-pong fashion.
6166  The one with index m_1stVectorIndex is called 1st.
6167  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6168  2nd can be non-empty only when 1st is not empty.
6169  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6170  */
6171  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6172 
6173  enum SECOND_VECTOR_MODE
6174  {
6175  SECOND_VECTOR_EMPTY,
6176  /*
6177  Suballocations in 2nd vector are created later than the ones in 1st, but they
6178  all have smaller offsets.
6179  */
6180  SECOND_VECTOR_RING_BUFFER,
6181  /*
6182  Suballocations in 2nd vector are the upper side of a double stack.
6183  They all have offsets higher than those in 1st vector.
6184  Top of this stack means smaller offsets, but higher indices in this vector.
6185  */
6186  SECOND_VECTOR_DOUBLE_STACK,
6187  };
6188 
6189  VkDeviceSize m_SumFreeSize;
6190  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6191  uint32_t m_1stVectorIndex;
6192  SECOND_VECTOR_MODE m_2ndVectorMode;
6193 
6194  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6195  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6196  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6197  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6198 
6199  // Number of items in 1st vector with hAllocation = null at the beginning.
6200  size_t m_1stNullItemsBeginCount;
6201  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6202  size_t m_1stNullItemsMiddleCount;
6203  // Number of items in 2nd vector with hAllocation = null.
6204  size_t m_2ndNullItemsCount;
6205 
6206  bool ShouldCompact1st() const;
6207  void CleanupAfterFree();
6208 
6209  bool CreateAllocationRequest_LowerAddress(
6210  uint32_t currentFrameIndex,
6211  uint32_t frameInUseCount,
6212  VkDeviceSize bufferImageGranularity,
6213  VkDeviceSize allocSize,
6214  VkDeviceSize allocAlignment,
6215  VmaSuballocationType allocType,
6216  bool canMakeOtherLost,
6217  uint32_t strategy,
6218  VmaAllocationRequest* pAllocationRequest);
6219  bool CreateAllocationRequest_UpperAddress(
6220  uint32_t currentFrameIndex,
6221  uint32_t frameInUseCount,
6222  VkDeviceSize bufferImageGranularity,
6223  VkDeviceSize allocSize,
6224  VkDeviceSize allocAlignment,
6225  VmaSuballocationType allocType,
6226  bool canMakeOtherLost,
6227  uint32_t strategy,
6228  VmaAllocationRequest* pAllocationRequest);
6229 };
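
/*
This metadata backs pools created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; a sketch
of the public-API side:

    VmaPoolCreateInfo poolCreateInfo = {};
    // ...memoryTypeIndex and block counts filled in as usual...
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // selects VmaBlockMetadata_Linear

Allocations made with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT put the 2nd vector in
SECOND_VECTOR_DOUBLE_STACK mode; otherwise, once new allocations wrap around past space
freed at the beginning of the block, it operates as SECOND_VECTOR_RING_BUFFER.
*/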
6230 
6231 /*
6232 - GetSize() is the original size of the allocated memory block.
6233 - m_UsableSize is this size aligned down to a power of two.
6234  All allocations and calculations happen relative to m_UsableSize.
6235 - GetUnusableSize() is the difference between them.
6236  It is reported as a separate, unused range, not available for allocations.
6237 
6238 The node at level 0 has size = m_UsableSize.
6239 Each subsequent level contains nodes half the size of those on the previous level.
6240 m_LevelCount is the maximum number of levels to use in the current object.
6241 */
6242 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6243 {
6244  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6245 public:
6246  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6247  virtual ~VmaBlockMetadata_Buddy();
6248  virtual void Init(VkDeviceSize size);
6249 
6250  virtual bool Validate() const;
6251  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6252  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6253  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6254  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6255 
6256  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6257  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6258 
6259 #if VMA_STATS_STRING_ENABLED
6260  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6261 #endif
6262 
6263  virtual bool CreateAllocationRequest(
6264  uint32_t currentFrameIndex,
6265  uint32_t frameInUseCount,
6266  VkDeviceSize bufferImageGranularity,
6267  VkDeviceSize allocSize,
6268  VkDeviceSize allocAlignment,
6269  bool upperAddress,
6270  VmaSuballocationType allocType,
6271  bool canMakeOtherLost,
6272  uint32_t strategy,
6273  VmaAllocationRequest* pAllocationRequest);
6274 
6275  virtual bool MakeRequestedAllocationsLost(
6276  uint32_t currentFrameIndex,
6277  uint32_t frameInUseCount,
6278  VmaAllocationRequest* pAllocationRequest);
6279 
6280  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6281 
6282  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6283 
6284  virtual void Alloc(
6285  const VmaAllocationRequest& request,
6286  VmaSuballocationType type,
6287  VkDeviceSize allocSize,
6288  VmaAllocation hAllocation);
6289 
6290  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6291  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6292 
6293 private:
6294  static const VkDeviceSize MIN_NODE_SIZE = 32;
6295  static const size_t MAX_LEVELS = 30;
6296 
6297  struct ValidationContext
6298  {
6299  size_t calculatedAllocationCount;
6300  size_t calculatedFreeCount;
6301  VkDeviceSize calculatedSumFreeSize;
6302 
6303  ValidationContext() :
6304  calculatedAllocationCount(0),
6305  calculatedFreeCount(0),
6306  calculatedSumFreeSize(0) { }
6307  };
6308 
6309  struct Node
6310  {
6311  VkDeviceSize offset;
6312  enum TYPE
6313  {
6314  TYPE_FREE,
6315  TYPE_ALLOCATION,
6316  TYPE_SPLIT,
6317  TYPE_COUNT
6318  } type;
6319  Node* parent;
6320  Node* buddy;
6321 
6322  union
6323  {
6324  struct
6325  {
6326  Node* prev;
6327  Node* next;
6328  } free;
6329  struct
6330  {
6331  VmaAllocation alloc;
6332  } allocation;
6333  struct
6334  {
6335  Node* leftChild;
6336  } split;
6337  };
6338  };
6339 
6340  // Size of the memory block aligned down to a power of two.
6341  VkDeviceSize m_UsableSize;
6342  uint32_t m_LevelCount;
6343 
6344  Node* m_Root;
6345  struct {
6346  Node* front;
6347  Node* back;
6348  } m_FreeList[MAX_LEVELS];
6349  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6350  size_t m_AllocationCount;
6351  // Number of nodes in the tree with type == TYPE_FREE.
6352  size_t m_FreeCount;
6353  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6354  VkDeviceSize m_SumFreeSize;
6355 
6356  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6357  void DeleteNode(Node* node);
6358  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6359  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6360  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6361  // Alloc passed just for validation. Can be null.
6362  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6363  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6364  // Adds node to the front of FreeList at given level.
6365  // node->type must be FREE.
6366  // node->free.prev, next can be undefined.
6367  void AddToFreeListFront(uint32_t level, Node* node);
6368  // Removes node from FreeList at given level.
6369  // node->type must be FREE.
6370  // node->free.prev, next stay untouched.
6371  void RemoveFromFreeList(uint32_t level, Node* node);
6372 
6373 #if VMA_STATS_STRING_ENABLED
6374  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6375 #endif
6376 };
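
/*
Worked example (illustrative): for a block with GetSize() == 48 MiB, m_UsableSize is
aligned down to 32 MiB and GetUnusableSize() == 16 MiB is reported as a permanently
unused range. LevelToNodeSize(0) == 32 MiB, LevelToNodeSize(1) == 16 MiB, and so on,
halving per level until MIN_NODE_SIZE (32 bytes) or MAX_LEVELS (30) bounds the depth.
*/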
6377 
6378 /*
6379 Represents a single block of device memory (`VkDeviceMemory`) with all the
6380 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6381 
6382 Thread-safety: This class must be externally synchronized.
6383 */
6384 class VmaDeviceMemoryBlock
6385 {
6386  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6387 public:
6388  VmaBlockMetadata* m_pMetadata;
6389 
6390  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6391 
6392  ~VmaDeviceMemoryBlock()
6393  {
6394  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6395  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6396  }
6397 
6398  // Always call after construction.
6399  void Init(
6400  VmaAllocator hAllocator,
6401  VmaPool hParentPool,
6402  uint32_t newMemoryTypeIndex,
6403  VkDeviceMemory newMemory,
6404  VkDeviceSize newSize,
6405  uint32_t id,
6406  uint32_t algorithm);
6407  // Always call before destruction.
6408  void Destroy(VmaAllocator allocator);
6409 
6410  VmaPool GetParentPool() const { return m_hParentPool; }
6411  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6412  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6413  uint32_t GetId() const { return m_Id; }
6414  void* GetMappedData() const { return m_pMappedData; }
6415 
6416  // Validates all data structures inside this object. If not valid, returns false.
6417  bool Validate() const;
6418 
6419  VkResult CheckCorruption(VmaAllocator hAllocator);
6420 
6421  // ppData can be null.
6422  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6423  void Unmap(VmaAllocator hAllocator, uint32_t count);
6424 
6425  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6426  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6427 
6428  VkResult BindBufferMemory(
6429  const VmaAllocator hAllocator,
6430  const VmaAllocation hAllocation,
6431  VkDeviceSize allocationLocalOffset,
6432  VkBuffer hBuffer,
6433  const void* pNext);
6434  VkResult BindImageMemory(
6435  const VmaAllocator hAllocator,
6436  const VmaAllocation hAllocation,
6437  VkDeviceSize allocationLocalOffset,
6438  VkImage hImage,
6439  const void* pNext);
6440 
6441 private:
6442  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
6443  uint32_t m_MemoryTypeIndex;
6444  uint32_t m_Id;
6445  VkDeviceMemory m_hMemory;
6446 
6447  /*
6448  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6449  Also protects m_MapCount, m_pMappedData.
6450  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6451  */
6452  VMA_MUTEX m_Mutex;
6453  uint32_t m_MapCount;
6454  void* m_pMappedData;
6455 };
6456 
6457 struct VmaPointerLess
6458 {
6459  bool operator()(const void* lhs, const void* rhs) const
6460  {
6461  return lhs < rhs;
6462  }
6463 };
6464 
6465 struct VmaDefragmentationMove
6466 {
6467  size_t srcBlockIndex;
6468  size_t dstBlockIndex;
6469  VkDeviceSize srcOffset;
6470  VkDeviceSize dstOffset;
6471  VkDeviceSize size;
6472  VmaAllocation hAllocation;
6473  VmaDeviceMemoryBlock* pSrcBlock;
6474  VmaDeviceMemoryBlock* pDstBlock;
6475 };
6476 
6477 class VmaDefragmentationAlgorithm;
6478 
6479 /*
6480 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6481 Vulkan memory type.
6482 
6483 Synchronized internally with a mutex.
6484 */
6485 struct VmaBlockVector
6486 {
6487  VMA_CLASS_NO_COPY(VmaBlockVector)
6488 public:
6489  VmaBlockVector(
6490  VmaAllocator hAllocator,
6491  VmaPool hParentPool,
6492  uint32_t memoryTypeIndex,
6493  VkDeviceSize preferredBlockSize,
6494  size_t minBlockCount,
6495  size_t maxBlockCount,
6496  VkDeviceSize bufferImageGranularity,
6497  uint32_t frameInUseCount,
6498  bool explicitBlockSize,
6499  uint32_t algorithm);
6500  ~VmaBlockVector();
6501 
6502  VkResult CreateMinBlocks();
6503 
6504  VmaAllocator GetAllocator() const { return m_hAllocator; }
6505  VmaPool GetParentPool() const { return m_hParentPool; }
6506  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6507  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6508  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6509  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6510  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6511  uint32_t GetAlgorithm() const { return m_Algorithm; }
6512 
6513  void GetPoolStats(VmaPoolStats* pStats);
6514 
6515  bool IsEmpty();
6516  bool IsCorruptionDetectionEnabled() const;
6517 
6518  VkResult Allocate(
6519  uint32_t currentFrameIndex,
6520  VkDeviceSize size,
6521  VkDeviceSize alignment,
6522  const VmaAllocationCreateInfo& createInfo,
6523  VmaSuballocationType suballocType,
6524  size_t allocationCount,
6525  VmaAllocation* pAllocations);
6526 
6527  void Free(const VmaAllocation hAllocation);
6528 
6529  // Adds statistics of this BlockVector to pStats.
6530  void AddStats(VmaStats* pStats);
6531 
6532 #if VMA_STATS_STRING_ENABLED
6533  void PrintDetailedMap(class VmaJsonWriter& json);
6534 #endif
6535 
6536  void MakePoolAllocationsLost(
6537  uint32_t currentFrameIndex,
6538  size_t* pLostAllocationCount);
6539  VkResult CheckCorruption();
6540 
6541  // Saves results in pCtx->res.
6542  void Defragment(
6543  class VmaBlockVectorDefragmentationContext* pCtx,
6545  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6546  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6547  VkCommandBuffer commandBuffer);
6548  void DefragmentationEnd(
6549  class VmaBlockVectorDefragmentationContext* pCtx,
6550  VmaDefragmentationStats* pStats);
6551 
6552  uint32_t ProcessDefragmentations(
6553  class VmaBlockVectorDefragmentationContext *pCtx,
6554  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
6555 
6556  void CommitDefragmentations(
6557  class VmaBlockVectorDefragmentationContext *pCtx,
6558  VmaDefragmentationStats* pStats);
6559 
6561  // To be used only while the m_Mutex is locked. Used during defragmentation.
6562 
6563  size_t GetBlockCount() const { return m_Blocks.size(); }
6564  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6565  size_t CalcAllocationCount() const;
6566  bool IsBufferImageGranularityConflictPossible() const;
6567 
6568 private:
6569  friend class VmaDefragmentationAlgorithm_Generic;
6570 
6571  const VmaAllocator m_hAllocator;
6572  const VmaPool m_hParentPool;
6573  const uint32_t m_MemoryTypeIndex;
6574  const VkDeviceSize m_PreferredBlockSize;
6575  const size_t m_MinBlockCount;
6576  const size_t m_MaxBlockCount;
6577  const VkDeviceSize m_BufferImageGranularity;
6578  const uint32_t m_FrameInUseCount;
6579  const bool m_ExplicitBlockSize;
6580  const uint32_t m_Algorithm;
6581  VMA_RW_MUTEX m_Mutex;
6582 
6583  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6584  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6585  bool m_HasEmptyBlock;
6586  // Incrementally sorted by sumFreeSize, ascending.
6587  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6588  uint32_t m_NextBlockId;
6589 
6590  VkDeviceSize CalcMaxBlockSize() const;
6591 
6592  // Finds and removes given block from vector.
6593  void Remove(VmaDeviceMemoryBlock* pBlock);
6594 
6595  // Performs a single step in sorting m_Blocks. They may not be fully sorted
6596  // after this call.
6597  void IncrementallySortBlocks();
6598 
6599  VkResult AllocatePage(
6600  uint32_t currentFrameIndex,
6601  VkDeviceSize size,
6602  VkDeviceSize alignment,
6603  const VmaAllocationCreateInfo& createInfo,
6604  VmaSuballocationType suballocType,
6605  VmaAllocation* pAllocation);
6606 
6607  // To be used only without CAN_MAKE_OTHER_LOST flag.
6608  VkResult AllocateFromBlock(
6609  VmaDeviceMemoryBlock* pBlock,
6610  uint32_t currentFrameIndex,
6611  VkDeviceSize size,
6612  VkDeviceSize alignment,
6613  VmaAllocationCreateFlags allocFlags,
6614  void* pUserData,
6615  VmaSuballocationType suballocType,
6616  uint32_t strategy,
6617  VmaAllocation* pAllocation);
6618 
6619  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6620 
6621  // Saves result to pCtx->res.
6622  void ApplyDefragmentationMovesCpu(
6623  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6624  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6625  // Saves result to pCtx->res.
6626  void ApplyDefragmentationMovesGpu(
6627  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6628  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6629  VkCommandBuffer commandBuffer);
6630 
6631  /*
6632  Used during defragmentation. pDefragmentationStats is optional: it is an in/out
6633  parameter, updated with new data.
6634  */
6635  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6636 
6637  void UpdateHasEmptyBlock();
6638 };
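
/*
Defragmentation flow over a block vector (a sketch based on the methods declared
above): Defragment() computes the planned moves; ProcessDefragmentations() hands them
to the user in batches of VmaDefragmentationPassMoveInfo; CommitDefragmentations()
applies the bookkeeping once the copies are done; DefragmentationEnd() releases
per-context resources and accumulates statistics into pStats.
*/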
6639 
6640 struct VmaPool_T
6641 {
6642  VMA_CLASS_NO_COPY(VmaPool_T)
6643 public:
6644  VmaBlockVector m_BlockVector;
6645 
6646  VmaPool_T(
6647  VmaAllocator hAllocator,
6648  const VmaPoolCreateInfo& createInfo,
6649  VkDeviceSize preferredBlockSize);
6650  ~VmaPool_T();
6651 
6652  uint32_t GetId() const { return m_Id; }
6653  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6654 
6655  const char* GetName() const { return m_Name; }
6656  void SetName(const char* pName);
6657 
6658 #if VMA_STATS_STRING_ENABLED
6659  //void PrintDetailedMap(class VmaStringBuilder& sb);
6660 #endif
6661 
6662 private:
6663  uint32_t m_Id;
6664  char* m_Name;
6665 };
6666 
6667 /*
6668 Performs defragmentation:
6669 
6670 - Updates `pBlockVector->m_pMetadata`.
6671 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6672 - Does not move actual data, only returns requested moves as `moves`.
6673 */
6674 class VmaDefragmentationAlgorithm
6675 {
6676  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6677 public:
6678  VmaDefragmentationAlgorithm(
6679  VmaAllocator hAllocator,
6680  VmaBlockVector* pBlockVector,
6681  uint32_t currentFrameIndex) :
6682  m_hAllocator(hAllocator),
6683  m_pBlockVector(pBlockVector),
6684  m_CurrentFrameIndex(currentFrameIndex)
6685  {
6686  }
6687  virtual ~VmaDefragmentationAlgorithm()
6688  {
6689  }
6690 
6691  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6692  virtual void AddAll() = 0;
6693 
6694  virtual VkResult Defragment(
6695  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6696  VkDeviceSize maxBytesToMove,
6697  uint32_t maxAllocationsToMove,
6698  VmaDefragmentationFlags flags) = 0;
6699 
6700  virtual VkDeviceSize GetBytesMoved() const = 0;
6701  virtual uint32_t GetAllocationsMoved() const = 0;
6702 
6703 protected:
6704  VmaAllocator const m_hAllocator;
6705  VmaBlockVector* const m_pBlockVector;
6706  const uint32_t m_CurrentFrameIndex;
6707 
6708  struct AllocationInfo
6709  {
6710  VmaAllocation m_hAllocation;
6711  VkBool32* m_pChanged;
6712 
6713  AllocationInfo() :
6714  m_hAllocation(VK_NULL_HANDLE),
6715  m_pChanged(VMA_NULL)
6716  {
6717  }
6718  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6719  m_hAllocation(hAlloc),
6720  m_pChanged(pChanged)
6721  {
6722  }
6723  };
6724 };
6725 
6726 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6727 {
6728  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6729 public:
6730  VmaDefragmentationAlgorithm_Generic(
6731  VmaAllocator hAllocator,
6732  VmaBlockVector* pBlockVector,
6733  uint32_t currentFrameIndex,
6734  bool overlappingMoveSupported);
6735  virtual ~VmaDefragmentationAlgorithm_Generic();
6736 
6737  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6738  virtual void AddAll() { m_AllAllocations = true; }
6739 
6740  virtual VkResult Defragment(
6741  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6742  VkDeviceSize maxBytesToMove,
6743  uint32_t maxAllocationsToMove,
6744  VmaDefragmentationFlags flags);
6745 
6746  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6747  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6748 
6749 private:
6750  uint32_t m_AllocationCount;
6751  bool m_AllAllocations;
6752 
6753  VkDeviceSize m_BytesMoved;
6754  uint32_t m_AllocationsMoved;
6755 
6756  struct AllocationInfoSizeGreater
6757  {
6758  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6759  {
6760  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6761  }
6762  };
6763 
6764  struct AllocationInfoOffsetGreater
6765  {
6766  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6767  {
6768  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6769  }
6770  };
6771 
6772  struct BlockInfo
6773  {
6774  size_t m_OriginalBlockIndex;
6775  VmaDeviceMemoryBlock* m_pBlock;
6776  bool m_HasNonMovableAllocations;
6777  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6778 
6779  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6780  m_OriginalBlockIndex(SIZE_MAX),
6781  m_pBlock(VMA_NULL),
6782  m_HasNonMovableAllocations(true),
6783  m_Allocations(pAllocationCallbacks)
6784  {
6785  }
6786 
6787  void CalcHasNonMovableAllocations()
6788  {
6789  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6790  const size_t defragmentAllocCount = m_Allocations.size();
6791  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6792  }
6793 
6794  void SortAllocationsBySizeDescending()
6795  {
6796  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6797  }
6798 
6799  void SortAllocationsByOffsetDescending()
6800  {
6801  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6802  }
6803  };
6804 
6805  struct BlockPointerLess
6806  {
6807  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6808  {
6809  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6810  }
6811  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6812  {
6813  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6814  }
6815  };
6816 
6817  // 1. Blocks with some non-movable allocations go first.
6818  // 2. Blocks with smaller sumFreeSize go first.
6819  struct BlockInfoCompareMoveDestination
6820  {
6821  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6822  {
6823  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6824  {
6825  return true;
6826  }
6827  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6828  {
6829  return false;
6830  }
6831  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6832  {
6833  return true;
6834  }
6835  return false;
6836  }
6837  };
6838 
6839  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6840  BlockInfoVector m_Blocks;
6841 
6842  VkResult DefragmentRound(
6843  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6844  VkDeviceSize maxBytesToMove,
6845  uint32_t maxAllocationsToMove,
6846  bool freeOldAllocations);
6847 
6848  size_t CalcBlocksWithNonMovableCount() const;
6849 
6850  static bool MoveMakesSense(
6851  size_t dstBlockIndex, VkDeviceSize dstOffset,
6852  size_t srcBlockIndex, VkDeviceSize srcOffset);
6853 };
6854 
6855 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6856 {
6857  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6858 public:
6859  VmaDefragmentationAlgorithm_Fast(
6860  VmaAllocator hAllocator,
6861  VmaBlockVector* pBlockVector,
6862  uint32_t currentFrameIndex,
6863  bool overlappingMoveSupported);
6864  virtual ~VmaDefragmentationAlgorithm_Fast();
6865 
6866  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6867  virtual void AddAll() { m_AllAllocations = true; }
6868 
6869  virtual VkResult Defragment(
6870  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6871  VkDeviceSize maxBytesToMove,
6872  uint32_t maxAllocationsToMove,
6873  VmaDefragmentationFlags flags);
6874 
6875  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6876  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6877 
6878 private:
6879  struct BlockInfo
6880  {
6881  size_t origBlockIndex;
6882  };
6883 
6884  class FreeSpaceDatabase
6885  {
6886  public:
6887  FreeSpaceDatabase()
6888  {
6889  FreeSpace s = {};
6890  s.blockInfoIndex = SIZE_MAX;
6891  for(size_t i = 0; i < MAX_COUNT; ++i)
6892  {
6893  m_FreeSpaces[i] = s;
6894  }
6895  }
6896 
6897  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6898  {
6899  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6900  {
6901  return;
6902  }
6903 
6904  // Find the first invalid (empty) entry, or failing that, the smallest one.
6905  size_t bestIndex = SIZE_MAX;
6906  for(size_t i = 0; i < MAX_COUNT; ++i)
6907  {
6908  // Empty structure.
6909  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6910  {
6911  bestIndex = i;
6912  break;
6913  }
6914  if(m_FreeSpaces[i].size < size &&
6915  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6916  {
6917  bestIndex = i;
6918  }
6919  }
6920 
6921  if(bestIndex != SIZE_MAX)
6922  {
6923  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6924  m_FreeSpaces[bestIndex].offset = offset;
6925  m_FreeSpaces[bestIndex].size = size;
6926  }
6927  }
6928 
6929  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6930  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6931  {
6932  size_t bestIndex = SIZE_MAX;
6933  VkDeviceSize bestFreeSpaceAfter = 0;
6934  for(size_t i = 0; i < MAX_COUNT; ++i)
6935  {
6936  // Structure is valid.
6937  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6938  {
6939  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6940  // Allocation fits into this structure.
6941  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6942  {
6943  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6944  (dstOffset + size);
6945  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6946  {
6947  bestIndex = i;
6948  bestFreeSpaceAfter = freeSpaceAfter;
6949  }
6950  }
6951  }
6952  }
6953 
6954  if(bestIndex != SIZE_MAX)
6955  {
6956  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6957  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6958 
6959  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6960  {
6961  // Leave this structure for remaining empty space.
6962  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6963  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6964  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6965  }
6966  else
6967  {
6968  // This structure becomes invalid.
6969  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6970  }
6971 
6972  return true;
6973  }
6974 
6975  return false;
6976  }
6977 
6978  private:
6979  static const size_t MAX_COUNT = 4;
6980 
6981  struct FreeSpace
6982  {
6983  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6984  VkDeviceSize offset;
6985  VkDeviceSize size;
6986  } m_FreeSpaces[MAX_COUNT];
6987  };
6988 
6989  const bool m_OverlappingMoveSupported;
6990 
6991  uint32_t m_AllocationCount;
6992  bool m_AllAllocations;
6993 
6994  VkDeviceSize m_BytesMoved;
6995  uint32_t m_AllocationsMoved;
6996 
6997  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6998 
6999  void PreprocessMetadata();
7000  void PostprocessMetadata();
7001  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7002 };
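// Illustrative walk-through of the FreeSpaceDatabase above (numbers
// hypothetical). Register(blockInfoIndex = 0, offset = 100, size = 1000)
// records the hole [100, 1100). A later Fetch(alignment = 256, size = 512)
// aligns 100 up to 256, verifies 256 + 512 <= 1100, and returns offset 256.
// The remaining tail [768, 1100) is 332 bytes, which exceeds
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, so the entry is shrunk and
// kept rather than invalidated.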
7003 
7004 struct VmaBlockDefragmentationContext
7005 {
7006  enum BLOCK_FLAG
7007  {
7008  BLOCK_FLAG_USED = 0x00000001,
7009  };
7010  uint32_t flags;
7011  VkBuffer hBuffer;
7012 };
7013 
7014 class VmaBlockVectorDefragmentationContext
7015 {
7016  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7017 public:
7018  VkResult res;
7019  bool mutexLocked;
7020  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7021  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7022  uint32_t defragmentationMovesProcessed;
7023  uint32_t defragmentationMovesCommitted;
7024  bool hasDefragmentationPlan;
7025 
7026  VmaBlockVectorDefragmentationContext(
7027  VmaAllocator hAllocator,
7028  VmaPool hCustomPool, // Optional.
7029  VmaBlockVector* pBlockVector,
7030  uint32_t currFrameIndex);
7031  ~VmaBlockVectorDefragmentationContext();
7032 
7033  VmaPool GetCustomPool() const { return m_hCustomPool; }
7034  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7035  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7036 
7037  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7038  void AddAll() { m_AllAllocations = true; }
7039 
7040  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7041 
7042 private:
7043  const VmaAllocator m_hAllocator;
7044  // Null if not from custom pool.
7045  const VmaPool m_hCustomPool;
7046  // Redundant, kept for convenience so we don't have to fetch it from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7047  VmaBlockVector* const m_pBlockVector;
7048  const uint32_t m_CurrFrameIndex;
7049  // Owner of this object.
7050  VmaDefragmentationAlgorithm* m_pAlgorithm;
7051 
7052  struct AllocInfo
7053  {
7054  VmaAllocation hAlloc;
7055  VkBool32* pChanged;
7056  };
7057  // Used between constructor and Begin.
7058  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7059  bool m_AllAllocations;
7060 };
7061 
7062 struct VmaDefragmentationContext_T
7063 {
7064 private:
7065  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7066 public:
7067  VmaDefragmentationContext_T(
7068  VmaAllocator hAllocator,
7069  uint32_t currFrameIndex,
7070  uint32_t flags,
7071  VmaDefragmentationStats* pStats);
7072  ~VmaDefragmentationContext_T();
7073 
7074  void AddPools(uint32_t poolCount, VmaPool* pPools);
7075  void AddAllocations(
7076  uint32_t allocationCount,
7077  VmaAllocation* pAllocations,
7078  VkBool32* pAllocationsChanged);
7079 
7080  /*
7081  Returns:
7082  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
7083  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7084  - Negative value if error occurred and the object can be destroyed immediately.
7085  */
7086  VkResult Defragment(
7087  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7088  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7089  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7090 
7091  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7092  VkResult DefragmentPassEnd();
7093 
7094 private:
7095  const VmaAllocator m_hAllocator;
7096  const uint32_t m_CurrFrameIndex;
7097  const uint32_t m_Flags;
7098  VmaDefragmentationStats* const m_pStats;
7099 
7100  VkDeviceSize m_MaxCpuBytesToMove;
7101  uint32_t m_MaxCpuAllocationsToMove;
7102  VkDeviceSize m_MaxGpuBytesToMove;
7103  uint32_t m_MaxGpuAllocationsToMove;
7104 
7105  // Owner of these objects.
7106  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7107  // Owner of these objects.
7108  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7109 };
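// Illustrative use of the public entry points implemented by this context
// (CPU-side moves only; `allocator`, `allocs`, `changed` are hypothetical):
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = (uint32_t)allocs.size();
//   info.pAllocations = allocs.data();
//   info.pAllocationsChanged = changed.data();
//   info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   info.maxCpuAllocationsToMove = UINT32_MAX;
//   VmaDefragmentationContext ctx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
//   // VK_NOT_READY means the context must stay alive until the end call.
//   vmaDefragmentationEnd(allocator, ctx);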
7110 
7111 #if VMA_RECORDING_ENABLED
7112 
7113 class VmaRecorder
7114 {
7115 public:
7116  VmaRecorder();
7117  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7118  void WriteConfiguration(
7119  const VkPhysicalDeviceProperties& devProps,
7120  const VkPhysicalDeviceMemoryProperties& memProps,
7121  uint32_t vulkanApiVersion,
7122  bool dedicatedAllocationExtensionEnabled,
7123  bool bindMemory2ExtensionEnabled,
7124  bool memoryBudgetExtensionEnabled,
7125  bool deviceCoherentMemoryExtensionEnabled);
7126  ~VmaRecorder();
7127 
7128  void RecordCreateAllocator(uint32_t frameIndex);
7129  void RecordDestroyAllocator(uint32_t frameIndex);
7130  void RecordCreatePool(uint32_t frameIndex,
7131  const VmaPoolCreateInfo& createInfo,
7132  VmaPool pool);
7133  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7134  void RecordAllocateMemory(uint32_t frameIndex,
7135  const VkMemoryRequirements& vkMemReq,
7136  const VmaAllocationCreateInfo& createInfo,
7137  VmaAllocation allocation);
7138  void RecordAllocateMemoryPages(uint32_t frameIndex,
7139  const VkMemoryRequirements& vkMemReq,
7140  const VmaAllocationCreateInfo& createInfo,
7141  uint64_t allocationCount,
7142  const VmaAllocation* pAllocations);
7143  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7144  const VkMemoryRequirements& vkMemReq,
7145  bool requiresDedicatedAllocation,
7146  bool prefersDedicatedAllocation,
7147  const VmaAllocationCreateInfo& createInfo,
7148  VmaAllocation allocation);
7149  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7150  const VkMemoryRequirements& vkMemReq,
7151  bool requiresDedicatedAllocation,
7152  bool prefersDedicatedAllocation,
7153  const VmaAllocationCreateInfo& createInfo,
7154  VmaAllocation allocation);
7155  void RecordFreeMemory(uint32_t frameIndex,
7156  VmaAllocation allocation);
7157  void RecordFreeMemoryPages(uint32_t frameIndex,
7158  uint64_t allocationCount,
7159  const VmaAllocation* pAllocations);
7160  void RecordSetAllocationUserData(uint32_t frameIndex,
7161  VmaAllocation allocation,
7162  const void* pUserData);
7163  void RecordCreateLostAllocation(uint32_t frameIndex,
7164  VmaAllocation allocation);
7165  void RecordMapMemory(uint32_t frameIndex,
7166  VmaAllocation allocation);
7167  void RecordUnmapMemory(uint32_t frameIndex,
7168  VmaAllocation allocation);
7169  void RecordFlushAllocation(uint32_t frameIndex,
7170  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7171  void RecordInvalidateAllocation(uint32_t frameIndex,
7172  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7173  void RecordCreateBuffer(uint32_t frameIndex,
7174  const VkBufferCreateInfo& bufCreateInfo,
7175  const VmaAllocationCreateInfo& allocCreateInfo,
7176  VmaAllocation allocation);
7177  void RecordCreateImage(uint32_t frameIndex,
7178  const VkImageCreateInfo& imageCreateInfo,
7179  const VmaAllocationCreateInfo& allocCreateInfo,
7180  VmaAllocation allocation);
7181  void RecordDestroyBuffer(uint32_t frameIndex,
7182  VmaAllocation allocation);
7183  void RecordDestroyImage(uint32_t frameIndex,
7184  VmaAllocation allocation);
7185  void RecordTouchAllocation(uint32_t frameIndex,
7186  VmaAllocation allocation);
7187  void RecordGetAllocationInfo(uint32_t frameIndex,
7188  VmaAllocation allocation);
7189  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7190  VmaPool pool);
7191  void RecordDefragmentationBegin(uint32_t frameIndex,
7192  const VmaDefragmentationInfo2& info,
7193  VmaDefragmentationContext ctx);
7194  void RecordDefragmentationEnd(uint32_t frameIndex,
7195  VmaDefragmentationContext ctx);
7196  void RecordSetPoolName(uint32_t frameIndex,
7197  VmaPool pool,
7198  const char* name);
7199 
7200 private:
7201  struct CallParams
7202  {
7203  uint32_t threadId;
7204  double time;
7205  };
7206 
7207  class UserDataString
7208  {
7209  public:
7210  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7211  const char* GetString() const { return m_Str; }
7212 
7213  private:
7214  char m_PtrStr[17];
7215  const char* m_Str;
7216  };
7217 
7218  bool m_UseMutex;
7219  VmaRecordFlags m_Flags;
7220  FILE* m_File;
7221  VMA_MUTEX m_FileMutex;
7222  int64_t m_Freq;
7223  int64_t m_StartCounter;
7224 
7225  void GetBasicParams(CallParams& outParams);
7226 
7227  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7228  template<typename T>
7229  void PrintPointerList(uint64_t count, const T* pItems)
7230  {
7231  if(count)
7232  {
7233  fprintf(m_File, "%p", pItems[0]);
7234  for(uint64_t i = 1; i < count; ++i)
7235  {
7236  fprintf(m_File, " %p", pItems[i]);
7237  }
7238  }
7239  }
7240 
7241  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7242  void Flush();
7243 };
7244 
7245 #endif // #if VMA_RECORDING_ENABLED
7246 
7247 /*
7248 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7249 */
7250 class VmaAllocationObjectAllocator
7251 {
7252  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7253 public:
7254  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7255 
7256  template<typename... Types> VmaAllocation Allocate(Types... args);
7257  void Free(VmaAllocation hAlloc);
7258 
7259 private:
7260  VMA_MUTEX m_Mutex;
7261  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7262 };
7263 
7264 struct VmaCurrentBudgetData
7265 {
7266  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7267  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7268 
7269 #if VMA_MEMORY_BUDGET
7270  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7271  VMA_RW_MUTEX m_BudgetMutex;
7272  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7273  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7274  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7275 #endif // #if VMA_MEMORY_BUDGET
7276 
7277  VmaCurrentBudgetData()
7278  {
7279  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7280  {
7281  m_BlockBytes[heapIndex] = 0;
7282  m_AllocationBytes[heapIndex] = 0;
7283 #if VMA_MEMORY_BUDGET
7284  m_VulkanUsage[heapIndex] = 0;
7285  m_VulkanBudget[heapIndex] = 0;
7286  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7287 #endif
7288  }
7289 
7290 #if VMA_MEMORY_BUDGET
7291  m_OperationsSinceBudgetFetch = 0;
7292 #endif
7293  }
7294 
7295  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7296  {
7297  m_AllocationBytes[heapIndex] += allocationSize;
7298 #if VMA_MEMORY_BUDGET
7299  ++m_OperationsSinceBudgetFetch;
7300 #endif
7301  }
7302 
7303  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7304  {
7305  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
7306  m_AllocationBytes[heapIndex] -= allocationSize;
7307 #if VMA_MEMORY_BUDGET
7308  ++m_OperationsSinceBudgetFetch;
7309 #endif
7310  }
7311 };
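// Illustrative accounting flow (sizes hypothetical): allocating a 64 KiB
// buffer on heap 0 calls AddAllocation(0, 65536), atomically bumping
// m_AllocationBytes[0]; freeing it calls RemoveAllocation(0, 65536).
// m_BlockBytes counts whole VkDeviceMemory blocks while m_AllocationBytes
// counts the suballocations living inside them, so for every heap
// m_AllocationBytes[h] <= m_BlockBytes[h].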
7312 
7313 // Main allocator object.
7314 struct VmaAllocator_T
7315 {
7316  VMA_CLASS_NO_COPY(VmaAllocator_T)
7317 public:
7318  bool m_UseMutex;
7319  uint32_t m_VulkanApiVersion;
7320  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7321  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7322  bool m_UseExtMemoryBudget;
7323  bool m_UseAmdDeviceCoherentMemory;
7324  bool m_UseKhrBufferDeviceAddress;
7325  VkDevice m_hDevice;
7326  VkInstance m_hInstance;
7327  bool m_AllocationCallbacksSpecified;
7328  VkAllocationCallbacks m_AllocationCallbacks;
7329  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7330  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7331 
7332  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so we cannot allocate more than the heap size from it.
7333  uint32_t m_HeapSizeLimitMask;
7334 
7335  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7336  VkPhysicalDeviceMemoryProperties m_MemProps;
7337 
7338  // Default pools.
7339  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7340 
7341  // Each vector is sorted by memory (handle value).
7342  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7343  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7344  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7345 
7346  VmaCurrentBudgetData m_Budget;
7347 
7348  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7349  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7350  ~VmaAllocator_T();
7351 
7352  const VkAllocationCallbacks* GetAllocationCallbacks() const
7353  {
7354  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7355  }
7356  const VmaVulkanFunctions& GetVulkanFunctions() const
7357  {
7358  return m_VulkanFunctions;
7359  }
7360 
7361  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7362 
7363  VkDeviceSize GetBufferImageGranularity() const
7364  {
7365  return VMA_MAX(
7366  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7367  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7368  }
7369 
7370  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7371  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7372 
7373  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7374  {
7375  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7376  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7377  }
7378  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7379  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7380  {
7381  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7382  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7383  }
7384  // Minimum alignment for all allocations in specific memory type.
7385  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7386  {
7387  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7388  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7389  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7390  }
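 // Worked example for the two helpers above (device values hypothetical):
 // for a memory type that is HOST_VISIBLE but not HOST_COHERENT on a device
 // with nonCoherentAtomSize = 64, IsMemoryTypeNonCoherent() returns true and
 // GetMemoryTypeMinAlignment() returns max(VMA_DEBUG_ALIGNMENT, 64) = 64,
 // matching the alignment Vulkan requires for flush/invalidate ranges of
 // non-coherent mapped memory.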
7391 
7392  bool IsIntegratedGpu() const
7393  {
7394  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7395  }
7396 
7397  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7398 
7399 #if VMA_RECORDING_ENABLED
7400  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7401 #endif
7402 
7403  void GetBufferMemoryRequirements(
7404  VkBuffer hBuffer,
7405  VkMemoryRequirements& memReq,
7406  bool& requiresDedicatedAllocation,
7407  bool& prefersDedicatedAllocation) const;
7408  void GetImageMemoryRequirements(
7409  VkImage hImage,
7410  VkMemoryRequirements& memReq,
7411  bool& requiresDedicatedAllocation,
7412  bool& prefersDedicatedAllocation) const;
7413 
7414  // Main allocation function.
7415  VkResult AllocateMemory(
7416  const VkMemoryRequirements& vkMemReq,
7417  bool requiresDedicatedAllocation,
7418  bool prefersDedicatedAllocation,
7419  VkBuffer dedicatedBuffer,
7420  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7421  VkImage dedicatedImage,
7422  const VmaAllocationCreateInfo& createInfo,
7423  VmaSuballocationType suballocType,
7424  size_t allocationCount,
7425  VmaAllocation* pAllocations);
7426 
7427  // Main deallocation function.
7428  void FreeMemory(
7429  size_t allocationCount,
7430  const VmaAllocation* pAllocations);
7431 
7432  VkResult ResizeAllocation(
7433  const VmaAllocation alloc,
7434  VkDeviceSize newSize);
7435 
7436  void CalculateStats(VmaStats* pStats);
7437 
7438  void GetBudget(
7439  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7440 
7441 #if VMA_STATS_STRING_ENABLED
7442  void PrintDetailedMap(class VmaJsonWriter& json);
7443 #endif
7444 
7445  VkResult DefragmentationBegin(
7446  const VmaDefragmentationInfo2& info,
7447  VmaDefragmentationStats* pStats,
7448  VmaDefragmentationContext* pContext);
7449  VkResult DefragmentationEnd(
7450  VmaDefragmentationContext context);
7451 
7452  VkResult DefragmentationPassBegin(
7453  VmaDefragmentationPassInfo* pInfo,
7454  VmaDefragmentationContext context);
7455  VkResult DefragmentationPassEnd(
7456  VmaDefragmentationContext context);
7457 
7458  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7459  bool TouchAllocation(VmaAllocation hAllocation);
7460 
7461  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7462  void DestroyPool(VmaPool pool);
7463  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7464 
7465  void SetCurrentFrameIndex(uint32_t frameIndex);
7466  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7467 
7468  void MakePoolAllocationsLost(
7469  VmaPool hPool,
7470  size_t* pLostAllocationCount);
7471  VkResult CheckPoolCorruption(VmaPool hPool);
7472  VkResult CheckCorruption(uint32_t memoryTypeBits);
7473 
7474  void CreateLostAllocation(VmaAllocation* pAllocation);
7475 
7476  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7477  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7478  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7479  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7480  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7481  VkResult BindVulkanBuffer(
7482  VkDeviceMemory memory,
7483  VkDeviceSize memoryOffset,
7484  VkBuffer buffer,
7485  const void* pNext);
7486  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7487  VkResult BindVulkanImage(
7488  VkDeviceMemory memory,
7489  VkDeviceSize memoryOffset,
7490  VkImage image,
7491  const void* pNext);
7492 
7493  VkResult Map(VmaAllocation hAllocation, void** ppData);
7494  void Unmap(VmaAllocation hAllocation);
7495 
7496  VkResult BindBufferMemory(
7497  VmaAllocation hAllocation,
7498  VkDeviceSize allocationLocalOffset,
7499  VkBuffer hBuffer,
7500  const void* pNext);
7501  VkResult BindImageMemory(
7502  VmaAllocation hAllocation,
7503  VkDeviceSize allocationLocalOffset,
7504  VkImage hImage,
7505  const void* pNext);
7506 
7507  void FlushOrInvalidateAllocation(
7508  VmaAllocation hAllocation,
7509  VkDeviceSize offset, VkDeviceSize size,
7510  VMA_CACHE_OPERATION op);
7511 
7512  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7513 
7514  /*
7515  Returns bit mask of memory types that can support defragmentation on GPU as
7516  they support creation of required buffer for copy operations.
7517  */
7518  uint32_t GetGpuDefragmentationMemoryTypeBits();
7519 
7520 private:
7521  VkDeviceSize m_PreferredLargeHeapBlockSize;
7522 
7523  VkPhysicalDevice m_PhysicalDevice;
7524  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7525  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7526 
7527  VMA_RW_MUTEX m_PoolsMutex;
7528  // Protected by m_PoolsMutex. Sorted by pointer value.
7529  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7530  uint32_t m_NextPoolId;
7531 
7532  VmaVulkanFunctions m_VulkanFunctions;
7533 
7534  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7535  uint32_t m_GlobalMemoryTypeBits;
7536 
7537 #if VMA_RECORDING_ENABLED
7538  VmaRecorder* m_pRecorder;
7539 #endif
7540 
7541  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7542 
7543  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7544 
7545  VkResult AllocateMemoryOfType(
7546  VkDeviceSize size,
7547  VkDeviceSize alignment,
7548  bool dedicatedAllocation,
7549  VkBuffer dedicatedBuffer,
7550  VkBufferUsageFlags dedicatedBufferUsage,
7551  VkImage dedicatedImage,
7552  const VmaAllocationCreateInfo& createInfo,
7553  uint32_t memTypeIndex,
7554  VmaSuballocationType suballocType,
7555  size_t allocationCount,
7556  VmaAllocation* pAllocations);
7557 
7558  // Helper function only to be used inside AllocateDedicatedMemory.
7559  VkResult AllocateDedicatedMemoryPage(
7560  VkDeviceSize size,
7561  VmaSuballocationType suballocType,
7562  uint32_t memTypeIndex,
7563  const VkMemoryAllocateInfo& allocInfo,
7564  bool map,
7565  bool isUserDataString,
7566  void* pUserData,
7567  VmaAllocation* pAllocation);
7568 
7569  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7570  VkResult AllocateDedicatedMemory(
7571  VkDeviceSize size,
7572  VmaSuballocationType suballocType,
7573  uint32_t memTypeIndex,
7574  bool withinBudget,
7575  bool map,
7576  bool isUserDataString,
7577  void* pUserData,
7578  VkBuffer dedicatedBuffer,
7579  VkBufferUsageFlags dedicatedBufferUsage,
7580  VkImage dedicatedImage,
7581  size_t allocationCount,
7582  VmaAllocation* pAllocations);
7583 
7584  void FreeDedicatedMemory(const VmaAllocation allocation);
7585 
7586  /*
7587  Calculates and returns bit mask of memory types that can support defragmentation
7588  on GPU as they support creation of required buffer for copy operations.
7589  */
7590  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7591 
7592  uint32_t CalculateGlobalMemoryTypeBits() const;
7593 
7594 #if VMA_MEMORY_BUDGET
7595  void UpdateVulkanBudget();
7596 #endif // #if VMA_MEMORY_BUDGET
7597 };
7598 
7599 ////////////////////////////////////////////////////////////////////////////////
7600 // Memory allocation #2 after VmaAllocator_T definition
7601 
7602 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7603 {
7604  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7605 }
7606 
7607 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7608 {
7609  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7610 }
7611 
7612 template<typename T>
7613 static T* VmaAllocate(VmaAllocator hAllocator)
7614 {
7615  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7616 }
7617 
7618 template<typename T>
7619 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7620 {
7621  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7622 }
7623 
7624 template<typename T>
7625 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7626 {
7627  if(ptr != VMA_NULL)
7628  {
7629  ptr->~T();
7630  VmaFree(hAllocator, ptr);
7631  }
7632 }
7633 
7634 template<typename T>
7635 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7636 {
7637  if(ptr != VMA_NULL)
7638  {
7639  for(size_t i = count; i--; )
7640  ptr[i].~T();
7641  VmaFree(hAllocator, ptr);
7642  }
7643 }
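// Illustrative pairing of the helpers above (`Foo` and `args` hypothetical):
// storage comes from the user-provided VkAllocationCallbacks, the object is
// constructed with placement new, and vma_delete runs the destructor before
// returning the storage through the same callbacks.
//
//   Foo* p = new(VmaAllocate<Foo>(hAllocator)) Foo(args);
//   vma_delete(hAllocator, p);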
7644 
7645 ////////////////////////////////////////////////////////////////////////////////
7646 // VmaStringBuilder
7647 
7648 #if VMA_STATS_STRING_ENABLED
7649 
7650 class VmaStringBuilder
7651 {
7652 public:
7653  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7654  size_t GetLength() const { return m_Data.size(); }
7655  const char* GetData() const { return m_Data.data(); }
7656 
7657  void Add(char ch) { m_Data.push_back(ch); }
7658  void Add(const char* pStr);
7659  void AddNewLine() { Add('\n'); }
7660  void AddNumber(uint32_t num);
7661  void AddNumber(uint64_t num);
7662  void AddPointer(const void* ptr);
7663 
7664 private:
7665  VmaVector< char, VmaStlAllocator<char> > m_Data;
7666 };
7667 
7668 void VmaStringBuilder::Add(const char* pStr)
7669 {
7670  const size_t strLen = strlen(pStr);
7671  if(strLen > 0)
7672  {
7673  const size_t oldCount = m_Data.size();
7674  m_Data.resize(oldCount + strLen);
7675  memcpy(m_Data.data() + oldCount, pStr, strLen);
7676  }
7677 }
7678 
7679 void VmaStringBuilder::AddNumber(uint32_t num)
7680 {
7681  char buf[11];
7682  buf[10] = '\0';
7683  char *p = &buf[10];
7684  do
7685  {
7686  *--p = '0' + (num % 10);
7687  num /= 10;
7688  }
7689  while(num);
7690  Add(p);
7691 }
7692 
7693 void VmaStringBuilder::AddNumber(uint64_t num)
7694 {
7695  char buf[21];
7696  buf[20] = '\0';
7697  char *p = &buf[20];
7698  do
7699  {
7700  *--p = '0' + (num % 10);
7701  num /= 10;
7702  }
7703  while(num);
7704  Add(p);
7705 }
7706 
7707 void VmaStringBuilder::AddPointer(const void* ptr)
7708 {
7709  char buf[21];
7710  VmaPtrToStr(buf, sizeof(buf), ptr);
7711  Add(buf);
7712 }
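// Illustrative use (produces the characters "Allocations: 42"). Note the
// buffer is not null-terminated, so consumers pair GetData() with GetLength().
//
//   VmaStringBuilder sb(hAllocator);
//   sb.Add("Allocations: ");
//   sb.AddNumber(42u);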
7713 
7714 #endif // #if VMA_STATS_STRING_ENABLED
7715 
7716 ////////////////////////////////////////////////////////////////////////////////
7717 // VmaJsonWriter
7718 
7719 #if VMA_STATS_STRING_ENABLED
7720 
7721 class VmaJsonWriter
7722 {
7723  VMA_CLASS_NO_COPY(VmaJsonWriter)
7724 public:
7725  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7726  ~VmaJsonWriter();
7727 
7728  void BeginObject(bool singleLine = false);
7729  void EndObject();
7730 
7731  void BeginArray(bool singleLine = false);
7732  void EndArray();
7733 
7734  void WriteString(const char* pStr);
7735  void BeginString(const char* pStr = VMA_NULL);
7736  void ContinueString(const char* pStr);
7737  void ContinueString(uint32_t n);
7738  void ContinueString(uint64_t n);
7739  void ContinueString_Pointer(const void* ptr);
7740  void EndString(const char* pStr = VMA_NULL);
7741 
7742  void WriteNumber(uint32_t n);
7743  void WriteNumber(uint64_t n);
7744  void WriteBool(bool b);
7745  void WriteNull();
7746 
7747 private:
7748  static const char* const INDENT;
7749 
7750  enum COLLECTION_TYPE
7751  {
7752  COLLECTION_TYPE_OBJECT,
7753  COLLECTION_TYPE_ARRAY,
7754  };
7755  struct StackItem
7756  {
7757  COLLECTION_TYPE type;
7758  uint32_t valueCount;
7759  bool singleLineMode;
7760  };
7761 
7762  VmaStringBuilder& m_SB;
7763  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7764  bool m_InsideString;
7765 
7766  void BeginValue(bool isString);
7767  void WriteIndent(bool oneLess = false);
7768 };
7769 
7770 const char* const VmaJsonWriter::INDENT = " ";
7771 
7772 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7773  m_SB(sb),
7774  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7775  m_InsideString(false)
7776 {
7777 }
7778 
7779 VmaJsonWriter::~VmaJsonWriter()
7780 {
7781  VMA_ASSERT(!m_InsideString);
7782  VMA_ASSERT(m_Stack.empty());
7783 }
7784 
7785 void VmaJsonWriter::BeginObject(bool singleLine)
7786 {
7787  VMA_ASSERT(!m_InsideString);
7788 
7789  BeginValue(false);
7790  m_SB.Add('{');
7791 
7792  StackItem item;
7793  item.type = COLLECTION_TYPE_OBJECT;
7794  item.valueCount = 0;
7795  item.singleLineMode = singleLine;
7796  m_Stack.push_back(item);
7797 }
7798 
7799 void VmaJsonWriter::EndObject()
7800 {
7801  VMA_ASSERT(!m_InsideString);
7802 
7803  WriteIndent(true);
7804  m_SB.Add('}');
7805 
7806  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7807  m_Stack.pop_back();
7808 }
7809 
7810 void VmaJsonWriter::BeginArray(bool singleLine)
7811 {
7812  VMA_ASSERT(!m_InsideString);
7813 
7814  BeginValue(false);
7815  m_SB.Add('[');
7816 
7817  StackItem item;
7818  item.type = COLLECTION_TYPE_ARRAY;
7819  item.valueCount = 0;
7820  item.singleLineMode = singleLine;
7821  m_Stack.push_back(item);
7822 }
7823 
7824 void VmaJsonWriter::EndArray()
7825 {
7826  VMA_ASSERT(!m_InsideString);
7827 
7828  WriteIndent(true);
7829  m_SB.Add(']');
7830 
7831  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7832  m_Stack.pop_back();
7833 }
7834 
7835 void VmaJsonWriter::WriteString(const char* pStr)
7836 {
7837  BeginString(pStr);
7838  EndString();
7839 }
7840 
7841 void VmaJsonWriter::BeginString(const char* pStr)
7842 {
7843  VMA_ASSERT(!m_InsideString);
7844 
7845  BeginValue(true);
7846  m_SB.Add('"');
7847  m_InsideString = true;
7848  if(pStr != VMA_NULL && pStr[0] != '\0')
7849  {
7850  ContinueString(pStr);
7851  }
7852 }
7853 
7854 void VmaJsonWriter::ContinueString(const char* pStr)
7855 {
7856  VMA_ASSERT(m_InsideString);
7857 
7858  const size_t strLen = strlen(pStr);
7859  for(size_t i = 0; i < strLen; ++i)
7860  {
7861  char ch = pStr[i];
7862  if(ch == '\\')
7863  {
7864  m_SB.Add("\\\\");
7865  }
7866  else if(ch == '"')
7867  {
7868  m_SB.Add("\\\"");
7869  }
7870  else if(ch >= 32)
7871  {
7872  m_SB.Add(ch);
7873  }
7874  else switch(ch)
7875  {
7876  case '\b':
7877  m_SB.Add("\\b");
7878  break;
7879  case '\f':
7880  m_SB.Add("\\f");
7881  break;
7882  case '\n':
7883  m_SB.Add("\\n");
7884  break;
7885  case '\r':
7886  m_SB.Add("\\r");
7887  break;
7888  case '\t':
7889  m_SB.Add("\\t");
7890  break;
7891  default:
7892  VMA_ASSERT(0 && "Character not currently supported.");
7893  break;
7894  }
7895  }
7896 }
7897 
7898 void VmaJsonWriter::ContinueString(uint32_t n)
7899 {
7900  VMA_ASSERT(m_InsideString);
7901  m_SB.AddNumber(n);
7902 }
7903 
7904 void VmaJsonWriter::ContinueString(uint64_t n)
7905 {
7906  VMA_ASSERT(m_InsideString);
7907  m_SB.AddNumber(n);
7908 }
7909 
7910 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7911 {
7912  VMA_ASSERT(m_InsideString);
7913  m_SB.AddPointer(ptr);
7914 }
7915 
7916 void VmaJsonWriter::EndString(const char* pStr)
7917 {
7918  VMA_ASSERT(m_InsideString);
7919  if(pStr != VMA_NULL && pStr[0] != '\0')
7920  {
7921  ContinueString(pStr);
7922  }
7923  m_SB.Add('"');
7924  m_InsideString = false;
7925 }
7926 
7927 void VmaJsonWriter::WriteNumber(uint32_t n)
7928 {
7929  VMA_ASSERT(!m_InsideString);
7930  BeginValue(false);
7931  m_SB.AddNumber(n);
7932 }
7933 
7934 void VmaJsonWriter::WriteNumber(uint64_t n)
7935 {
7936  VMA_ASSERT(!m_InsideString);
7937  BeginValue(false);
7938  m_SB.AddNumber(n);
7939 }
7940 
7941 void VmaJsonWriter::WriteBool(bool b)
7942 {
7943  VMA_ASSERT(!m_InsideString);
7944  BeginValue(false);
7945  m_SB.Add(b ? "true" : "false");
7946 }
7947 
7948 void VmaJsonWriter::WriteNull()
7949 {
7950  VMA_ASSERT(!m_InsideString);
7951  BeginValue(false);
7952  m_SB.Add("null");
7953 }
7954 
7955 void VmaJsonWriter::BeginValue(bool isString)
7956 {
7957  if(!m_Stack.empty())
7958  {
7959  StackItem& currItem = m_Stack.back();
7960  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7961  currItem.valueCount % 2 == 0)
7962  {
7963  VMA_ASSERT(isString);
7964  }
7965 
7966  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7967  currItem.valueCount % 2 != 0)
7968  {
7969  m_SB.Add(": ");
7970  }
7971  else if(currItem.valueCount > 0)
7972  {
7973  m_SB.Add(", ");
7974  WriteIndent();
7975  }
7976  else
7977  {
7978  WriteIndent();
7979  }
7980  ++currItem.valueCount;
7981  }
7982 }
7983 
7984 void VmaJsonWriter::WriteIndent(bool oneLess)
7985 {
7986  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7987  {
7988  m_SB.AddNewLine();
7989 
7990  size_t count = m_Stack.size();
7991  if(count > 0 && oneLess)
7992  {
7993  --count;
7994  }
7995  for(size_t i = 0; i < count; ++i)
7996  {
7997  m_SB.Add(INDENT);
7998  }
7999  }
8000 }
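// Illustrative use of the writer above; the trailing comments sketch the JSON
// it emits. Object keys must be strings: BeginValue() asserts isString
// whenever the value count inside an object is even, i.e. at a key position.
//
//   VmaJsonWriter json(allocCallbacks, sb); // `allocCallbacks`, `sb` hypothetical
//   json.BeginObject();        // {
//   json.WriteString("Heap");  //   "Heap":
//   json.WriteNumber(0u);      //   0
//   json.EndObject();          // }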
8001 
8002 #endif // #if VMA_STATS_STRING_ENABLED
8003 
8004 ////////////////////////////////////////////////////////////////////////////////
8005 
8006 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8007 {
8008  if(IsUserDataString())
8009  {
8010  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8011 
8012  FreeUserDataString(hAllocator);
8013 
8014  if(pUserData != VMA_NULL)
8015  {
8016  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8017  }
8018  }
8019  else
8020  {
8021  m_pUserData = pUserData;
8022  }
8023 }
8024 
8025 void VmaAllocation_T::ChangeBlockAllocation(
8026  VmaAllocator hAllocator,
8027  VmaDeviceMemoryBlock* block,
8028  VkDeviceSize offset)
8029 {
8030  VMA_ASSERT(block != VMA_NULL);
8031  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8032 
8033  // Move mapping reference counter from old block to new block.
8034  if(block != m_BlockAllocation.m_Block)
8035  {
8036  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8037  if(IsPersistentMap())
8038  ++mapRefCount;
8039  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8040  block->Map(hAllocator, mapRefCount, VMA_NULL);
8041  }
8042 
8043  m_BlockAllocation.m_Block = block;
8044  m_BlockAllocation.m_Offset = offset;
8045 }
8046 
8047 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8048 {
8049  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8050  m_BlockAllocation.m_Offset = newOffset;
8051 }
8052 
8053 VkDeviceSize VmaAllocation_T::GetOffset() const
8054 {
8055  switch(m_Type)
8056  {
8057  case ALLOCATION_TYPE_BLOCK:
8058  return m_BlockAllocation.m_Offset;
8059  case ALLOCATION_TYPE_DEDICATED:
8060  return 0;
8061  default:
8062  VMA_ASSERT(0);
8063  return 0;
8064  }
8065 }
8066 
8067 VkDeviceMemory VmaAllocation_T::GetMemory() const
8068 {
8069  switch(m_Type)
8070  {
8071  case ALLOCATION_TYPE_BLOCK:
8072  return m_BlockAllocation.m_Block->GetDeviceMemory();
8073  case ALLOCATION_TYPE_DEDICATED:
8074  return m_DedicatedAllocation.m_hMemory;
8075  default:
8076  VMA_ASSERT(0);
8077  return VK_NULL_HANDLE;
8078  }
8079 }
8080 
8081 void* VmaAllocation_T::GetMappedData() const
8082 {
8083  switch(m_Type)
8084  {
8085  case ALLOCATION_TYPE_BLOCK:
8086  if(m_MapCount != 0)
8087  {
8088  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8089  VMA_ASSERT(pBlockData != VMA_NULL);
8090  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8091  }
8092  else
8093  {
8094  return VMA_NULL;
8095  }
8096  break;
8097  case ALLOCATION_TYPE_DEDICATED:
8098  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8099  return m_DedicatedAllocation.m_pMappedData;
8100  default:
8101  VMA_ASSERT(0);
8102  return VMA_NULL;
8103  }
8104 }
8105 
8106 bool VmaAllocation_T::CanBecomeLost() const
8107 {
8108  switch(m_Type)
8109  {
8110  case ALLOCATION_TYPE_BLOCK:
8111  return m_BlockAllocation.m_CanBecomeLost;
8112  case ALLOCATION_TYPE_DEDICATED:
8113  return false;
8114  default:
8115  VMA_ASSERT(0);
8116  return false;
8117  }
8118 }
8119 
8120 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8121 {
8122  VMA_ASSERT(CanBecomeLost());
8123 
8124  /*
8125  Warning: This is a carefully designed algorithm.
8126  Do not modify unless you really know what you're doing :)
8127  */
8128  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8129  for(;;)
8130  {
8131  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8132  {
8133  VMA_ASSERT(0);
8134  return false;
8135  }
8136  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8137  {
8138  return false;
8139  }
8140  else // Last use time earlier than current time.
8141  {
8142  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8143  {
8144  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8145  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8146  return true;
8147  }
8148  }
8149  }
8150 }
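// The loop above is a lock-free compare-and-swap retry: if another thread
// updated lastUseFrameIndex between the read and the exchange,
// CompareExchangeLastUseFrameIndex fails and reloads localLastUseFrameIndex
// with the fresh value, so the whole decision is re-evaluated and an
// allocation used again this frame is never marked lost from a stale read.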
8151 
8152 #if VMA_STATS_STRING_ENABLED
8153 
8154 // Correspond to values of enum VmaSuballocationType.
8155 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8156  "FREE",
8157  "UNKNOWN",
8158  "BUFFER",
8159  "IMAGE_UNKNOWN",
8160  "IMAGE_LINEAR",
8161  "IMAGE_OPTIMAL",
8162 };
8163 
8164 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8165 {
8166  json.WriteString("Type");
8167  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8168 
8169  json.WriteString("Size");
8170  json.WriteNumber(m_Size);
8171 
8172  if(m_pUserData != VMA_NULL)
8173  {
8174  json.WriteString("UserData");
8175  if(IsUserDataString())
8176  {
8177  json.WriteString((const char*)m_pUserData);
8178  }
8179  else
8180  {
8181  json.BeginString();
8182  json.ContinueString_Pointer(m_pUserData);
8183  json.EndString();
8184  }
8185  }
8186 
8187  json.WriteString("CreationFrameIndex");
8188  json.WriteNumber(m_CreationFrameIndex);
8189 
8190  json.WriteString("LastUseFrameIndex");
8191  json.WriteNumber(GetLastUseFrameIndex());
8192 
8193  if(m_BufferImageUsage != 0)
8194  {
8195  json.WriteString("Usage");
8196  json.WriteNumber(m_BufferImageUsage);
8197  }
8198 }
8199 
8200 #endif
8201 
8202 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8203 {
8204  VMA_ASSERT(IsUserDataString());
8205  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8206  m_pUserData = VMA_NULL;
8207 }
8208 
8209 void VmaAllocation_T::BlockAllocMap()
8210 {
8211  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8212 
8213  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8214  {
8215  ++m_MapCount;
8216  }
8217  else
8218  {
8219  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8220  }
8221 }
8222 
8223 void VmaAllocation_T::BlockAllocUnmap()
8224 {
8225  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8226 
8227  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8228  {
8229  --m_MapCount;
8230  }
8231  else
8232  {
8233  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8234  }
8235 }
8236 
8237 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8238 {
8239  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8240 
8241  if(m_MapCount != 0)
8242  {
8243  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8244  {
8245  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8246  *ppData = m_DedicatedAllocation.m_pMappedData;
8247  ++m_MapCount;
8248  return VK_SUCCESS;
8249  }
8250  else
8251  {
8252  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8253  return VK_ERROR_MEMORY_MAP_FAILED;
8254  }
8255  }
8256  else
8257  {
8258  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8259  hAllocator->m_hDevice,
8260  m_DedicatedAllocation.m_hMemory,
8261  0, // offset
8262  VK_WHOLE_SIZE,
8263  0, // flags
8264  ppData);
8265  if(result == VK_SUCCESS)
8266  {
8267  m_DedicatedAllocation.m_pMappedData = *ppData;
8268  m_MapCount = 1;
8269  }
8270  return result;
8271  }
8272 }
8273 
8274 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8275 {
8276  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8277 
8278  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8279  {
8280  --m_MapCount;
8281  if(m_MapCount == 0)
8282  {
8283  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8284  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8285  hAllocator->m_hDevice,
8286  m_DedicatedAllocation.m_hMemory);
8287  }
8288  }
8289  else
8290  {
8291  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8292  }
8293 }
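// Illustrative mapping contract for a dedicated allocation (`allocator` and
// `alloc` hypothetical); the reference count above is what makes nested
// vmaMapMemory/vmaUnmapMemory calls legal:
//
//   void* p1; vmaMapMemory(allocator, alloc, &p1); // count 0 -> 1, vkMapMemory
//   void* p2; vmaMapMemory(allocator, alloc, &p2); // count 1 -> 2, p2 == p1
//   vmaUnmapMemory(allocator, alloc);              // count 2 -> 1
//   vmaUnmapMemory(allocator, alloc);              // count 1 -> 0, vkUnmapMemory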
8294 
8295 #if VMA_STATS_STRING_ENABLED
8296 
8297 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8298 {
8299  json.BeginObject();
8300 
8301  json.WriteString("Blocks");
8302  json.WriteNumber(stat.blockCount);
8303 
8304  json.WriteString("Allocations");
8305  json.WriteNumber(stat.allocationCount);
8306 
8307  json.WriteString("UnusedRanges");
8308  json.WriteNumber(stat.unusedRangeCount);
8309 
8310  json.WriteString("UsedBytes");
8311  json.WriteNumber(stat.usedBytes);
8312 
8313  json.WriteString("UnusedBytes");
8314  json.WriteNumber(stat.unusedBytes);
8315 
8316  if(stat.allocationCount > 1)
8317  {
8318  json.WriteString("AllocationSize");
8319  json.BeginObject(true);
8320  json.WriteString("Min");
8321  json.WriteNumber(stat.allocationSizeMin);
8322  json.WriteString("Avg");
8323  json.WriteNumber(stat.allocationSizeAvg);
8324  json.WriteString("Max");
8325  json.WriteNumber(stat.allocationSizeMax);
8326  json.EndObject();
8327  }
8328 
8329  if(stat.unusedRangeCount > 1)
8330  {
8331  json.WriteString("UnusedRangeSize");
8332  json.BeginObject(true);
8333  json.WriteString("Min");
8334  json.WriteNumber(stat.unusedRangeSizeMin);
8335  json.WriteString("Avg");
8336  json.WriteNumber(stat.unusedRangeSizeAvg);
8337  json.WriteString("Max");
8338  json.WriteNumber(stat.unusedRangeSizeMax);
8339  json.EndObject();
8340  }
8341 
8342  json.EndObject();
8343 }
8344 
8345 #endif // #if VMA_STATS_STRING_ENABLED
8346 
8347 struct VmaSuballocationItemSizeLess
8348 {
8349  bool operator()(
8350  const VmaSuballocationList::iterator lhs,
8351  const VmaSuballocationList::iterator rhs) const
8352  {
8353  return lhs->size < rhs->size;
8354  }
8355  bool operator()(
8356  const VmaSuballocationList::iterator lhs,
8357  VkDeviceSize rhsSize) const
8358  {
8359  return lhs->size < rhsSize;
8360  }
8361 };
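// Illustrative: the second operator() overload lets a binary search compare a
// stored iterator directly against a raw VkDeviceSize, so lookups by size need
// no temporary suballocation (this mirrors the usage in
// CreateAllocationRequest below; `requiredSize` is hypothetical):
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       requiredSize,
//       VmaSuballocationItemSizeLess());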
8362 
8363 
8364 ////////////////////////////////////////////////////////////////////////////////
8365 // class VmaBlockMetadata
8366 
8367 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8368  m_Size(0),
8369  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8370 {
8371 }
8372 
8373 #if VMA_STATS_STRING_ENABLED
8374 
8375 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8376  VkDeviceSize unusedBytes,
8377  size_t allocationCount,
8378  size_t unusedRangeCount) const
8379 {
8380  json.BeginObject();
8381 
8382  json.WriteString("TotalBytes");
8383  json.WriteNumber(GetSize());
8384 
8385  json.WriteString("UnusedBytes");
8386  json.WriteNumber(unusedBytes);
8387 
8388  json.WriteString("Allocations");
8389  json.WriteNumber((uint64_t)allocationCount);
8390 
8391  json.WriteString("UnusedRanges");
8392  json.WriteNumber((uint64_t)unusedRangeCount);
8393 
8394  json.WriteString("Suballocations");
8395  json.BeginArray();
8396 }
8397 
8398 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8399  VkDeviceSize offset,
8400  VmaAllocation hAllocation) const
8401 {
8402  json.BeginObject(true);
8403 
8404  json.WriteString("Offset");
8405  json.WriteNumber(offset);
8406 
8407  hAllocation->PrintParameters(json);
8408 
8409  json.EndObject();
8410 }
8411 
8412 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8413  VkDeviceSize offset,
8414  VkDeviceSize size) const
8415 {
8416  json.BeginObject(true);
8417 
8418  json.WriteString("Offset");
8419  json.WriteNumber(offset);
8420 
8421  json.WriteString("Type");
8422  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8423 
8424  json.WriteString("Size");
8425  json.WriteNumber(size);
8426 
8427  json.EndObject();
8428 }
8429 
8430 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8431 {
8432  json.EndArray();
8433  json.EndObject();
8434 }
8435 
8436 #endif // #if VMA_STATS_STRING_ENABLED
8437 
8438 ////////////////////////////////////////////////////////////////////////////////
8439 // class VmaBlockMetadata_Generic
8440 
8441 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8442  VmaBlockMetadata(hAllocator),
8443  m_FreeCount(0),
8444  m_SumFreeSize(0),
8445  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8446  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8447 {
8448 }
8449 
8450 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8451 {
8452 }
8453 
8454 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8455 {
8456  VmaBlockMetadata::Init(size);
8457 
8458  m_FreeCount = 1;
8459  m_SumFreeSize = size;
8460 
8461  VmaSuballocation suballoc = {};
8462  suballoc.offset = 0;
8463  suballoc.size = size;
8464  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8465  suballoc.hAllocation = VK_NULL_HANDLE;
8466 
8467  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8468  m_Suballocations.push_back(suballoc);
8469  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8470  --suballocItem;
8471  m_FreeSuballocationsBySize.push_back(suballocItem);
8472 }
8473 
8474 bool VmaBlockMetadata_Generic::Validate() const
8475 {
8476  VMA_VALIDATE(!m_Suballocations.empty());
8477 
8478  // Expected offset of new suballocation as calculated from previous ones.
8479  VkDeviceSize calculatedOffset = 0;
8480  // Expected number of free suballocations as calculated from traversing their list.
8481  uint32_t calculatedFreeCount = 0;
8482  // Expected sum size of free suballocations as calculated from traversing their list.
8483  VkDeviceSize calculatedSumFreeSize = 0;
8484  // Expected number of free suballocations that should be registered in
8485  // m_FreeSuballocationsBySize calculated from traversing their list.
8486  size_t freeSuballocationsToRegister = 0;
8487  // True if previous visited suballocation was free.
8488  bool prevFree = false;
8489 
8490  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8491  suballocItem != m_Suballocations.cend();
8492  ++suballocItem)
8493  {
8494  const VmaSuballocation& subAlloc = *suballocItem;
8495 
8496  // Actual offset of this suballocation doesn't match expected one.
8497  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8498 
8499  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8500  // Two adjacent free suballocations are invalid. They should be merged.
8501  VMA_VALIDATE(!prevFree || !currFree);
8502 
8503  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8504 
8505  if(currFree)
8506  {
8507  calculatedSumFreeSize += subAlloc.size;
8508  ++calculatedFreeCount;
8509  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8510  {
8511  ++freeSuballocationsToRegister;
8512  }
8513 
8514  // Margin required between allocations - every free space must be at least that large.
8515  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8516  }
8517  else
8518  {
8519  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8520  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8521 
8522  // Margin required between allocations - previous allocation must be free.
8523  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8524  }
8525 
8526  calculatedOffset += subAlloc.size;
8527  prevFree = currFree;
8528  }
8529 
8530  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8531  // match expected one.
8532  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8533 
8534  VkDeviceSize lastSize = 0;
8535  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8536  {
8537  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8538 
8539  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8540  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8541  // They must be sorted by size ascending.
8542  VMA_VALIDATE(suballocItem->size >= lastSize);
8543 
8544  lastSize = suballocItem->size;
8545  }
8546 
8547  // Check if totals match calculated values.
8548  VMA_VALIDATE(ValidateFreeSuballocationList());
8549  VMA_VALIDATE(calculatedOffset == GetSize());
8550  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8551  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8552 
8553  return true;
8554 }
8555 
8556 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8557 {
8558  if(!m_FreeSuballocationsBySize.empty())
8559  {
8560  return m_FreeSuballocationsBySize.back()->size;
8561  }
8562  else
8563  {
8564  return 0;
8565  }
8566 }
8567 
8568 bool VmaBlockMetadata_Generic::IsEmpty() const
8569 {
8570  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8571 }
8572 
8573 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8574 {
8575  outInfo.blockCount = 1;
8576 
8577  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8578  outInfo.allocationCount = rangeCount - m_FreeCount;
8579  outInfo.unusedRangeCount = m_FreeCount;
8580 
8581  outInfo.unusedBytes = m_SumFreeSize;
8582  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8583 
8584  outInfo.allocationSizeMin = UINT64_MAX;
8585  outInfo.allocationSizeMax = 0;
8586  outInfo.unusedRangeSizeMin = UINT64_MAX;
8587  outInfo.unusedRangeSizeMax = 0;
8588 
8589  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8590  suballocItem != m_Suballocations.cend();
8591  ++suballocItem)
8592  {
8593  const VmaSuballocation& suballoc = *suballocItem;
8594  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8595  {
8596  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8597  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8598  }
8599  else
8600  {
8601  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8602  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8603  }
8604  }
8605 }
8606 
8607 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8608 {
8609  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8610 
8611  inoutStats.size += GetSize();
8612  inoutStats.unusedSize += m_SumFreeSize;
8613  inoutStats.allocationCount += rangeCount - m_FreeCount;
8614  inoutStats.unusedRangeCount += m_FreeCount;
8615  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8616 }
8617 
8618 #if VMA_STATS_STRING_ENABLED
8619 
8620 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8621 {
8622  PrintDetailedMap_Begin(json,
8623  m_SumFreeSize, // unusedBytes
8624  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8625  m_FreeCount); // unusedRangeCount
8626 
8627  size_t i = 0;
8628  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8629  suballocItem != m_Suballocations.cend();
8630  ++suballocItem, ++i)
8631  {
8632  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8633  {
8634  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8635  }
8636  else
8637  {
8638  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8639  }
8640  }
8641 
8642  PrintDetailedMap_End(json);
8643 }
8644 
8645 #endif // #if VMA_STATS_STRING_ENABLED
8646 
8647 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8648  uint32_t currentFrameIndex,
8649  uint32_t frameInUseCount,
8650  VkDeviceSize bufferImageGranularity,
8651  VkDeviceSize allocSize,
8652  VkDeviceSize allocAlignment,
8653  bool upperAddress,
8654  VmaSuballocationType allocType,
8655  bool canMakeOtherLost,
8656  uint32_t strategy,
8657  VmaAllocationRequest* pAllocationRequest)
8658 {
8659  VMA_ASSERT(allocSize > 0);
8660  VMA_ASSERT(!upperAddress);
8661  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8662  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8663  VMA_HEAVY_ASSERT(Validate());
8664 
8665  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8666 
8667  // There is not enough total free space in this block to fulfill the request: early return.
8668  if(canMakeOtherLost == false &&
8669  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8670  {
8671  return false;
8672  }
8673 
8674  // New algorithm, efficiently searching freeSuballocationsBySize.
8675  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8676  if(freeSuballocCount > 0)
8677  {
8678  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
8679  {
8680  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8681  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8682  m_FreeSuballocationsBySize.data(),
8683  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8684  allocSize + 2 * VMA_DEBUG_MARGIN,
8685  VmaSuballocationItemSizeLess());
8686  size_t index = it - m_FreeSuballocationsBySize.data();
8687  for(; index < freeSuballocCount; ++index)
8688  {
8689  if(CheckAllocation(
8690  currentFrameIndex,
8691  frameInUseCount,
8692  bufferImageGranularity,
8693  allocSize,
8694  allocAlignment,
8695  allocType,
8696  m_FreeSuballocationsBySize[index],
8697  false, // canMakeOtherLost
8698  &pAllocationRequest->offset,
8699  &pAllocationRequest->itemsToMakeLostCount,
8700  &pAllocationRequest->sumFreeSize,
8701  &pAllocationRequest->sumItemSize))
8702  {
8703  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8704  return true;
8705  }
8706  }
8707  }
8708  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8709  {
8710  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8711  it != m_Suballocations.end();
8712  ++it)
8713  {
8714  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8715  currentFrameIndex,
8716  frameInUseCount,
8717  bufferImageGranularity,
8718  allocSize,
8719  allocAlignment,
8720  allocType,
8721  it,
8722  false, // canMakeOtherLost
8723  &pAllocationRequest->offset,
8724  &pAllocationRequest->itemsToMakeLostCount,
8725  &pAllocationRequest->sumFreeSize,
8726  &pAllocationRequest->sumItemSize))
8727  {
8728  pAllocationRequest->item = it;
8729  return true;
8730  }
8731  }
8732  }
8733  else // WORST_FIT, FIRST_FIT
8734  {
8735  // Search starting from the biggest suballocations.
8736  for(size_t index = freeSuballocCount; index--; )
8737  {
8738  if(CheckAllocation(
8739  currentFrameIndex,
8740  frameInUseCount,
8741  bufferImageGranularity,
8742  allocSize,
8743  allocAlignment,
8744  allocType,
8745  m_FreeSuballocationsBySize[index],
8746  false, // canMakeOtherLost
8747  &pAllocationRequest->offset,
8748  &pAllocationRequest->itemsToMakeLostCount,
8749  &pAllocationRequest->sumFreeSize,
8750  &pAllocationRequest->sumItemSize))
8751  {
8752  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8753  return true;
8754  }
8755  }
8756  }
8757  }
8758 
8759  if(canMakeOtherLost)
8760  {
8761  // Brute-force algorithm. TODO: Come up with something better.
8762 
8763  bool found = false;
8764  VmaAllocationRequest tmpAllocRequest = {};
8765  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8766  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8767  suballocIt != m_Suballocations.end();
8768  ++suballocIt)
8769  {
8770  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8771  suballocIt->hAllocation->CanBecomeLost())
8772  {
8773  if(CheckAllocation(
8774  currentFrameIndex,
8775  frameInUseCount,
8776  bufferImageGranularity,
8777  allocSize,
8778  allocAlignment,
8779  allocType,
8780  suballocIt,
8781  canMakeOtherLost,
8782  &tmpAllocRequest.offset,
8783  &tmpAllocRequest.itemsToMakeLostCount,
8784  &tmpAllocRequest.sumFreeSize,
8785  &tmpAllocRequest.sumItemSize))
8786  {
8787  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8788  {
8789  *pAllocationRequest = tmpAllocRequest;
8790  pAllocationRequest->item = suballocIt;
8791  break;
8792  }
8793  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8794  {
8795  *pAllocationRequest = tmpAllocRequest;
8796  pAllocationRequest->item = suballocIt;
8797  found = true;
8798  }
8799  }
8800  }
8801  }
8802 
8803  return found;
8804  }
8805 
8806  return false;
8807 }
8808 
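// MakeRequestedAllocationsLost carries out the eviction plan computed by
// CreateAllocationRequest with canMakeOtherLost: starting at
// pAllocationRequest->item, it marks each evictable allocation as lost via
// MakeLost() and frees its suballocation until itemsToMakeLostCount drops to
// zero. It returns false if some allocation can no longer be made lost,
// e.g. because it has been used again too recently.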
8809 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8810  uint32_t currentFrameIndex,
8811  uint32_t frameInUseCount,
8812  VmaAllocationRequest* pAllocationRequest)
8813 {
8814  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8815 
8816  while(pAllocationRequest->itemsToMakeLostCount > 0)
8817  {
8818  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8819  {
8820  ++pAllocationRequest->item;
8821  }
8822  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8823  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8824  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8825  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8826  {
8827  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8828  --pAllocationRequest->itemsToMakeLostCount;
8829  }
8830  else
8831  {
8832  return false;
8833  }
8834  }
8835 
8836  VMA_HEAVY_ASSERT(Validate());
8837  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8838  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8839 
8840  return true;
8841 }
8842 
8843 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8844 {
8845  uint32_t lostAllocationCount = 0;
8846  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8847  it != m_Suballocations.end();
8848  ++it)
8849  {
8850  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8851  it->hAllocation->CanBecomeLost() &&
8852  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8853  {
8854  it = FreeSuballocation(it);
8855  ++lostAllocationCount;
8856  }
8857  }
8858  return lostAllocationCount;
8859 }
8860 
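// CheckCorruption depends on VMA_DEBUG_MARGIN being enabled: each allocation
// is then surrounded by margin bytes filled with a known magic value, and
// VmaValidateMagicValue() re-reads that value directly before and directly
// after every used suballocation, so a buffer overrun or underrun by the
// application is reported as VK_ERROR_VALIDATION_FAILED_EXT here.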
8861 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8862 {
8863  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8864  it != m_Suballocations.end();
8865  ++it)
8866  {
8867  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8868  {
8869  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8870  {
8871  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8872  return VK_ERROR_VALIDATION_FAILED_EXT;
8873  }
8874  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8875  {
8876  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8877  return VK_ERROR_VALIDATION_FAILED_EXT;
8878  }
8879  }
8880  }
8881 
8882  return VK_SUCCESS;
8883 }
8884 
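// Alloc commits the spot found by CreateAllocationRequest: the chosen free
// suballocation is converted in place into a used one, and any leftover space
// is split off into new free suballocations - paddingBegin bytes before the
// allocation (introduced by alignment and the debug margin) and paddingEnd
// bytes after it.
//
// Worked example with hypothetical numbers: in a fresh 1024-byte block the
// single free suballocation is [0, 1024). A request with allocSize = 256 and
// request.offset = 64 (due to alignment) gives paddingBegin = 64 and
// paddingEnd = 1024 - 64 - 256 = 704, so the list becomes
// free [0, 64) | used [64, 320) | free [320, 1024), and m_FreeCount goes
// from 1 to 2.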
8885 void VmaBlockMetadata_Generic::Alloc(
8886  const VmaAllocationRequest& request,
8887  VmaSuballocationType type,
8888  VkDeviceSize allocSize,
8889  VmaAllocation hAllocation)
8890 {
8891  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8892  VMA_ASSERT(request.item != m_Suballocations.end());
8893  VmaSuballocation& suballoc = *request.item;
8894  // Given suballocation is a free block.
8895  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8896  // Given offset is inside this suballocation.
8897  VMA_ASSERT(request.offset >= suballoc.offset);
8898  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8899  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8900  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8901 
8902  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8903  // it to become used.
8904  UnregisterFreeSuballocation(request.item);
8905 
8906  suballoc.offset = request.offset;
8907  suballoc.size = allocSize;
8908  suballoc.type = type;
8909  suballoc.hAllocation = hAllocation;
8910 
8911  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8912  if(paddingEnd)
8913  {
8914  VmaSuballocation paddingSuballoc = {};
8915  paddingSuballoc.offset = request.offset + allocSize;
8916  paddingSuballoc.size = paddingEnd;
8917  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8918  VmaSuballocationList::iterator next = request.item;
8919  ++next;
8920  const VmaSuballocationList::iterator paddingEndItem =
8921  m_Suballocations.insert(next, paddingSuballoc);
8922  RegisterFreeSuballocation(paddingEndItem);
8923  }
8924 
8925  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8926  if(paddingBegin)
8927  {
8928  VmaSuballocation paddingSuballoc = {};
8929  paddingSuballoc.offset = request.offset - paddingBegin;
8930  paddingSuballoc.size = paddingBegin;
8931  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8932  const VmaSuballocationList::iterator paddingBeginItem =
8933  m_Suballocations.insert(request.item, paddingSuballoc);
8934  RegisterFreeSuballocation(paddingBeginItem);
8935  }
8936 
8937  // Update totals.
8938  m_FreeCount = m_FreeCount - 1;
8939  if(paddingBegin > 0)
8940  {
8941  ++m_FreeCount;
8942  }
8943  if(paddingEnd > 0)
8944  {
8945  ++m_FreeCount;
8946  }
8947  m_SumFreeSize -= allocSize;
8948 }
8949 
8950 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8951 {
8952  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8953  suballocItem != m_Suballocations.end();
8954  ++suballocItem)
8955  {
8956  VmaSuballocation& suballoc = *suballocItem;
8957  if(suballoc.hAllocation == allocation)
8958  {
8959  FreeSuballocation(suballocItem);
8960  VMA_HEAVY_ASSERT(Validate());
8961  return;
8962  }
8963  }
8964  VMA_ASSERT(0 && "Not found!");
8965 }
8966 
8967 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8968 {
8969  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8970  suballocItem != m_Suballocations.end();
8971  ++suballocItem)
8972  {
8973  VmaSuballocation& suballoc = *suballocItem;
8974  if(suballoc.offset == offset)
8975  {
8976  FreeSuballocation(suballocItem);
8977  return;
8978  }
8979  }
8980  VMA_ASSERT(0 && "Not found!");
8981 }
8982 
8983 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8984 {
8985  VkDeviceSize lastSize = 0;
8986  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8987  {
8988  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8989 
8990  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8991  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8992  VMA_VALIDATE(it->size >= lastSize);
8993  lastSize = it->size;
8994  }
8995  return true;
8996 }
8997 
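// CheckAllocation tests whether an allocation with the given size and
// alignment can be placed starting inside the suballocation referenced by
// suballocItem. It accounts for VMA_DEBUG_MARGIN on both sides and for
// bufferImageGranularity: linear and non-linear resources that share a
// memory "page" of that granularity must not be mixed, so a conflicting
// neighbor bumps the offset up to the next granularity boundary or fails
// the check. In canMakeOtherLost mode the candidate range may span several
// consecutive suballocations, and the function also counts how many existing
// allocations would have to be made lost to free that range.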
8998 bool VmaBlockMetadata_Generic::CheckAllocation(
8999  uint32_t currentFrameIndex,
9000  uint32_t frameInUseCount,
9001  VkDeviceSize bufferImageGranularity,
9002  VkDeviceSize allocSize,
9003  VkDeviceSize allocAlignment,
9004  VmaSuballocationType allocType,
9005  VmaSuballocationList::const_iterator suballocItem,
9006  bool canMakeOtherLost,
9007  VkDeviceSize* pOffset,
9008  size_t* itemsToMakeLostCount,
9009  VkDeviceSize* pSumFreeSize,
9010  VkDeviceSize* pSumItemSize) const
9011 {
9012  VMA_ASSERT(allocSize > 0);
9013  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9014  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9015  VMA_ASSERT(pOffset != VMA_NULL);
9016 
9017  *itemsToMakeLostCount = 0;
9018  *pSumFreeSize = 0;
9019  *pSumItemSize = 0;
9020 
9021  if(canMakeOtherLost)
9022  {
9023  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9024  {
9025  *pSumFreeSize = suballocItem->size;
9026  }
9027  else
9028  {
9029  if(suballocItem->hAllocation->CanBecomeLost() &&
9030  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9031  {
9032  ++*itemsToMakeLostCount;
9033  *pSumItemSize = suballocItem->size;
9034  }
9035  else
9036  {
9037  return false;
9038  }
9039  }
9040 
9041  // Remaining size is too small for this request: Early return.
9042  if(GetSize() - suballocItem->offset < allocSize)
9043  {
9044  return false;
9045  }
9046 
9047  // Start from offset equal to beginning of this suballocation.
9048  *pOffset = suballocItem->offset;
9049 
9050  // Apply VMA_DEBUG_MARGIN at the beginning.
9051  if(VMA_DEBUG_MARGIN > 0)
9052  {
9053  *pOffset += VMA_DEBUG_MARGIN;
9054  }
9055 
9056  // Apply alignment.
9057  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9058 
9059  // Check previous suballocations for BufferImageGranularity conflicts.
9060  // Make bigger alignment if necessary.
9061  if(bufferImageGranularity > 1)
9062  {
9063  bool bufferImageGranularityConflict = false;
9064  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9065  while(prevSuballocItem != m_Suballocations.cbegin())
9066  {
9067  --prevSuballocItem;
9068  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9069  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9070  {
9071  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9072  {
9073  bufferImageGranularityConflict = true;
9074  break;
9075  }
9076  }
9077  else
9078  // Already on previous page.
9079  break;
9080  }
9081  if(bufferImageGranularityConflict)
9082  {
9083  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9084  }
9085  }
9086 
9087  // Now that we have final *pOffset, check if we are past suballocItem.
9088  // If yes, return false - this function should be called for another suballocItem as starting point.
9089  if(*pOffset >= suballocItem->offset + suballocItem->size)
9090  {
9091  return false;
9092  }
9093 
9094  // Calculate padding at the beginning based on current offset.
9095  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9096 
9097  // Calculate required margin at the end.
9098  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9099 
9100  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9101  // Another early return check.
9102  if(suballocItem->offset + totalSize > GetSize())
9103  {
9104  return false;
9105  }
9106 
9107  // Advance lastSuballocItem until desired size is reached.
9108  // Update itemsToMakeLostCount.
9109  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9110  if(totalSize > suballocItem->size)
9111  {
9112  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9113  while(remainingSize > 0)
9114  {
9115  ++lastSuballocItem;
9116  if(lastSuballocItem == m_Suballocations.cend())
9117  {
9118  return false;
9119  }
9120  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9121  {
9122  *pSumFreeSize += lastSuballocItem->size;
9123  }
9124  else
9125  {
9126  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9127  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9128  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9129  {
9130  ++*itemsToMakeLostCount;
9131  *pSumItemSize += lastSuballocItem->size;
9132  }
9133  else
9134  {
9135  return false;
9136  }
9137  }
9138  remainingSize = (lastSuballocItem->size < remainingSize) ?
9139  remainingSize - lastSuballocItem->size : 0;
9140  }
9141  }
9142 
9143  // Check next suballocations for BufferImageGranularity conflicts.
9144  // If conflict exists, we must mark more allocations lost or fail.
9145  if(bufferImageGranularity > 1)
9146  {
9147  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9148  ++nextSuballocItem;
9149  while(nextSuballocItem != m_Suballocations.cend())
9150  {
9151  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9152  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9153  {
9154  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9155  {
9156  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9157  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9158  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9159  {
9160  ++*itemsToMakeLostCount;
9161  }
9162  else
9163  {
9164  return false;
9165  }
9166  }
9167  }
9168  else
9169  {
9170  // Already on next page.
9171  break;
9172  }
9173  ++nextSuballocItem;
9174  }
9175  }
9176  }
9177  else
9178  {
9179  const VmaSuballocation& suballoc = *suballocItem;
9180  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9181 
9182  *pSumFreeSize = suballoc.size;
9183 
9184  // Size of this suballocation is too small for this request: Early return.
9185  if(suballoc.size < allocSize)
9186  {
9187  return false;
9188  }
9189 
9190  // Start from offset equal to beginning of this suballocation.
9191  *pOffset = suballoc.offset;
9192 
9193  // Apply VMA_DEBUG_MARGIN at the beginning.
9194  if(VMA_DEBUG_MARGIN > 0)
9195  {
9196  *pOffset += VMA_DEBUG_MARGIN;
9197  }
9198 
9199  // Apply alignment.
9200  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9201 
9202  // Check previous suballocations for BufferImageGranularity conflicts.
9203  // Make bigger alignment if necessary.
9204  if(bufferImageGranularity > 1)
9205  {
9206  bool bufferImageGranularityConflict = false;
9207  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9208  while(prevSuballocItem != m_Suballocations.cbegin())
9209  {
9210  --prevSuballocItem;
9211  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9212  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9213  {
9214  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9215  {
9216  bufferImageGranularityConflict = true;
9217  break;
9218  }
9219  }
9220  else
9221  // Already on previous page.
9222  break;
9223  }
9224  if(bufferImageGranularityConflict)
9225  {
9226  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9227  }
9228  }
9229 
9230  // Calculate padding at the beginning based on current offset.
9231  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9232 
9233  // Calculate required margin at the end.
9234  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9235 
9236  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9237  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9238  {
9239  return false;
9240  }
9241 
9242  // Check next suballocations for BufferImageGranularity conflicts.
9243  // If conflict exists, allocation cannot be made here.
9244  if(bufferImageGranularity > 1)
9245  {
9246  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9247  ++nextSuballocItem;
9248  while(nextSuballocItem != m_Suballocations.cend())
9249  {
9250  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9251  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9252  {
9253  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9254  {
9255  return false;
9256  }
9257  }
9258  else
9259  {
9260  // Already on next page.
9261  break;
9262  }
9263  ++nextSuballocItem;
9264  }
9265  }
9266  }
9267 
9268  // All tests passed: Success. pOffset is already filled.
9269  return true;
9270 }
9271 
9272 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9273 {
9274  VMA_ASSERT(item != m_Suballocations.end());
9275  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9276 
9277  VmaSuballocationList::iterator nextItem = item;
9278  ++nextItem;
9279  VMA_ASSERT(nextItem != m_Suballocations.end());
9280  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9281 
9282  item->size += nextItem->size;
9283  --m_FreeCount;
9284  m_Suballocations.erase(nextItem);
9285 }
9286 
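// FreeSuballocation marks the given item as free and merges it with free
// neighbors, so the list never contains two adjacent free suballocations.
// It returns an iterator to the resulting (possibly merged) free range.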
9287 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9288 {
9289  // Change this suballocation to be marked as free.
9290  VmaSuballocation& suballoc = *suballocItem;
9291  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9292  suballoc.hAllocation = VK_NULL_HANDLE;
9293 
9294  // Update totals.
9295  ++m_FreeCount;
9296  m_SumFreeSize += suballoc.size;
9297 
9298  // Merge with previous and/or next suballocation if it's also free.
9299  bool mergeWithNext = false;
9300  bool mergeWithPrev = false;
9301 
9302  VmaSuballocationList::iterator nextItem = suballocItem;
9303  ++nextItem;
9304  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9305  {
9306  mergeWithNext = true;
9307  }
9308 
9309  VmaSuballocationList::iterator prevItem = suballocItem;
9310  if(suballocItem != m_Suballocations.begin())
9311  {
9312  --prevItem;
9313  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9314  {
9315  mergeWithPrev = true;
9316  }
9317  }
9318 
9319  if(mergeWithNext)
9320  {
9321  UnregisterFreeSuballocation(nextItem);
9322  MergeFreeWithNext(suballocItem);
9323  }
9324 
9325  if(mergeWithPrev)
9326  {
9327  UnregisterFreeSuballocation(prevItem);
9328  MergeFreeWithNext(prevItem);
9329  RegisterFreeSuballocation(prevItem);
9330  return prevItem;
9331  }
9332  else
9333  {
9334  RegisterFreeSuballocation(suballocItem);
9335  return suballocItem;
9336  }
9337 }
9338 
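// m_FreeSuballocationsBySize stores iterators to free suballocations sorted
// in ascending order of size, which is what makes the best-fit binary search
// in CreateAllocationRequest possible. Free ranges smaller than
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are deliberately not registered:
// they are unlikely to satisfy any request and would only slow down lookups.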
9339 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9340 {
9341  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9342  VMA_ASSERT(item->size > 0);
9343 
9344  // You may want to enable this validation at the beginning or at the end of
9345  // this function, depending on what you want to check.
9346  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9347 
9348  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9349  {
9350  if(m_FreeSuballocationsBySize.empty())
9351  {
9352  m_FreeSuballocationsBySize.push_back(item);
9353  }
9354  else
9355  {
9356  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9357  }
9358  }
9359 
9360  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9361 }
9362 
9363 
9364 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9365 {
9366  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9367  VMA_ASSERT(item->size > 0);
9368 
9369  // You may want to enable this validation at the beginning or at the end of
9370  // this function, depending on what you want to check.
9371  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9372 
9373  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9374  {
9375  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9376  m_FreeSuballocationsBySize.data(),
9377  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9378  item,
9379  VmaSuballocationItemSizeLess());
9380  for(size_t index = it - m_FreeSuballocationsBySize.data();
9381  index < m_FreeSuballocationsBySize.size();
9382  ++index)
9383  {
9384  if(m_FreeSuballocationsBySize[index] == item)
9385  {
9386  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9387  return;
9388  }
9389  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9390  }
9391  VMA_ASSERT(0 && "Not found.");
9392  }
9393 
9394  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9395 }
9396 
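// Conservative helper: scans the used suballocations in address order,
// tracking the smallest allocation alignment and whether two consecutive
// used suballocations have types that conflict under bufferImageGranularity.
// The inOutPrevSuballocType parameter is carried across calls so that a
// conflict spanning the boundary between two consecutive blocks is detected
// as well.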
9397 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9398  VkDeviceSize bufferImageGranularity,
9399  VmaSuballocationType& inOutPrevSuballocType) const
9400 {
9401  if(bufferImageGranularity == 1 || IsEmpty())
9402  {
9403  return false;
9404  }
9405 
9406  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9407  bool typeConflictFound = false;
9408  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9409  it != m_Suballocations.cend();
9410  ++it)
9411  {
9412  const VmaSuballocationType suballocType = it->type;
9413  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9414  {
9415  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9416  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9417  {
9418  typeConflictFound = true;
9419  }
9420  inOutPrevSuballocType = suballocType;
9421  }
9422  }
9423 
9424  return typeConflictFound || minAlignment >= bufferImageGranularity;
9425 }
9426 
9427 ////////////////////////////////////////////////////////////////////////////////
9428 // class VmaBlockMetadata_Linear
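// The linear metadata keeps its suballocations in two vectors sorted by
// offset instead of a list. m_1stVectorIndex selects which of
// m_Suballocations0/1 currently acts as the "1st" vector; depending on
// m_2ndVectorMode, the other vector is unused (SECOND_VECTOR_EMPTY), holds
// allocations that wrapped around to the start of the block
// (SECOND_VECTOR_RING_BUFFER), or holds allocations made downwards from the
// end of the block (SECOND_VECTOR_DOUBLE_STACK). Freed items are only marked
// null and tallied in m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and
// m_2ndNullItemsCount until cleanup logic compacts the vectors.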
9429 
9430 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9431  VmaBlockMetadata(hAllocator),
9432  m_SumFreeSize(0),
9433  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9434  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9435  m_1stVectorIndex(0),
9436  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9437  m_1stNullItemsBeginCount(0),
9438  m_1stNullItemsMiddleCount(0),
9439  m_2ndNullItemsCount(0)
9440 {
9441 }
9442 
9443 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9444 {
9445 }
9446 
9447 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9448 {
9449  VmaBlockMetadata::Init(size);
9450  m_SumFreeSize = size;
9451 }
9452 
9453 bool VmaBlockMetadata_Linear::Validate() const
9454 {
9455  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9456  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9457 
9458  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9459  VMA_VALIDATE(!suballocations1st.empty() ||
9460  suballocations2nd.empty() ||
9461  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9462 
9463  if(!suballocations1st.empty())
9464  {
9465  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
9466  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9467  // A null item at the end should have been removed by pop_back().
9468  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9469  }
9470  if(!suballocations2nd.empty())
9471  {
9472  // A null item at the end should have been removed by pop_back().
9473  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9474  }
9475 
9476  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9477  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9478 
9479  VkDeviceSize sumUsedSize = 0;
9480  const size_t suballoc1stCount = suballocations1st.size();
9481  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9482 
9483  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9484  {
9485  const size_t suballoc2ndCount = suballocations2nd.size();
9486  size_t nullItem2ndCount = 0;
9487  for(size_t i = 0; i < suballoc2ndCount; ++i)
9488  {
9489  const VmaSuballocation& suballoc = suballocations2nd[i];
9490  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9491 
9492  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9493  VMA_VALIDATE(suballoc.offset >= offset);
9494 
9495  if(!currFree)
9496  {
9497  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9498  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9499  sumUsedSize += suballoc.size;
9500  }
9501  else
9502  {
9503  ++nullItem2ndCount;
9504  }
9505 
9506  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9507  }
9508 
9509  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9510  }
9511 
9512  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9513  {
9514  const VmaSuballocation& suballoc = suballocations1st[i];
9515  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9516  suballoc.hAllocation == VK_NULL_HANDLE);
9517  }
9518 
9519  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9520 
9521  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9522  {
9523  const VmaSuballocation& suballoc = suballocations1st[i];
9524  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9525 
9526  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9527  VMA_VALIDATE(suballoc.offset >= offset);
9528  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9529 
9530  if(!currFree)
9531  {
9532  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9533  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9534  sumUsedSize += suballoc.size;
9535  }
9536  else
9537  {
9538  ++nullItem1stCount;
9539  }
9540 
9541  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9542  }
9543  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9544 
9545  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9546  {
9547  const size_t suballoc2ndCount = suballocations2nd.size();
9548  size_t nullItem2ndCount = 0;
9549  for(size_t i = suballoc2ndCount; i--; )
9550  {
9551  const VmaSuballocation& suballoc = suballocations2nd[i];
9552  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9553 
9554  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9555  VMA_VALIDATE(suballoc.offset >= offset);
9556 
9557  if(!currFree)
9558  {
9559  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9560  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9561  sumUsedSize += suballoc.size;
9562  }
9563  else
9564  {
9565  ++nullItem2ndCount;
9566  }
9567 
9568  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9569  }
9570 
9571  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9572  }
9573 
9574  VMA_VALIDATE(offset <= GetSize());
9575  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9576 
9577  return true;
9578 }
9579 
9580 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9581 {
9582  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9583  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9584 }
9585 
9586 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9587 {
9588  const VkDeviceSize size = GetSize();
9589 
9590  /*
9591  We don't consider gaps inside allocation vectors with freed allocations because
9592  they are not suitable for reuse in a linear allocator. We consider only space that
9593  is available for new allocations.
9594  */
9595  if(IsEmpty())
9596  {
9597  return size;
9598  }
9599 
9600  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9601 
9602  switch(m_2ndVectorMode)
9603  {
9604  case SECOND_VECTOR_EMPTY:
9605  /*
9606  Available space is after end of 1st, as well as before beginning of 1st (which
9607  would make it a ring buffer).
9608  */
9609  {
9610  const size_t suballocations1stCount = suballocations1st.size();
9611  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9612  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9613  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9614  return VMA_MAX(
9615  firstSuballoc.offset,
9616  size - (lastSuballoc.offset + lastSuballoc.size));
9617  }
9618  break;
9619 
9620  case SECOND_VECTOR_RING_BUFFER:
9621  /*
9622  Available space is only between end of 2nd and beginning of 1st.
9623  */
9624  {
9625  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9626  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9627  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9628  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9629  }
9630  break;
9631 
9632  case SECOND_VECTOR_DOUBLE_STACK:
9633  /*
9634  Available space is only between end of 1st and top of 2nd.
9635  */
9636  {
9637  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9638  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9639  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9640  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9641  }
9642  break;
9643 
9644  default:
9645  VMA_ASSERT(0);
9646  return 0;
9647  }
9648 }
9649 
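// The statistics and printing functions below all walk the block in address
// order as up to three regions: the 2nd vector when it acts as a ring buffer
// (lowest addresses), then the 1st vector, then the 2nd vector again when it
// forms the upper side of a double stack (highest addresses, iterated from
// the back because its items were pushed at decreasing offsets). Null (freed)
// items are skipped, and gaps between neighbors count as unused ranges.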
9650 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9651 {
9652  const VkDeviceSize size = GetSize();
9653  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9654  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9655  const size_t suballoc1stCount = suballocations1st.size();
9656  const size_t suballoc2ndCount = suballocations2nd.size();
9657 
9658  outInfo.blockCount = 1;
9659  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9660  outInfo.unusedRangeCount = 0;
9661  outInfo.usedBytes = 0;
9662  outInfo.allocationSizeMin = UINT64_MAX;
9663  outInfo.allocationSizeMax = 0;
9664  outInfo.unusedRangeSizeMin = UINT64_MAX;
9665  outInfo.unusedRangeSizeMax = 0;
9666 
9667  VkDeviceSize lastOffset = 0;
9668 
9669  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9670  {
9671  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9672  size_t nextAlloc2ndIndex = 0;
9673  while(lastOffset < freeSpace2ndTo1stEnd)
9674  {
9675  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9676  while(nextAlloc2ndIndex < suballoc2ndCount &&
9677  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9678  {
9679  ++nextAlloc2ndIndex;
9680  }
9681 
9682  // Found non-null allocation.
9683  if(nextAlloc2ndIndex < suballoc2ndCount)
9684  {
9685  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9686 
9687  // 1. Process free space before this allocation.
9688  if(lastOffset < suballoc.offset)
9689  {
9690  // There is free space from lastOffset to suballoc.offset.
9691  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9692  ++outInfo.unusedRangeCount;
9693  outInfo.unusedBytes += unusedRangeSize;
9694  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9695  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9696  }
9697 
9698  // 2. Process this allocation.
9699  // There is allocation with suballoc.offset, suballoc.size.
9700  outInfo.usedBytes += suballoc.size;
9701  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9702  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9703 
9704  // 3. Prepare for next iteration.
9705  lastOffset = suballoc.offset + suballoc.size;
9706  ++nextAlloc2ndIndex;
9707  }
9708  // We are at the end.
9709  else
9710  {
9711  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9712  if(lastOffset < freeSpace2ndTo1stEnd)
9713  {
9714  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9715  ++outInfo.unusedRangeCount;
9716  outInfo.unusedBytes += unusedRangeSize;
9717  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9718  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9719  }
9720 
9721  // End of loop.
9722  lastOffset = freeSpace2ndTo1stEnd;
9723  }
9724  }
9725  }
9726 
9727  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9728  const VkDeviceSize freeSpace1stTo2ndEnd =
9729  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9730  while(lastOffset < freeSpace1stTo2ndEnd)
9731  {
9732  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9733  while(nextAlloc1stIndex < suballoc1stCount &&
9734  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9735  {
9736  ++nextAlloc1stIndex;
9737  }
9738 
9739  // Found non-null allocation.
9740  if(nextAlloc1stIndex < suballoc1stCount)
9741  {
9742  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9743 
9744  // 1. Process free space before this allocation.
9745  if(lastOffset < suballoc.offset)
9746  {
9747  // There is free space from lastOffset to suballoc.offset.
9748  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9749  ++outInfo.unusedRangeCount;
9750  outInfo.unusedBytes += unusedRangeSize;
9751  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9752  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9753  }
9754 
9755  // 2. Process this allocation.
9756  // There is allocation with suballoc.offset, suballoc.size.
9757  outInfo.usedBytes += suballoc.size;
9758  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9759  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9760 
9761  // 3. Prepare for next iteration.
9762  lastOffset = suballoc.offset + suballoc.size;
9763  ++nextAlloc1stIndex;
9764  }
9765  // We are at the end.
9766  else
9767  {
9768  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9769  if(lastOffset < freeSpace1stTo2ndEnd)
9770  {
9771  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9772  ++outInfo.unusedRangeCount;
9773  outInfo.unusedBytes += unusedRangeSize;
9774  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9775  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9776  }
9777 
9778  // End of loop.
9779  lastOffset = freeSpace1stTo2ndEnd;
9780  }
9781  }
9782 
9783  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9784  {
9785  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9786  while(lastOffset < size)
9787  {
9788  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9789  while(nextAlloc2ndIndex != SIZE_MAX &&
9790  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9791  {
9792  --nextAlloc2ndIndex;
9793  }
9794 
9795  // Found non-null allocation.
9796  if(nextAlloc2ndIndex != SIZE_MAX)
9797  {
9798  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9799 
9800  // 1. Process free space before this allocation.
9801  if(lastOffset < suballoc.offset)
9802  {
9803  // There is free space from lastOffset to suballoc.offset.
9804  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9805  ++outInfo.unusedRangeCount;
9806  outInfo.unusedBytes += unusedRangeSize;
9807  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9808  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9809  }
9810 
9811  // 2. Process this allocation.
9812  // There is allocation with suballoc.offset, suballoc.size.
9813  outInfo.usedBytes += suballoc.size;
9814  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9815  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9816 
9817  // 3. Prepare for next iteration.
9818  lastOffset = suballoc.offset + suballoc.size;
9819  --nextAlloc2ndIndex;
9820  }
9821  // We are at the end.
9822  else
9823  {
9824  // There is free space from lastOffset to size.
9825  if(lastOffset < size)
9826  {
9827  const VkDeviceSize unusedRangeSize = size - lastOffset;
9828  ++outInfo.unusedRangeCount;
9829  outInfo.unusedBytes += unusedRangeSize;
9830  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9831  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9832  }
9833 
9834  // End of loop.
9835  lastOffset = size;
9836  }
9837  }
9838  }
9839 
9840  outInfo.unusedBytes = size - outInfo.usedBytes;
9841 }
9842 
9843 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9844 {
9845  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9846  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9847  const VkDeviceSize size = GetSize();
9848  const size_t suballoc1stCount = suballocations1st.size();
9849  const size_t suballoc2ndCount = suballocations2nd.size();
9850 
9851  inoutStats.size += size;
9852 
9853  VkDeviceSize lastOffset = 0;
9854 
9855  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9856  {
9857  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9858  size_t nextAlloc2ndIndex = 0;
9859  while(lastOffset < freeSpace2ndTo1stEnd)
9860  {
9861  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9862  while(nextAlloc2ndIndex < suballoc2ndCount &&
9863  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9864  {
9865  ++nextAlloc2ndIndex;
9866  }
9867 
9868  // Found non-null allocation.
9869  if(nextAlloc2ndIndex < suballoc2ndCount)
9870  {
9871  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9872 
9873  // 1. Process free space before this allocation.
9874  if(lastOffset < suballoc.offset)
9875  {
9876  // There is free space from lastOffset to suballoc.offset.
9877  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9878  inoutStats.unusedSize += unusedRangeSize;
9879  ++inoutStats.unusedRangeCount;
9880  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9881  }
9882 
9883  // 2. Process this allocation.
9884  // There is allocation with suballoc.offset, suballoc.size.
9885  ++inoutStats.allocationCount;
9886 
9887  // 3. Prepare for next iteration.
9888  lastOffset = suballoc.offset + suballoc.size;
9889  ++nextAlloc2ndIndex;
9890  }
9891  // We are at the end.
9892  else
9893  {
9894  if(lastOffset < freeSpace2ndTo1stEnd)
9895  {
9896  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9897  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9898  inoutStats.unusedSize += unusedRangeSize;
9899  ++inoutStats.unusedRangeCount;
9900  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9901  }
9902 
9903  // End of loop.
9904  lastOffset = freeSpace2ndTo1stEnd;
9905  }
9906  }
9907  }
9908 
9909  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9910  const VkDeviceSize freeSpace1stTo2ndEnd =
9911  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9912  while(lastOffset < freeSpace1stTo2ndEnd)
9913  {
9914  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9915  while(nextAlloc1stIndex < suballoc1stCount &&
9916  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9917  {
9918  ++nextAlloc1stIndex;
9919  }
9920 
9921  // Found non-null allocation.
9922  if(nextAlloc1stIndex < suballoc1stCount)
9923  {
9924  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9925 
9926  // 1. Process free space before this allocation.
9927  if(lastOffset < suballoc.offset)
9928  {
9929  // There is free space from lastOffset to suballoc.offset.
9930  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9931  inoutStats.unusedSize += unusedRangeSize;
9932  ++inoutStats.unusedRangeCount;
9933  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9934  }
9935 
9936  // 2. Process this allocation.
9937  // There is allocation with suballoc.offset, suballoc.size.
9938  ++inoutStats.allocationCount;
9939 
9940  // 3. Prepare for next iteration.
9941  lastOffset = suballoc.offset + suballoc.size;
9942  ++nextAlloc1stIndex;
9943  }
9944  // We are at the end.
9945  else
9946  {
9947  if(lastOffset < freeSpace1stTo2ndEnd)
9948  {
9949  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9950  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9951  inoutStats.unusedSize += unusedRangeSize;
9952  ++inoutStats.unusedRangeCount;
9953  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9954  }
9955 
9956  // End of loop.
9957  lastOffset = freeSpace1stTo2ndEnd;
9958  }
9959  }
9960 
9961  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9962  {
9963  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9964  while(lastOffset < size)
9965  {
9966  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9967  while(nextAlloc2ndIndex != SIZE_MAX &&
9968  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9969  {
9970  --nextAlloc2ndIndex;
9971  }
9972 
9973  // Found non-null allocation.
9974  if(nextAlloc2ndIndex != SIZE_MAX)
9975  {
9976  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9977 
9978  // 1. Process free space before this allocation.
9979  if(lastOffset < suballoc.offset)
9980  {
9981  // There is free space from lastOffset to suballoc.offset.
9982  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9983  inoutStats.unusedSize += unusedRangeSize;
9984  ++inoutStats.unusedRangeCount;
9985  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9986  }
9987 
9988  // 2. Process this allocation.
9989  // There is allocation with suballoc.offset, suballoc.size.
9990  ++inoutStats.allocationCount;
9991 
9992  // 3. Prepare for next iteration.
9993  lastOffset = suballoc.offset + suballoc.size;
9994  --nextAlloc2ndIndex;
9995  }
9996  // We are at the end.
9997  else
9998  {
9999  if(lastOffset < size)
10000  {
10001  // There is free space from lastOffset to size.
10002  const VkDeviceSize unusedRangeSize = size - lastOffset;
10003  inoutStats.unusedSize += unusedRangeSize;
10004  ++inoutStats.unusedRangeCount;
10005  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10006  }
10007 
10008  // End of loop.
10009  lastOffset = size;
10010  }
10011  }
10012  }
10013 }
10014 
10015 #if VMA_STATS_STRING_ENABLED
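// PrintDetailedMap runs in two passes: the first pass only counts allocations,
// unused ranges and used bytes, because PrintDetailedMap_Begin() must be given
// these totals before any item can be written; the second pass then emits the
// actual entries in address order.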
10016 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10017 {
10018  const VkDeviceSize size = GetSize();
10019  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10020  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10021  const size_t suballoc1stCount = suballocations1st.size();
10022  const size_t suballoc2ndCount = suballocations2nd.size();
10023 
10024  // FIRST PASS
10025 
10026  size_t unusedRangeCount = 0;
10027  VkDeviceSize usedBytes = 0;
10028 
10029  VkDeviceSize lastOffset = 0;
10030 
10031  size_t alloc2ndCount = 0;
10032  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10033  {
10034  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10035  size_t nextAlloc2ndIndex = 0;
10036  while(lastOffset < freeSpace2ndTo1stEnd)
10037  {
10038  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10039  while(nextAlloc2ndIndex < suballoc2ndCount &&
10040  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10041  {
10042  ++nextAlloc2ndIndex;
10043  }
10044 
10045  // Found non-null allocation.
10046  if(nextAlloc2ndIndex < suballoc2ndCount)
10047  {
10048  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10049 
10050  // 1. Process free space before this allocation.
10051  if(lastOffset < suballoc.offset)
10052  {
10053  // There is free space from lastOffset to suballoc.offset.
10054  ++unusedRangeCount;
10055  }
10056 
10057  // 2. Process this allocation.
10058  // There is allocation with suballoc.offset, suballoc.size.
10059  ++alloc2ndCount;
10060  usedBytes += suballoc.size;
10061 
10062  // 3. Prepare for next iteration.
10063  lastOffset = suballoc.offset + suballoc.size;
10064  ++nextAlloc2ndIndex;
10065  }
10066  // We are at the end.
10067  else
10068  {
10069  if(lastOffset < freeSpace2ndTo1stEnd)
10070  {
10071  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10072  ++unusedRangeCount;
10073  }
10074 
10075  // End of loop.
10076  lastOffset = freeSpace2ndTo1stEnd;
10077  }
10078  }
10079  }
10080 
10081  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10082  size_t alloc1stCount = 0;
10083  const VkDeviceSize freeSpace1stTo2ndEnd =
10084  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10085  while(lastOffset < freeSpace1stTo2ndEnd)
10086  {
10087  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10088  while(nextAlloc1stIndex < suballoc1stCount &&
10089  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10090  {
10091  ++nextAlloc1stIndex;
10092  }
10093 
10094  // Found non-null allocation.
10095  if(nextAlloc1stIndex < suballoc1stCount)
10096  {
10097  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10098 
10099  // 1. Process free space before this allocation.
10100  if(lastOffset < suballoc.offset)
10101  {
10102  // There is free space from lastOffset to suballoc.offset.
10103  ++unusedRangeCount;
10104  }
10105 
10106  // 2. Process this allocation.
10107  // There is allocation with suballoc.offset, suballoc.size.
10108  ++alloc1stCount;
10109  usedBytes += suballoc.size;
10110 
10111  // 3. Prepare for next iteration.
10112  lastOffset = suballoc.offset + suballoc.size;
10113  ++nextAlloc1stIndex;
10114  }
10115  // We are at the end.
10116  else
10117  {
10118  if(lastOffset < freeSpace1stTo2ndEnd)
10119  {
10120  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10121  ++unusedRangeCount;
10122  }
10123 
10124  // End of loop.
10125  lastOffset = freeSpace1stTo2ndEnd;
10126  }
10127  }
10128 
10129  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10130  {
10131  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10132  while(lastOffset < size)
10133  {
10134  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10135  while(nextAlloc2ndIndex != SIZE_MAX &&
10136  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10137  {
10138  --nextAlloc2ndIndex;
10139  }
10140 
10141  // Found non-null allocation.
10142  if(nextAlloc2ndIndex != SIZE_MAX)
10143  {
10144  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10145 
10146  // 1. Process free space before this allocation.
10147  if(lastOffset < suballoc.offset)
10148  {
10149  // There is free space from lastOffset to suballoc.offset.
10150  ++unusedRangeCount;
10151  }
10152 
10153  // 2. Process this allocation.
10154  // There is allocation with suballoc.offset, suballoc.size.
10155  ++alloc2ndCount;
10156  usedBytes += suballoc.size;
10157 
10158  // 3. Prepare for next iteration.
10159  lastOffset = suballoc.offset + suballoc.size;
10160  --nextAlloc2ndIndex;
10161  }
10162  // We are at the end.
10163  else
10164  {
10165  if(lastOffset < size)
10166  {
10167  // There is free space from lastOffset to size.
10168  ++unusedRangeCount;
10169  }
10170 
10171  // End of loop.
10172  lastOffset = size;
10173  }
10174  }
10175  }
10176 
10177  const VkDeviceSize unusedBytes = size - usedBytes;
10178  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10179 
10180  // SECOND PASS
10181  lastOffset = 0;
10182 
10183  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10184  {
10185  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10186  size_t nextAlloc2ndIndex = 0;
10187  while(lastOffset < freeSpace2ndTo1stEnd)
10188  {
10189  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10190  while(nextAlloc2ndIndex < suballoc2ndCount &&
10191  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10192  {
10193  ++nextAlloc2ndIndex;
10194  }
10195 
10196  // Found non-null allocation.
10197  if(nextAlloc2ndIndex < suballoc2ndCount)
10198  {
10199  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10200 
10201  // 1. Process free space before this allocation.
10202  if(lastOffset < suballoc.offset)
10203  {
10204  // There is free space from lastOffset to suballoc.offset.
10205  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10206  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10207  }
10208 
10209  // 2. Process this allocation.
10210  // There is allocation with suballoc.offset, suballoc.size.
10211  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10212 
10213  // 3. Prepare for next iteration.
10214  lastOffset = suballoc.offset + suballoc.size;
10215  ++nextAlloc2ndIndex;
10216  }
10217  // We are at the end.
10218  else
10219  {
10220  if(lastOffset < freeSpace2ndTo1stEnd)
10221  {
10222  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10223  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10224  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10225  }
10226 
10227  // End of loop.
10228  lastOffset = freeSpace2ndTo1stEnd;
10229  }
10230  }
10231  }
10232 
10233  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10234  while(lastOffset < freeSpace1stTo2ndEnd)
10235  {
10236  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10237  while(nextAlloc1stIndex < suballoc1stCount &&
10238  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10239  {
10240  ++nextAlloc1stIndex;
10241  }
10242 
10243  // Found non-null allocation.
10244  if(nextAlloc1stIndex < suballoc1stCount)
10245  {
10246  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10247 
10248  // 1. Process free space before this allocation.
10249  if(lastOffset < suballoc.offset)
10250  {
10251  // There is free space from lastOffset to suballoc.offset.
10252  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10253  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10254  }
10255 
10256  // 2. Process this allocation.
10257  // There is allocation with suballoc.offset, suballoc.size.
10258  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10259 
10260  // 3. Prepare for next iteration.
10261  lastOffset = suballoc.offset + suballoc.size;
10262  ++nextAlloc1stIndex;
10263  }
10264  // We are at the end.
10265  else
10266  {
10267  if(lastOffset < freeSpace1stTo2ndEnd)
10268  {
10269  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10270  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10271  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10272  }
10273 
10274  // End of loop.
10275  lastOffset = freeSpace1stTo2ndEnd;
10276  }
10277  }
10278 
10279  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10280  {
10281  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10282  while(lastOffset < size)
10283  {
10284  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10285  while(nextAlloc2ndIndex != SIZE_MAX &&
10286  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10287  {
10288  --nextAlloc2ndIndex;
10289  }
10290 
10291  // Found non-null allocation.
10292  if(nextAlloc2ndIndex != SIZE_MAX)
10293  {
10294  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10295 
10296  // 1. Process free space before this allocation.
10297  if(lastOffset < suballoc.offset)
10298  {
10299  // There is free space from lastOffset to suballoc.offset.
10300  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10301  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10302  }
10303 
10304  // 2. Process this allocation.
10305  // There is allocation with suballoc.offset, suballoc.size.
10306  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10307 
10308  // 3. Prepare for next iteration.
10309  lastOffset = suballoc.offset + suballoc.size;
10310  --nextAlloc2ndIndex;
10311  }
10312  // We are at the end.
10313  else
10314  {
10315  if(lastOffset < size)
10316  {
10317  // There is free space from lastOffset to size.
10318  const VkDeviceSize unusedRangeSize = size - lastOffset;
10319  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10320  }
10321 
10322  // End of loop.
10323  lastOffset = size;
10324  }
10325  }
10326  }
10327 
10328  PrintDetailedMap_End(json);
10329 }
10330 #endif // #if VMA_STATS_STRING_ENABLED
10331 
10332 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10333  uint32_t currentFrameIndex,
10334  uint32_t frameInUseCount,
10335  VkDeviceSize bufferImageGranularity,
10336  VkDeviceSize allocSize,
10337  VkDeviceSize allocAlignment,
10338  bool upperAddress,
10339  VmaSuballocationType allocType,
10340  bool canMakeOtherLost,
10341  uint32_t strategy,
10342  VmaAllocationRequest* pAllocationRequest)
10343 {
10344  VMA_ASSERT(allocSize > 0);
10345  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10346  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10347  VMA_HEAVY_ASSERT(Validate());
10348  return upperAddress ?
10349  CreateAllocationRequest_UpperAddress(
10350  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10351  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10352  CreateAllocationRequest_LowerAddress(
10353  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10354  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10355 }
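/*
Usage sketch for the dispatch above: upperAddress requests come from pools
created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT that are used as a double
stack. A minimal, hypothetical example (allocator/pool/buffer-info creation
elided; identifiers like buf1/alloc1 are placeholders):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool; // Pool with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.

    // Placed at the lower end of the block - CreateAllocationRequest_LowerAddress.
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf1, &alloc1, VMA_NULL);

    // Placed at the upper end of the block - CreateAllocationRequest_UpperAddress.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf2, &alloc2, VMA_NULL);
*/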
10356 
10357 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10358  uint32_t currentFrameIndex,
10359  uint32_t frameInUseCount,
10360  VkDeviceSize bufferImageGranularity,
10361  VkDeviceSize allocSize,
10362  VkDeviceSize allocAlignment,
10363  VmaSuballocationType allocType,
10364  bool canMakeOtherLost,
10365  uint32_t strategy,
10366  VmaAllocationRequest* pAllocationRequest)
10367 {
10368  const VkDeviceSize size = GetSize();
10369  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10370  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10371 
10372  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10373  {
10374  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10375  return false;
10376  }
10377 
10378  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10379  if(allocSize > size)
10380  {
10381  return false;
10382  }
10383  VkDeviceSize resultBaseOffset = size - allocSize;
10384  if(!suballocations2nd.empty())
10385  {
10386  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10387  resultBaseOffset = lastSuballoc.offset - allocSize;
10388  if(allocSize > lastSuballoc.offset)
10389  {
10390  return false;
10391  }
10392  }
10393 
10394  // Start from offset equal to end of free space.
10395  VkDeviceSize resultOffset = resultBaseOffset;
10396 
10397  // Apply VMA_DEBUG_MARGIN at the end.
10398  if(VMA_DEBUG_MARGIN > 0)
10399  {
10400  if(resultOffset < VMA_DEBUG_MARGIN)
10401  {
10402  return false;
10403  }
10404  resultOffset -= VMA_DEBUG_MARGIN;
10405  }
10406 
10407  // Apply alignment.
10408  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10409 
10410  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10411  // Make bigger alignment if necessary.
10412  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10413  {
10414  bool bufferImageGranularityConflict = false;
10415  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10416  {
10417  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10418  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10419  {
10420  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10421  {
10422  bufferImageGranularityConflict = true;
10423  break;
10424  }
10425  }
10426  else
10427  // Already on previous page.
10428  break;
10429  }
10430  if(bufferImageGranularityConflict)
10431  {
10432  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10433  }
10434  }
10435 
10436  // There is enough free space.
10437  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10438  suballocations1st.back().offset + suballocations1st.back().size :
10439  0;
10440  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10441  {
10442  // Check previous suballocations for BufferImageGranularity conflicts.
10443  // If conflict exists, allocation cannot be made here.
10444  if(bufferImageGranularity > 1)
10445  {
10446  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10447  {
10448  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10449  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10450  {
10451  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10452  {
10453  return false;
10454  }
10455  }
10456  else
10457  {
10458  // Already on next page.
10459  break;
10460  }
10461  }
10462  }
10463 
10464  // All tests passed: Success.
10465  pAllocationRequest->offset = resultOffset;
10466  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10467  pAllocationRequest->sumItemSize = 0;
10468  // pAllocationRequest->item unused.
10469  pAllocationRequest->itemsToMakeLostCount = 0;
10470  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10471  return true;
10472  }
10473 
10474  return false;
10475 }
10476 
10477 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10478  uint32_t currentFrameIndex,
10479  uint32_t frameInUseCount,
10480  VkDeviceSize bufferImageGranularity,
10481  VkDeviceSize allocSize,
10482  VkDeviceSize allocAlignment,
10483  VmaSuballocationType allocType,
10484  bool canMakeOtherLost,
10485  uint32_t strategy,
10486  VmaAllocationRequest* pAllocationRequest)
10487 {
10488  const VkDeviceSize size = GetSize();
10489  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10490  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10491 
10492  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10493  {
10494  // Try to allocate at the end of 1st vector.
10495 
10496  VkDeviceSize resultBaseOffset = 0;
10497  if(!suballocations1st.empty())
10498  {
10499  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10500  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10501  }
10502 
10503  // Start from offset equal to beginning of free space.
10504  VkDeviceSize resultOffset = resultBaseOffset;
10505 
10506  // Apply VMA_DEBUG_MARGIN at the beginning.
10507  if(VMA_DEBUG_MARGIN > 0)
10508  {
10509  resultOffset += VMA_DEBUG_MARGIN;
10510  }
10511 
10512  // Apply alignment.
10513  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10514 
10515  // Check previous suballocations for BufferImageGranularity conflicts.
10516  // Make bigger alignment if necessary.
10517  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10518  {
10519  bool bufferImageGranularityConflict = false;
10520  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10521  {
10522  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10523  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10524  {
10525  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10526  {
10527  bufferImageGranularityConflict = true;
10528  break;
10529  }
10530  }
10531  else
10532  // Already on previous page.
10533  break;
10534  }
10535  if(bufferImageGranularityConflict)
10536  {
10537  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10538  }
10539  }
10540 
10541  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10542  suballocations2nd.back().offset : size;
10543 
10544  // There is enough free space at the end after alignment.
10545  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10546  {
10547  // Check next suballocations for BufferImageGranularity conflicts.
10548  // If conflict exists, allocation cannot be made here.
10549  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10550  {
10551  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10552  {
10553  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10554  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10555  {
10556  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10557  {
10558  return false;
10559  }
10560  }
10561  else
10562  {
10563  // Already on previous page.
10564  break;
10565  }
10566  }
10567  }
10568 
10569  // All tests passed: Success.
10570  pAllocationRequest->offset = resultOffset;
10571  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10572  pAllocationRequest->sumItemSize = 0;
10573  // pAllocationRequest->item, customData unused.
10574  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10575  pAllocationRequest->itemsToMakeLostCount = 0;
10576  return true;
10577  }
10578  }
10579 
10580  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10581  // beginning of 1st vector as the end of free space.
10582  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10583  {
10584  VMA_ASSERT(!suballocations1st.empty());
10585 
10586  VkDeviceSize resultBaseOffset = 0;
10587  if(!suballocations2nd.empty())
10588  {
10589  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10590  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10591  }
10592 
10593  // Start from offset equal to beginning of free space.
10594  VkDeviceSize resultOffset = resultBaseOffset;
10595 
10596  // Apply VMA_DEBUG_MARGIN at the beginning.
10597  if(VMA_DEBUG_MARGIN > 0)
10598  {
10599  resultOffset += VMA_DEBUG_MARGIN;
10600  }
10601 
10602  // Apply alignment.
10603  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10604 
10605  // Check previous suballocations for BufferImageGranularity conflicts.
10606  // Make bigger alignment if necessary.
10607  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10608  {
10609  bool bufferImageGranularityConflict = false;
10610  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10611  {
10612  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10613  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10614  {
10615  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10616  {
10617  bufferImageGranularityConflict = true;
10618  break;
10619  }
10620  }
10621  else
10622  // Already on previous page.
10623  break;
10624  }
10625  if(bufferImageGranularityConflict)
10626  {
10627  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10628  }
10629  }
10630 
10631  pAllocationRequest->itemsToMakeLostCount = 0;
10632  pAllocationRequest->sumItemSize = 0;
10633  size_t index1st = m_1stNullItemsBeginCount;
10634 
10635  if(canMakeOtherLost)
10636  {
10637  while(index1st < suballocations1st.size() &&
10638  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10639  {
10640  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10641  const VmaSuballocation& suballoc = suballocations1st[index1st];
10642  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10643  {
10644  // No problem.
10645  }
10646  else
10647  {
10648  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10649  if(suballoc.hAllocation->CanBecomeLost() &&
10650  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10651  {
10652  ++pAllocationRequest->itemsToMakeLostCount;
10653  pAllocationRequest->sumItemSize += suballoc.size;
10654  }
10655  else
10656  {
10657  return false;
10658  }
10659  }
10660  ++index1st;
10661  }
10662 
10663  // Check next suballocations for BufferImageGranularity conflicts.
10664  // If conflict exists, we must mark more allocations lost or fail.
10665  if(bufferImageGranularity > 1)
10666  {
10667  while(index1st < suballocations1st.size())
10668  {
10669  const VmaSuballocation& suballoc = suballocations1st[index1st];
10670  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10671  {
10672  if(suballoc.hAllocation != VK_NULL_HANDLE)
10673  {
10674  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10675  if(suballoc.hAllocation->CanBecomeLost() &&
10676  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10677  {
10678  ++pAllocationRequest->itemsToMakeLostCount;
10679  pAllocationRequest->sumItemSize += suballoc.size;
10680  }
10681  else
10682  {
10683  return false;
10684  }
10685  }
10686  }
10687  else
10688  {
10689  // Already on next page.
10690  break;
10691  }
10692  ++index1st;
10693  }
10694  }
10695 
10696  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10697  if(index1st == suballocations1st.size() &&
10698  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10699  {
10700  // TODO: Handling of this special case is not implemented yet, so the allocation fails here.
10701  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10702  }
10703  }
10704 
10705  // There is enough free space at the end after alignment.
10706  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10707  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10708  {
10709  // Check next suballocations for BufferImageGranularity conflicts.
10710  // If conflict exists, allocation cannot be made here.
10711  if(bufferImageGranularity > 1)
10712  {
10713  for(size_t nextSuballocIndex = index1st;
10714  nextSuballocIndex < suballocations1st.size();
10715  nextSuballocIndex++)
10716  {
10717  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10718  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10719  {
10720  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10721  {
10722  return false;
10723  }
10724  }
10725  else
10726  {
10727  // Already on next page.
10728  break;
10729  }
10730  }
10731  }
10732 
10733  // All tests passed: Success.
10734  pAllocationRequest->offset = resultOffset;
10735  pAllocationRequest->sumFreeSize =
10736  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10737  - resultBaseOffset
10738  - pAllocationRequest->sumItemSize;
10739  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10740  // pAllocationRequest->item, customData unused.
10741  return true;
10742  }
10743  }
10744 
10745  return false;
10746 }
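/*
Note on the two branches above: the first one grows the 1st vector toward the
end of the block (or toward the bottom of the upper stack in double-stack
mode); the second one wraps around and grows the 2nd vector from offset 0
toward the first still-live suballocation of the 1st vector, turning the
block into a classic ring buffer. The ring-buffer mode therefore assumes
roughly FIFO allocation lifetimes; freeing in arbitrary order only leaves
null items that CleanupAfterFree() has to compact later.
*/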
10747 
10748 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10749  uint32_t currentFrameIndex,
10750  uint32_t frameInUseCount,
10751  VmaAllocationRequest* pAllocationRequest)
10752 {
10753  if(pAllocationRequest->itemsToMakeLostCount == 0)
10754  {
10755  return true;
10756  }
10757 
10758  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10759 
10760  // We always start from 1st.
10761  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10762  size_t index = m_1stNullItemsBeginCount;
10763  size_t madeLostCount = 0;
10764  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10765  {
10766  if(index == suballocations->size())
10767  {
10768  index = 0;
10769  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10770  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10771  {
10772  suballocations = &AccessSuballocations2nd();
10773  }
10774  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10775  // suballocations continues pointing at AccessSuballocations1st().
10776  VMA_ASSERT(!suballocations->empty());
10777  }
10778  VmaSuballocation& suballoc = (*suballocations)[index];
10779  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10780  {
10781  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10782  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10783  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10784  {
10785  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10786  suballoc.hAllocation = VK_NULL_HANDLE;
10787  m_SumFreeSize += suballoc.size;
10788  if(suballocations == &AccessSuballocations1st())
10789  {
10790  ++m_1stNullItemsMiddleCount;
10791  }
10792  else
10793  {
10794  ++m_2ndNullItemsCount;
10795  }
10796  ++madeLostCount;
10797  }
10798  else
10799  {
10800  return false;
10801  }
10802  }
10803  ++index;
10804  }
10805 
10806  CleanupAfterFree();
10807  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10808 
10809  return true;
10810 }
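/*
The function above is only useful together with the lost-allocation protocol
driven by the application. A minimal sketch (frame loop and error handling
elided; alloc is a hypothetical allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT):

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation was made lost by a later CreateAllocationRequest()
        // and its memory reused - destroy the handle and recreate the resource.
    }
*/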
10811 
10812 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10813 {
10814  uint32_t lostAllocationCount = 0;
10815 
10816  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10817  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10818  {
10819  VmaSuballocation& suballoc = suballocations1st[i];
10820  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10821  suballoc.hAllocation->CanBecomeLost() &&
10822  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10823  {
10824  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10825  suballoc.hAllocation = VK_NULL_HANDLE;
10826  ++m_1stNullItemsMiddleCount;
10827  m_SumFreeSize += suballoc.size;
10828  ++lostAllocationCount;
10829  }
10830  }
10831 
10832  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10833  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10834  {
10835  VmaSuballocation& suballoc = suballocations2nd[i];
10836  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10837  suballoc.hAllocation->CanBecomeLost() &&
10838  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10839  {
10840  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10841  suballoc.hAllocation = VK_NULL_HANDLE;
10842  ++m_2ndNullItemsCount;
10843  m_SumFreeSize += suballoc.size;
10844  ++lostAllocationCount;
10845  }
10846  }
10847 
10848  if(lostAllocationCount)
10849  {
10850  CleanupAfterFree();
10851  }
10852 
10853  return lostAllocationCount;
10854 }
10855 
10856 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10857 {
10858  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10859  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10860  {
10861  const VmaSuballocation& suballoc = suballocations1st[i];
10862  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10863  {
10864  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10865  {
10866  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10867  return VK_ERROR_VALIDATION_FAILED_EXT;
10868  }
10869  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10870  {
10871  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10872  return VK_ERROR_VALIDATION_FAILED_EXT;
10873  }
10874  }
10875  }
10876 
10877  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10878  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10879  {
10880  const VmaSuballocation& suballoc = suballocations2nd[i];
10881  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10882  {
10883  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10884  {
10885  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10886  return VK_ERROR_VALIDATION_FAILED_EXT;
10887  }
10888  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10889  {
10890  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10891  return VK_ERROR_VALIDATION_FAILED_EXT;
10892  }
10893  }
10894  }
10895 
10896  return VK_SUCCESS;
10897 }
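/*
CheckCorruption() above is reached from the public API when VMA_DEBUG_MARGIN
is positive and VMA_DEBUG_DETECT_CORRUPTION is enabled, so that every
allocation has magic values written into its margins. A usage sketch:

    // Checks all mappable memory types included in the bit mask.
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_ERROR_FEATURE_NOT_PRESENT => corruption detection not enabled,
    // VK_ERROR_VALIDATION_FAILED_EXT => a corrupted margin was found.
*/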
10898 
10899 void VmaBlockMetadata_Linear::Alloc(
10900  const VmaAllocationRequest& request,
10901  VmaSuballocationType type,
10902  VkDeviceSize allocSize,
10903  VmaAllocation hAllocation)
10904 {
10905  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10906 
10907  switch(request.type)
10908  {
10909  case VmaAllocationRequestType::UpperAddress:
10910  {
10911  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10912  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10913  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10914  suballocations2nd.push_back(newSuballoc);
10915  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10916  }
10917  break;
10918  case VmaAllocationRequestType::EndOf1st:
10919  {
10920  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10921 
10922  VMA_ASSERT(suballocations1st.empty() ||
10923  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10924  // Check if it fits before the end of the block.
10925  VMA_ASSERT(request.offset + allocSize <= GetSize());
10926 
10927  suballocations1st.push_back(newSuballoc);
10928  }
10929  break;
10930  case VmaAllocationRequestType::EndOf2nd:
10931  {
10932  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10933  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10934  VMA_ASSERT(!suballocations1st.empty() &&
10935  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10936  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10937 
10938  switch(m_2ndVectorMode)
10939  {
10940  case SECOND_VECTOR_EMPTY:
10941  // First allocation from second part ring buffer.
10942  VMA_ASSERT(suballocations2nd.empty());
10943  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10944  break;
10945  case SECOND_VECTOR_RING_BUFFER:
10946  // 2-part ring buffer is already started.
10947  VMA_ASSERT(!suballocations2nd.empty());
10948  break;
10949  case SECOND_VECTOR_DOUBLE_STACK:
10950  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10951  break;
10952  default:
10953  VMA_ASSERT(0);
10954  }
10955 
10956  suballocations2nd.push_back(newSuballoc);
10957  }
10958  break;
10959  default:
10960  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10961  }
10962 
10963  m_SumFreeSize -= newSuballoc.size;
10964 }
10965 
10966 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10967 {
10968  FreeAtOffset(allocation->GetOffset());
10969 }
10970 
10971 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10972 {
10973  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10974  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10975 
10976  if(!suballocations1st.empty())
10977  {
10978  // First allocation: Mark it as next empty at the beginning.
10979  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10980  if(firstSuballoc.offset == offset)
10981  {
10982  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10983  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10984  m_SumFreeSize += firstSuballoc.size;
10985  ++m_1stNullItemsBeginCount;
10986  CleanupAfterFree();
10987  return;
10988  }
10989  }
10990 
10991  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10992  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10993  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10994  {
10995  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10996  if(lastSuballoc.offset == offset)
10997  {
10998  m_SumFreeSize += lastSuballoc.size;
10999  suballocations2nd.pop_back();
11000  CleanupAfterFree();
11001  return;
11002  }
11003  }
11004  // Last allocation in 1st vector.
11005  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11006  {
11007  VmaSuballocation& lastSuballoc = suballocations1st.back();
11008  if(lastSuballoc.offset == offset)
11009  {
11010  m_SumFreeSize += lastSuballoc.size;
11011  suballocations1st.pop_back();
11012  CleanupAfterFree();
11013  return;
11014  }
11015  }
11016 
11017  // Item from the middle of 1st vector.
11018  {
11019  VmaSuballocation refSuballoc;
11020  refSuballoc.offset = offset;
11021  // The remaining members are intentionally left uninitialized for better performance.
11022  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11023  suballocations1st.begin() + m_1stNullItemsBeginCount,
11024  suballocations1st.end(),
11025  refSuballoc,
11026  VmaSuballocationOffsetLess());
11027  if(it != suballocations1st.end())
11028  {
11029  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11030  it->hAllocation = VK_NULL_HANDLE;
11031  ++m_1stNullItemsMiddleCount;
11032  m_SumFreeSize += it->size;
11033  CleanupAfterFree();
11034  return;
11035  }
11036  }
11037 
11038  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11039  {
11040  // Item from the middle of 2nd vector.
11041  VmaSuballocation refSuballoc;
11042  refSuballoc.offset = offset;
11043  // The remaining members are intentionally left uninitialized for better performance.
11044  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11045  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11046  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11047  if(it != suballocations2nd.end())
11048  {
11049  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11050  it->hAllocation = VK_NULL_HANDLE;
11051  ++m_2ndNullItemsCount;
11052  m_SumFreeSize += it->size;
11053  CleanupAfterFree();
11054  return;
11055  }
11056  }
11057 
11058  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11059 }
11060 
11061 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11062 {
11063  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11064  const size_t suballocCount = AccessSuballocations1st().size();
11065  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11066 }
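/*
Worked example for the heuristic above: compaction of the 1st vector runs
once null (freed) items outnumber live items by a factor of at least 1.5 and
the vector has more than 32 entries. With 48 suballocations of which 30 are
null: 30 * 2 = 60 >= (48 - 30) * 3 = 54, so ShouldCompact1st() returns true;
with only 20 null items: 20 * 2 = 40 < 28 * 3 = 84, and the vector is left as-is.
*/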
11067 
11068 void VmaBlockMetadata_Linear::CleanupAfterFree()
11069 {
11070  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11071  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11072 
11073  if(IsEmpty())
11074  {
11075  suballocations1st.clear();
11076  suballocations2nd.clear();
11077  m_1stNullItemsBeginCount = 0;
11078  m_1stNullItemsMiddleCount = 0;
11079  m_2ndNullItemsCount = 0;
11080  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11081  }
11082  else
11083  {
11084  const size_t suballoc1stCount = suballocations1st.size();
11085  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11086  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11087 
11088  // Find more null items at the beginning of 1st vector.
11089  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11090  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11091  {
11092  ++m_1stNullItemsBeginCount;
11093  --m_1stNullItemsMiddleCount;
11094  }
11095 
11096  // Find more null items at the end of 1st vector.
11097  while(m_1stNullItemsMiddleCount > 0 &&
11098  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11099  {
11100  --m_1stNullItemsMiddleCount;
11101  suballocations1st.pop_back();
11102  }
11103 
11104  // Find more null items at the end of 2nd vector.
11105  while(m_2ndNullItemsCount > 0 &&
11106  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11107  {
11108  --m_2ndNullItemsCount;
11109  suballocations2nd.pop_back();
11110  }
11111 
11112  // Find more null items at the beginning of 2nd vector.
11113  while(m_2ndNullItemsCount > 0 &&
11114  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11115  {
11116  --m_2ndNullItemsCount;
11117  VmaVectorRemove(suballocations2nd, 0);
11118  }
11119 
11120  if(ShouldCompact1st())
11121  {
11122  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11123  size_t srcIndex = m_1stNullItemsBeginCount;
11124  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11125  {
11126  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11127  {
11128  ++srcIndex;
11129  }
11130  if(dstIndex != srcIndex)
11131  {
11132  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11133  }
11134  ++srcIndex;
11135  }
11136  suballocations1st.resize(nonNullItemCount);
11137  m_1stNullItemsBeginCount = 0;
11138  m_1stNullItemsMiddleCount = 0;
11139  }
11140 
11141  // 2nd vector became empty.
11142  if(suballocations2nd.empty())
11143  {
11144  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11145  }
11146 
11147  // 1st vector became empty.
11148  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11149  {
11150  suballocations1st.clear();
11151  m_1stNullItemsBeginCount = 0;
11152 
11153  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11154  {
11155  // Swap 1st with 2nd. Now 2nd is empty.
11156  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11157  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11158  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11159  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11160  {
11161  ++m_1stNullItemsBeginCount;
11162  --m_1stNullItemsMiddleCount;
11163  }
11164  m_2ndNullItemsCount = 0;
11165  m_1stVectorIndex ^= 1;
11166  }
11167  }
11168  }
11169 
11170  VMA_HEAVY_ASSERT(Validate());
11171 }
11172 
11173 
11174 ////////////////////////////////////////////////////////////////////////////////
11175 // class VmaBlockMetadata_Buddy
11176 
11177 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11178  VmaBlockMetadata(hAllocator),
11179  m_Root(VMA_NULL),
11180  m_AllocationCount(0),
11181  m_FreeCount(1),
11182  m_SumFreeSize(0)
11183 {
11184  memset(m_FreeList, 0, sizeof(m_FreeList));
11185 }
11186 
11187 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11188 {
11189  DeleteNode(m_Root);
11190 }
11191 
11192 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11193 {
11194  VmaBlockMetadata::Init(size);
11195 
11196  m_UsableSize = VmaPrevPow2(size);
11197  m_SumFreeSize = m_UsableSize;
11198 
11199  // Calculate m_LevelCount.
11200  m_LevelCount = 1;
11201  while(m_LevelCount < MAX_LEVELS &&
11202  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11203  {
11204  ++m_LevelCount;
11205  }
11206 
11207  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11208  rootNode->offset = 0;
11209  rootNode->type = Node::TYPE_FREE;
11210  rootNode->parent = VMA_NULL;
11211  rootNode->buddy = VMA_NULL;
11212 
11213  m_Root = rootNode;
11214  AddToFreeListFront(0, rootNode);
11215 }
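/*
Worked example for Init() above: the buddy allocator can only manage a
power-of-2 region, so m_UsableSize = VmaPrevPow2(size). For a 100 MB block
the usable size is 64 MB; the remaining 36 MB become the "unusable" range
reported by GetUnusableSize(). Levels then halve the node size until it would
drop below MIN_NODE_SIZE: level 0 is one 64 MB node, level 1 two 32 MB nodes,
level 2 four 16 MB nodes, and so on, capped at MAX_LEVELS.
*/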
11216 
11217 bool VmaBlockMetadata_Buddy::Validate() const
11218 {
11219  // Validate tree.
11220  ValidationContext ctx;
11221  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11222  {
11223  VMA_VALIDATE(false && "ValidateNode failed.");
11224  }
11225  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11226  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11227 
11228  // Validate free node lists.
11229  for(uint32_t level = 0; level < m_LevelCount; ++level)
11230  {
11231  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11232  m_FreeList[level].front->free.prev == VMA_NULL);
11233 
11234  for(Node* node = m_FreeList[level].front;
11235  node != VMA_NULL;
11236  node = node->free.next)
11237  {
11238  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11239 
11240  if(node->free.next == VMA_NULL)
11241  {
11242  VMA_VALIDATE(m_FreeList[level].back == node);
11243  }
11244  else
11245  {
11246  VMA_VALIDATE(node->free.next->free.prev == node);
11247  }
11248  }
11249  }
11250 
11251  // Validate that free lists at higher levels are empty.
11252  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11253  {
11254  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11255  }
11256 
11257  return true;
11258 }
11259 
11260 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11261 {
11262  for(uint32_t level = 0; level < m_LevelCount; ++level)
11263  {
11264  if(m_FreeList[level].front != VMA_NULL)
11265  {
11266  return LevelToNodeSize(level);
11267  }
11268  }
11269  return 0;
11270 }
11271 
11272 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11273 {
11274  const VkDeviceSize unusableSize = GetUnusableSize();
11275 
11276  outInfo.blockCount = 1;
11277 
11278  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11279  outInfo.usedBytes = outInfo.unusedBytes = 0;
11280 
11281  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11282  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11283  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11284 
11285  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11286 
11287  if(unusableSize > 0)
11288  {
11289  ++outInfo.unusedRangeCount;
11290  outInfo.unusedBytes += unusableSize;
11291  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11292  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11293  }
11294 }
11295 
11296 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11297 {
11298  const VkDeviceSize unusableSize = GetUnusableSize();
11299 
11300  inoutStats.size += GetSize();
11301  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11302  inoutStats.allocationCount += m_AllocationCount;
11303  inoutStats.unusedRangeCount += m_FreeCount;
11304  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11305 
11306  if(unusableSize > 0)
11307  {
11308  ++inoutStats.unusedRangeCount;
11309  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11310  }
11311 }
11312 
11313 #if VMA_STATS_STRING_ENABLED
11314 
11315 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11316 {
11317  // TODO optimize
11318  VmaStatInfo stat;
11319  CalcAllocationStatInfo(stat);
11320 
11321  PrintDetailedMap_Begin(
11322  json,
11323  stat.unusedBytes,
11324  stat.allocationCount,
11325  stat.unusedRangeCount);
11326 
11327  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11328 
11329  const VkDeviceSize unusableSize = GetUnusableSize();
11330  if(unusableSize > 0)
11331  {
11332  PrintDetailedMap_UnusedRange(json,
11333  m_UsableSize, // offset
11334  unusableSize); // size
11335  }
11336 
11337  PrintDetailedMap_End(json);
11338 }
11339 
11340 #endif // #if VMA_STATS_STRING_ENABLED
11341 
11342 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11343  uint32_t currentFrameIndex,
11344  uint32_t frameInUseCount,
11345  VkDeviceSize bufferImageGranularity,
11346  VkDeviceSize allocSize,
11347  VkDeviceSize allocAlignment,
11348  bool upperAddress,
11349  VmaSuballocationType allocType,
11350  bool canMakeOtherLost,
11351  uint32_t strategy,
11352  VmaAllocationRequest* pAllocationRequest)
11353 {
11354  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11355 
11356  // Simple way to respect bufferImageGranularity. May be optimized some day.
11357  // Whenever it might be an OPTIMAL image...
11358  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11359  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11360  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11361  {
11362  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11363  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11364  }
11365 
11366  if(allocSize > m_UsableSize)
11367  {
11368  return false;
11369  }
11370 
11371  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11372  for(uint32_t level = targetLevel + 1; level--; )
11373  {
11374  for(Node* freeNode = m_FreeList[level].front;
11375  freeNode != VMA_NULL;
11376  freeNode = freeNode->free.next)
11377  {
11378  if(freeNode->offset % allocAlignment == 0)
11379  {
11380  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11381  pAllocationRequest->offset = freeNode->offset;
11382  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11383  pAllocationRequest->sumItemSize = 0;
11384  pAllocationRequest->itemsToMakeLostCount = 0;
11385  pAllocationRequest->customData = (void*)(uintptr_t)level;
11386  return true;
11387  }
11388  }
11389  }
11390 
11391  return false;
11392 }
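/*
Note on the search above: the loop starts at targetLevel (the deepest level
whose node size still fits allocSize) and walks toward level 0, i.e. toward
ever larger free nodes. The first free node whose offset satisfies
allocAlignment is chosen, and the level it was found at is passed to Alloc()
through customData, where the node is split back down to targetLevel.
*/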
11393 
11394 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11395  uint32_t currentFrameIndex,
11396  uint32_t frameInUseCount,
11397  VmaAllocationRequest* pAllocationRequest)
11398 {
11399  /*
11400  Lost allocations are not supported in buddy allocator at the moment.
11401  Support might be added in the future.
11402  */
11403  return pAllocationRequest->itemsToMakeLostCount == 0;
11404 }
11405 
11406 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11407 {
11408  /*
11409  Lost allocations are not supported in buddy allocator at the moment.
11410  Support might be added in the future.
11411  */
11412  return 0;
11413 }
11414 
11415 void VmaBlockMetadata_Buddy::Alloc(
11416  const VmaAllocationRequest& request,
11417  VmaSuballocationType type,
11418  VkDeviceSize allocSize,
11419  VmaAllocation hAllocation)
11420 {
11421  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11422 
11423  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11424  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11425 
11426  Node* currNode = m_FreeList[currLevel].front;
11427  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11428  while(currNode->offset != request.offset)
11429  {
11430  currNode = currNode->free.next;
11431  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11432  }
11433 
11434  // Go down, splitting free nodes.
11435  while(currLevel < targetLevel)
11436  {
11437  // currNode is already first free node at currLevel.
11438  // Remove it from list of free nodes at this currLevel.
11439  RemoveFromFreeList(currLevel, currNode);
11440 
11441  const uint32_t childrenLevel = currLevel + 1;
11442 
11443  // Create two free sub-nodes.
11444  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11445  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11446 
11447  leftChild->offset = currNode->offset;
11448  leftChild->type = Node::TYPE_FREE;
11449  leftChild->parent = currNode;
11450  leftChild->buddy = rightChild;
11451 
11452  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11453  rightChild->type = Node::TYPE_FREE;
11454  rightChild->parent = currNode;
11455  rightChild->buddy = leftChild;
11456 
11457  // Convert current currNode to split type.
11458  currNode->type = Node::TYPE_SPLIT;
11459  currNode->split.leftChild = leftChild;
11460 
11461  // Add child nodes to free list. Order is important!
11462  AddToFreeListFront(childrenLevel, rightChild);
11463  AddToFreeListFront(childrenLevel, leftChild);
11464 
11465  ++m_FreeCount;
11466  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11467  ++currLevel;
11468  currNode = m_FreeList[currLevel].front;
11469 
11470  /*
11471  We can be sure that currNode, as left child of node previously split,
11472  also fulfills the alignment requirement.
11473  */
11474  }
11475 
11476  // Remove from free list.
11477  VMA_ASSERT(currLevel == targetLevel &&
11478  currNode != VMA_NULL &&
11479  currNode->type == Node::TYPE_FREE);
11480  RemoveFromFreeList(currLevel, currNode);
11481 
11482  // Convert to allocation node.
11483  currNode->type = Node::TYPE_ALLOCATION;
11484  currNode->allocation.alloc = hAllocation;
11485 
11486  ++m_AllocationCount;
11487  --m_FreeCount;
11488  m_SumFreeSize -= allocSize;
11489 }
11490 
11491 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11492 {
11493  if(node->type == Node::TYPE_SPLIT)
11494  {
11495  DeleteNode(node->split.leftChild->buddy);
11496  DeleteNode(node->split.leftChild);
11497  }
11498 
11499  vma_delete(GetAllocationCallbacks(), node);
11500 }
11501 
11502 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11503 {
11504  VMA_VALIDATE(level < m_LevelCount);
11505  VMA_VALIDATE(curr->parent == parent);
11506  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11507  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11508  switch(curr->type)
11509  {
11510  case Node::TYPE_FREE:
11511  // curr->free.prev, next are validated separately.
11512  ctx.calculatedSumFreeSize += levelNodeSize;
11513  ++ctx.calculatedFreeCount;
11514  break;
11515  case Node::TYPE_ALLOCATION:
11516  ++ctx.calculatedAllocationCount;
11517  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11518  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11519  break;
11520  case Node::TYPE_SPLIT:
11521  {
11522  const uint32_t childrenLevel = level + 1;
11523  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11524  const Node* const leftChild = curr->split.leftChild;
11525  VMA_VALIDATE(leftChild != VMA_NULL);
11526  VMA_VALIDATE(leftChild->offset == curr->offset);
11527  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11528  {
11529  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11530  }
11531  const Node* const rightChild = leftChild->buddy;
11532  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11533  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11534  {
11535  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11536  }
11537  }
11538  break;
11539  default:
11540  return false;
11541  }
11542 
11543  return true;
11544 }
11545 
11546 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11547 {
11548  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11549  uint32_t level = 0;
11550  VkDeviceSize currLevelNodeSize = m_UsableSize;
11551  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11552  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11553  {
11554  ++level;
11555  currLevelNodeSize = nextLevelNodeSize;
11556  nextLevelNodeSize = currLevelNodeSize >> 1;
11557  }
11558  return level;
11559 }
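/*
Worked example for AllocSizeToLevel() above, assuming m_UsableSize = 64 MB:
a 3 MB request keeps descending while allocSize <= nextLevelNodeSize
(3 <= 32, 16, 8, 4 MB) and stops before 2 MB, returning level 4, whose node
size of 4 MB is the smallest power-of-2 node that still fits the request.
*/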
11560 
11561 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11562 {
11563  // Find node and level.
11564  Node* node = m_Root;
11565  VkDeviceSize nodeOffset = 0;
11566  uint32_t level = 0;
11567  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11568  while(node->type == Node::TYPE_SPLIT)
11569  {
11570  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11571  if(offset < nodeOffset + nextLevelSize)
11572  {
11573  node = node->split.leftChild;
11574  }
11575  else
11576  {
11577  node = node->split.leftChild->buddy;
11578  nodeOffset += nextLevelSize;
11579  }
11580  ++level;
11581  levelNodeSize = nextLevelSize;
11582  }
11583 
11584  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11585  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11586 
11587  ++m_FreeCount;
11588  --m_AllocationCount;
11589  m_SumFreeSize += alloc->GetSize();
11590 
11591  node->type = Node::TYPE_FREE;
11592 
11593  // Join free nodes if possible.
11594  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11595  {
11596  RemoveFromFreeList(level, node->buddy);
11597  Node* const parent = node->parent;
11598 
11599  vma_delete(GetAllocationCallbacks(), node->buddy);
11600  vma_delete(GetAllocationCallbacks(), node);
11601  parent->type = Node::TYPE_FREE;
11602 
11603  node = parent;
11604  --level;
11605  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11606  --m_FreeCount;
11607  }
11608 
11609  AddToFreeListFront(level, node);
11610 }
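/*
The loop at the end of FreeAtOffset() above is the classic buddy coalescing
step: whenever the freed node's buddy is also free, both children are deleted
and their parent becomes a single free node one level up, repeating toward
the root. Freeing the last allocation in a fully split block thus collapses
the whole tree back into one level-0 free node in at most m_LevelCount - 1
merge steps.
*/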
11611 
11612 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11613 {
11614  switch(node->type)
11615  {
11616  case Node::TYPE_FREE:
11617  ++outInfo.unusedRangeCount;
11618  outInfo.unusedBytes += levelNodeSize;
11619  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11620  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11621  break;
11622  case Node::TYPE_ALLOCATION:
11623  {
11624  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11625  ++outInfo.allocationCount;
11626  outInfo.usedBytes += allocSize;
11627  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11628  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11629 
11630  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11631  if(unusedRangeSize > 0)
11632  {
11633  ++outInfo.unusedRangeCount;
11634  outInfo.unusedBytes += unusedRangeSize;
11635  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11636  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11637  }
11638  }
11639  break;
11640  case Node::TYPE_SPLIT:
11641  {
11642  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11643  const Node* const leftChild = node->split.leftChild;
11644  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11645  const Node* const rightChild = leftChild->buddy;
11646  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11647  }
11648  break;
11649  default:
11650  VMA_ASSERT(0);
11651  }
11652 }
11653 
11654 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11655 {
11656  VMA_ASSERT(node->type == Node::TYPE_FREE);
11657 
11658  // List is empty.
11659  Node* const frontNode = m_FreeList[level].front;
11660  if(frontNode == VMA_NULL)
11661  {
11662  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11663  node->free.prev = node->free.next = VMA_NULL;
11664  m_FreeList[level].front = m_FreeList[level].back = node;
11665  }
11666  else
11667  {
11668  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11669  node->free.prev = VMA_NULL;
11670  node->free.next = frontNode;
11671  frontNode->free.prev = node;
11672  m_FreeList[level].front = node;
11673  }
11674 }
11675 
11676 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11677 {
11678  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11679 
11680  // It is at the front.
11681  if(node->free.prev == VMA_NULL)
11682  {
11683  VMA_ASSERT(m_FreeList[level].front == node);
11684  m_FreeList[level].front = node->free.next;
11685  }
11686  else
11687  {
11688  Node* const prevFreeNode = node->free.prev;
11689  VMA_ASSERT(prevFreeNode->free.next == node);
11690  prevFreeNode->free.next = node->free.next;
11691  }
11692 
11693  // It is at the back.
11694  if(node->free.next == VMA_NULL)
11695  {
11696  VMA_ASSERT(m_FreeList[level].back == node);
11697  m_FreeList[level].back = node->free.prev;
11698  }
11699  else
11700  {
11701  Node* const nextFreeNode = node->free.next;
11702  VMA_ASSERT(nextFreeNode->free.prev == node);
11703  nextFreeNode->free.prev = node->free.prev;
11704  }
11705 }
11706 
11707 #if VMA_STATS_STRING_ENABLED
11708 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11709 {
11710  switch(node->type)
11711  {
11712  case Node::TYPE_FREE:
11713  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11714  break;
11715  case Node::TYPE_ALLOCATION:
11716  {
11717  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11718  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11719  if(allocSize < levelNodeSize)
11720  {
11721  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11722  }
11723  }
11724  break;
11725  case Node::TYPE_SPLIT:
11726  {
11727  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11728  const Node* const leftChild = node->split.leftChild;
11729  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11730  const Node* const rightChild = leftChild->buddy;
11731  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11732  }
11733  break;
11734  default:
11735  VMA_ASSERT(0);
11736  }
11737 }
11738 #endif // #if VMA_STATS_STRING_ENABLED
11739 
11740 
11741 ////////////////////////////////////////////////////////////////////////////////
11742 // class VmaDeviceMemoryBlock
11743 
11743 
11744 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11745  m_pMetadata(VMA_NULL),
11746  m_MemoryTypeIndex(UINT32_MAX),
11747  m_Id(0),
11748  m_hMemory(VK_NULL_HANDLE),
11749  m_MapCount(0),
11750  m_pMappedData(VMA_NULL)
11751 {
11752 }
11753 
11754 void VmaDeviceMemoryBlock::Init(
11755  VmaAllocator hAllocator,
11756  VmaPool hParentPool,
11757  uint32_t newMemoryTypeIndex,
11758  VkDeviceMemory newMemory,
11759  VkDeviceSize newSize,
11760  uint32_t id,
11761  uint32_t algorithm)
11762 {
11763  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11764 
11765  m_hParentPool = hParentPool;
11766  m_MemoryTypeIndex = newMemoryTypeIndex;
11767  m_Id = id;
11768  m_hMemory = newMemory;
11769 
11770  switch(algorithm)
11771  {
11772  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11773  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11774  break;
11775  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11776  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11777  break;
11778  default:
11779  VMA_ASSERT(0);
11780  // Fall-through.
11781  case 0:
11782  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11783  }
11784  m_pMetadata->Init(newSize);
11785 }
11786 
11787 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11788 {
11789  // This is the most important assert in the entire library.
11790  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11791  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11792 
11793  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11794  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11795  m_hMemory = VK_NULL_HANDLE;
11796 
11797  vma_delete(allocator, m_pMetadata);
11798  m_pMetadata = VMA_NULL;
11799 }
11800 
11801 bool VmaDeviceMemoryBlock::Validate() const
11802 {
11803  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11804  (m_pMetadata->GetSize() != 0));
11805 
11806  return m_pMetadata->Validate();
11807 }
11808 
11809 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11810 {
11811  void* pData = nullptr;
11812  VkResult res = Map(hAllocator, 1, &pData);
11813  if(res != VK_SUCCESS)
11814  {
11815  return res;
11816  }
11817 
11818  res = m_pMetadata->CheckCorruption(pData);
11819 
11820  Unmap(hAllocator, 1);
11821 
11822  return res;
11823 }
11824 
11825 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11826 {
11827  if(count == 0)
11828  {
11829  return VK_SUCCESS;
11830  }
11831 
11832  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11833  if(m_MapCount != 0)
11834  {
11835  m_MapCount += count;
11836  VMA_ASSERT(m_pMappedData != VMA_NULL);
11837  if(ppData != VMA_NULL)
11838  {
11839  *ppData = m_pMappedData;
11840  }
11841  return VK_SUCCESS;
11842  }
11843  else
11844  {
11845  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11846  hAllocator->m_hDevice,
11847  m_hMemory,
11848  0, // offset
11849  VK_WHOLE_SIZE,
11850  0, // flags
11851  &m_pMappedData);
11852  if(result == VK_SUCCESS)
11853  {
11854  if(ppData != VMA_NULL)
11855  {
11856  *ppData = m_pMappedData;
11857  }
11858  m_MapCount = count;
11859  }
11860  return result;
11861  }
11862 }
11863 
11864 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11865 {
11866  if(count == 0)
11867  {
11868  return;
11869  }
11870 
11871  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11872  if(m_MapCount >= count)
11873  {
11874  m_MapCount -= count;
11875  if(m_MapCount == 0)
11876  {
11877  m_pMappedData = VMA_NULL;
11878  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11879  }
11880  }
11881  else
11882  {
11883  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11884  }
11885 }
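/*
Map()/Unmap() above implement reference-counted mapping of the whole
VkDeviceMemory block: the first Map() calls vkMapMemory() once, subsequent
calls only increase m_MapCount, and vkUnmapMemory() runs when the count drops
back to zero. This is what allows persistently mapped allocations
(VMA_ALLOCATION_CREATE_MAPPED_BIT) and nested vmaMapMemory()/vmaUnmapMemory()
pairs on allocations from the same block to coexist safely.
*/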
11886 
11887 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11888 {
11889  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11890  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11891 
11892  void* pData;
11893  VkResult res = Map(hAllocator, 1, &pData);
11894  if(res != VK_SUCCESS)
11895  {
11896  return res;
11897  }
11898 
11899  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11900  VmaWriteMagicValue(pData, allocOffset + allocSize);
11901 
11902  Unmap(hAllocator, 1);
11903 
11904  return VK_SUCCESS;
11905 }
11906 
11907 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11908 {
11909  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11910  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11911 
11912  void* pData;
11913  VkResult res = Map(hAllocator, 1, &pData);
11914  if(res != VK_SUCCESS)
11915  {
11916  return res;
11917  }
11918 
11919  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11920  {
11921  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11922  }
11923  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11924  {
11925  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11926  }
11927 
11928  Unmap(hAllocator, 1);
11929 
11930  return VK_SUCCESS;
11931 }
11932 
11933 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11934  const VmaAllocator hAllocator,
11935  const VmaAllocation hAllocation,
11936  VkDeviceSize allocationLocalOffset,
11937  VkBuffer hBuffer,
11938  const void* pNext)
11939 {
11940  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11941  hAllocation->GetBlock() == this);
11942  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11943  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11944  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11945  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11946  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11947  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11948 }
11949 
11950 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11951  const VmaAllocator hAllocator,
11952  const VmaAllocation hAllocation,
11953  VkDeviceSize allocationLocalOffset,
11954  VkImage hImage,
11955  const void* pNext)
11956 {
11957  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11958  hAllocation->GetBlock() == this);
11959  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11960  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11961  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11962  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11963  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11964  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11965 }
11966 
11967 static void InitStatInfo(VmaStatInfo& outInfo)
11968 {
11969  memset(&outInfo, 0, sizeof(outInfo));
11970  outInfo.allocationSizeMin = UINT64_MAX;
11971  outInfo.unusedRangeSizeMin = UINT64_MAX;
11972 }
11973 
11974 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11975 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11976 {
11977  inoutInfo.blockCount += srcInfo.blockCount;
11978  inoutInfo.allocationCount += srcInfo.allocationCount;
11979  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11980  inoutInfo.usedBytes += srcInfo.usedBytes;
11981  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11982  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11983  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11984  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11985  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11986 }
11987 
11988 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11989 {
11990  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11991  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11992  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11993  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11994 }
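// Illustrative usage of the three helpers above when folding per-block
// statistics into a total (perBlockInfos is a hypothetical container, not part
// of the library):
//
//   VmaStatInfo total;
//   InitStatInfo(total);                  // zero counters, min fields = UINT64_MAX
//   for(const VmaStatInfo& info : perBlockInfos)
//       VmaAddStatInfo(total, info);      // accumulate sums and running min/max
//   VmaPostprocessCalcStatInfo(total);    // derive the rounded averages last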
11995 
11996 VmaPool_T::VmaPool_T(
11997  VmaAllocator hAllocator,
11998  const VmaPoolCreateInfo& createInfo,
11999  VkDeviceSize preferredBlockSize) :
12000  m_BlockVector(
12001  hAllocator,
12002  this, // hParentPool
12003  createInfo.memoryTypeIndex,
12004  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12005  createInfo.minBlockCount,
12006  createInfo.maxBlockCount,
12007  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12008  createInfo.frameInUseCount,
12009  createInfo.blockSize != 0, // explicitBlockSize
12010  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
12011  m_Id(0),
12012  m_Name(VMA_NULL)
12013 {
12014 }
12015 
12016 VmaPool_T::~VmaPool_T()
12017 {
12018 }
12019 
12020 void VmaPool_T::SetName(const char* pName)
12021 {
12022  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12023  VmaFreeString(allocs, m_Name);
12024 
12025  if(pName != VMA_NULL)
12026  {
12027  m_Name = VmaCreateStringCopy(allocs, pName);
12028  }
12029  else
12030  {
12031  m_Name = VMA_NULL;
12032  }
12033 }
12034 
12035 #if VMA_STATS_STRING_ENABLED
12036 
12037 #endif // #if VMA_STATS_STRING_ENABLED
12038 
12039 VmaBlockVector::VmaBlockVector(
12040  VmaAllocator hAllocator,
12041  VmaPool hParentPool,
12042  uint32_t memoryTypeIndex,
12043  VkDeviceSize preferredBlockSize,
12044  size_t minBlockCount,
12045  size_t maxBlockCount,
12046  VkDeviceSize bufferImageGranularity,
12047  uint32_t frameInUseCount,
12048  bool explicitBlockSize,
12049  uint32_t algorithm) :
12050  m_hAllocator(hAllocator),
12051  m_hParentPool(hParentPool),
12052  m_MemoryTypeIndex(memoryTypeIndex),
12053  m_PreferredBlockSize(preferredBlockSize),
12054  m_MinBlockCount(minBlockCount),
12055  m_MaxBlockCount(maxBlockCount),
12056  m_BufferImageGranularity(bufferImageGranularity),
12057  m_FrameInUseCount(frameInUseCount),
12058  m_ExplicitBlockSize(explicitBlockSize),
12059  m_Algorithm(algorithm),
12060  m_HasEmptyBlock(false),
12061  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12062  m_NextBlockId(0)
12063 {
12064 }
12065 
12066 VmaBlockVector::~VmaBlockVector()
12067 {
12068  for(size_t i = m_Blocks.size(); i--; )
12069  {
12070  m_Blocks[i]->Destroy(m_hAllocator);
12071  vma_delete(m_hAllocator, m_Blocks[i]);
12072  }
12073 }
12074 
12075 VkResult VmaBlockVector::CreateMinBlocks()
12076 {
12077  for(size_t i = 0; i < m_MinBlockCount; ++i)
12078  {
12079  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12080  if(res != VK_SUCCESS)
12081  {
12082  return res;
12083  }
12084  }
12085  return VK_SUCCESS;
12086 }
12087 
12088 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12089 {
12090  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12091 
12092  const size_t blockCount = m_Blocks.size();
12093 
12094  pStats->size = 0;
12095  pStats->unusedSize = 0;
12096  pStats->allocationCount = 0;
12097  pStats->unusedRangeCount = 0;
12098  pStats->unusedRangeSizeMax = 0;
12099  pStats->blockCount = blockCount;
12100 
12101  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12102  {
12103  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12104  VMA_ASSERT(pBlock);
12105  VMA_HEAVY_ASSERT(pBlock->Validate());
12106  pBlock->m_pMetadata->AddPoolStats(*pStats);
12107  }
12108 }
12109 
12110 bool VmaBlockVector::IsEmpty()
12111 {
12112  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12113  return m_Blocks.empty();
12114 }
12115 
12116 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12117 {
12118  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12119  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12120  (VMA_DEBUG_MARGIN > 0) &&
12121  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12122  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12123 }
12124 
12125 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12126 
12127 VkResult VmaBlockVector::Allocate(
12128  uint32_t currentFrameIndex,
12129  VkDeviceSize size,
12130  VkDeviceSize alignment,
12131  const VmaAllocationCreateInfo& createInfo,
12132  VmaSuballocationType suballocType,
12133  size_t allocationCount,
12134  VmaAllocation* pAllocations)
12135 {
12136  size_t allocIndex;
12137  VkResult res = VK_SUCCESS;
12138 
12139  if(IsCorruptionDetectionEnabled())
12140  {
12141  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12142  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12143  }
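 // Example (assuming the magic value is a 4-byte integer): a request with
 // size = 30 and alignment = 2 is rounded up to size = 32, alignment = 4, so
 // the margins written around the allocation stay aligned to the magic value.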
12144 
12145  {
12146  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12147  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12148  {
12149  res = AllocatePage(
12150  currentFrameIndex,
12151  size,
12152  alignment,
12153  createInfo,
12154  suballocType,
12155  pAllocations + allocIndex);
12156  if(res != VK_SUCCESS)
12157  {
12158  break;
12159  }
12160  }
12161  }
12162 
12163  if(res != VK_SUCCESS)
12164  {
12165  // Free all already created allocations.
12166  while(allocIndex--)
12167  {
12168  Free(pAllocations[allocIndex]);
12169  }
12170  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12171  }
12172 
12173  return res;
12174 }
12175 
12176 VkResult VmaBlockVector::AllocatePage(
12177  uint32_t currentFrameIndex,
12178  VkDeviceSize size,
12179  VkDeviceSize alignment,
12180  const VmaAllocationCreateInfo& createInfo,
12181  VmaSuballocationType suballocType,
12182  VmaAllocation* pAllocation)
12183 {
12184  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12185  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12186  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12187  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12188 
12189  VkDeviceSize freeMemory;
12190  {
12191  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12192  VmaBudget heapBudget = {};
12193  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12194  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12195  }
12196 
12197  const bool canFallbackToDedicated = !IsCustomPool();
12198  const bool canCreateNewBlock =
12199  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12200  (m_Blocks.size() < m_MaxBlockCount) &&
12201  (freeMemory >= size || !canFallbackToDedicated);
12202  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12203 
12204  // If linearAlgorithm is used, canMakeOtherLost is available only when the pool is used as a ring buffer,
12205  // which in turn is available only when maxBlockCount = 1.
12206  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12207  {
12208  canMakeOtherLost = false;
12209  }
12210 
12211  // Upper address can only be used with linear allocator and within single memory block.
12212  if(isUpperAddress &&
12213  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12214  {
12215  return VK_ERROR_FEATURE_NOT_PRESENT;
12216  }
12217 
12218  // Validate strategy.
12219  switch(strategy)
12220  {
12221  case 0:
12222  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12223  break;
12224  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12225  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12226  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12227  break;
12228  default:
12229  return VK_ERROR_FEATURE_NOT_PRESENT;
12230  }
12231 
12232  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12233  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12234  {
12235  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12236  }
12237 
12238  /*
12239  Under certain conditions this whole section can be skipped for optimization, so
12240  we move on directly to trying to allocate with canMakeOtherLost. That is the case
12241  e.g. for custom pools with the linear algorithm.
12242  */
12243  if(!canMakeOtherLost || canCreateNewBlock)
12244  {
12245  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12246  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12247  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12248 
12249  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12250  {
12251  // Use only last block.
12252  if(!m_Blocks.empty())
12253  {
12254  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12255  VMA_ASSERT(pCurrBlock);
12256  VkResult res = AllocateFromBlock(
12257  pCurrBlock,
12258  currentFrameIndex,
12259  size,
12260  alignment,
12261  allocFlagsCopy,
12262  createInfo.pUserData,
12263  suballocType,
12264  strategy,
12265  pAllocation);
12266  if(res == VK_SUCCESS)
12267  {
12268  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12269  return VK_SUCCESS;
12270  }
12271  }
12272  }
12273  else
12274  {
12275  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12276  {
12277  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12278  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12279  {
12280  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12281  VMA_ASSERT(pCurrBlock);
12282  VkResult res = AllocateFromBlock(
12283  pCurrBlock,
12284  currentFrameIndex,
12285  size,
12286  alignment,
12287  allocFlagsCopy,
12288  createInfo.pUserData,
12289  suballocType,
12290  strategy,
12291  pAllocation);
12292  if(res == VK_SUCCESS)
12293  {
12294  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12295  return VK_SUCCESS;
12296  }
12297  }
12298  }
12299  else // WORST_FIT, FIRST_FIT
12300  {
12301  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12302  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12303  {
12304  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12305  VMA_ASSERT(pCurrBlock);
12306  VkResult res = AllocateFromBlock(
12307  pCurrBlock,
12308  currentFrameIndex,
12309  size,
12310  alignment,
12311  allocFlagsCopy,
12312  createInfo.pUserData,
12313  suballocType,
12314  strategy,
12315  pAllocation);
12316  if(res == VK_SUCCESS)
12317  {
12318  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12319  return VK_SUCCESS;
12320  }
12321  }
12322  }
12323  }
12324 
12325  // 2. Try to create new block.
12326  if(canCreateNewBlock)
12327  {
12328  // Calculate optimal size for new block.
12329  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12330  uint32_t newBlockSizeShift = 0;
12331  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12332 
12333  if(!m_ExplicitBlockSize)
12334  {
12335  // Allocate 1/8, 1/4, 1/2 as first blocks.
12336  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12337  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12338  {
12339  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12340  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12341  {
12342  newBlockSize = smallerNewBlockSize;
12343  ++newBlockSizeShift;
12344  }
12345  else
12346  {
12347  break;
12348  }
12349  }
12350  }
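 // Worked example (illustrative numbers): with m_PreferredBlockSize = 256 MiB,
 // no existing blocks, and a 10 MiB request, each halving is accepted while the
 // candidate still exceeds the largest existing block and is >= 2 * size:
 // 256 -> 128 -> 64 -> 32 MiB, stopping after NEW_BLOCK_SIZE_SHIFT_MAX = 3
 // halvings, so the first block created is 32 MiB (1/8 of the preferred size).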
12351 
12352  size_t newBlockIndex = 0;
12353  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12354  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12355  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12356  if(!m_ExplicitBlockSize)
12357  {
12358  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12359  {
12360  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12361  if(smallerNewBlockSize >= size)
12362  {
12363  newBlockSize = smallerNewBlockSize;
12364  ++newBlockSizeShift;
12365  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12366  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12367  }
12368  else
12369  {
12370  break;
12371  }
12372  }
12373  }
12374 
12375  if(res == VK_SUCCESS)
12376  {
12377  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12378  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12379 
12380  res = AllocateFromBlock(
12381  pBlock,
12382  currentFrameIndex,
12383  size,
12384  alignment,
12385  allocFlagsCopy,
12386  createInfo.pUserData,
12387  suballocType,
12388  strategy,
12389  pAllocation);
12390  if(res == VK_SUCCESS)
12391  {
12392  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12393  return VK_SUCCESS;
12394  }
12395  else
12396  {
12397  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12398  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12399  }
12400  }
12401  }
12402  }
12403 
12404  // 3. Try to allocate from existing blocks while making other allocations lost.
12405  if(canMakeOtherLost)
12406  {
12407  uint32_t tryIndex = 0;
12408  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12409  {
12410  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12411  VmaAllocationRequest bestRequest = {};
12412  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12413 
12414  // 1. Search existing allocations.
12415  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12416  {
12417  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12418  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12419  {
12420  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12421  VMA_ASSERT(pCurrBlock);
12422  VmaAllocationRequest currRequest = {};
12423  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12424  currentFrameIndex,
12425  m_FrameInUseCount,
12426  m_BufferImageGranularity,
12427  size,
12428  alignment,
12429  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12430  suballocType,
12431  canMakeOtherLost,
12432  strategy,
12433  &currRequest))
12434  {
12435  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12436  if(pBestRequestBlock == VMA_NULL ||
12437  currRequestCost < bestRequestCost)
12438  {
12439  pBestRequestBlock = pCurrBlock;
12440  bestRequest = currRequest;
12441  bestRequestCost = currRequestCost;
12442 
12443  if(bestRequestCost == 0)
12444  {
12445  break;
12446  }
12447  }
12448  }
12449  }
12450  }
12451  else // WORST_FIT, FIRST_FIT
12452  {
12453  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12454  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12455  {
12456  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12457  VMA_ASSERT(pCurrBlock);
12458  VmaAllocationRequest currRequest = {};
12459  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12460  currentFrameIndex,
12461  m_FrameInUseCount,
12462  m_BufferImageGranularity,
12463  size,
12464  alignment,
12465  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12466  suballocType,
12467  canMakeOtherLost,
12468  strategy,
12469  &currRequest))
12470  {
12471  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12472  if(pBestRequestBlock == VMA_NULL ||
12473  currRequestCost < bestRequestCost ||
12474  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12475  {
12476  pBestRequestBlock = pCurrBlock;
12477  bestRequest = currRequest;
12478  bestRequestCost = currRequestCost;
12479 
12480  if(bestRequestCost == 0 ||
12481  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12482  {
12483  break;
12484  }
12485  }
12486  }
12487  }
12488  }
12489 
12490  if(pBestRequestBlock != VMA_NULL)
12491  {
12492  if(mapped)
12493  {
12494  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12495  if(res != VK_SUCCESS)
12496  {
12497  return res;
12498  }
12499  }
12500 
12501  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12502  currentFrameIndex,
12503  m_FrameInUseCount,
12504  &bestRequest))
12505  {
12506  // Allocate from this pBlock.
12507  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12508  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12509  UpdateHasEmptyBlock();
12510  (*pAllocation)->InitBlockAllocation(
12511  pBestRequestBlock,
12512  bestRequest.offset,
12513  alignment,
12514  size,
12515  m_MemoryTypeIndex,
12516  suballocType,
12517  mapped,
12518  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12519  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12520  VMA_DEBUG_LOG(" Returned from existing block");
12521  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12522  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12523  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12524  {
12525  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12526  }
12527  if(IsCorruptionDetectionEnabled())
12528  {
12529  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12530  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12531  }
12532  return VK_SUCCESS;
12533  }
12534  // else: Some allocations must have been touched by other threads while we were here. Next try.
12535  }
12536  else
12537  {
12538  // Could not find place in any of the blocks - break outer loop.
12539  break;
12540  }
12541  }
12542  /* Maximum number of tries exceeded - a very unlikely event when many other
12543  threads are simultaneously touching allocations, making it impossible to make
12544  them lost at the same time as we try to allocate. */
12545  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12546  {
12547  return VK_ERROR_TOO_MANY_OBJECTS;
12548  }
12549  }
12550 
12551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12552 }
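// To summarize AllocatePage: it tries (1) sub-allocating from existing blocks,
// then (2) creating a new block, progressively halving its size on failure,
// and finally (3) making other allocations lost where the caller allowed it,
// before giving up with VK_ERROR_OUT_OF_DEVICE_MEMORY.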
12553 
12554 void VmaBlockVector::Free(
12555  const VmaAllocation hAllocation)
12556 {
12557  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12558 
12559  bool budgetExceeded = false;
12560  {
12561  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12562  VmaBudget heapBudget = {};
12563  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12564  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12565  }
12566 
12567  // Scope for lock.
12568  {
12569  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12570 
12571  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12572 
12573  if(IsCorruptionDetectionEnabled())
12574  {
12575  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12576  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12577  }
12578 
12579  if(hAllocation->IsPersistentMap())
12580  {
12581  pBlock->Unmap(m_hAllocator, 1);
12582  }
12583 
12584  pBlock->m_pMetadata->Free(hAllocation);
12585  VMA_HEAVY_ASSERT(pBlock->Validate());
12586 
12587  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12588 
12589  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12590  // pBlock became empty after this deallocation.
12591  if(pBlock->m_pMetadata->IsEmpty())
12592  {
12593  // We already have an empty block (or the budget is exceeded) - we don't want another one, so delete this block.
12594  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12595  {
12596  pBlockToDelete = pBlock;
12597  Remove(pBlock);
12598  }
12599  // else: We now have an empty block - leave it.
12600  }
12601  // pBlock didn't become empty, but we have another empty block - find and free that one.
12602  // (This is optional, a heuristic.)
12603  else if(m_HasEmptyBlock && canDeleteBlock)
12604  {
12605  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12606  if(pLastBlock->m_pMetadata->IsEmpty())
12607  {
12608  pBlockToDelete = pLastBlock;
12609  m_Blocks.pop_back();
12610  }
12611  }
12612 
12613  UpdateHasEmptyBlock();
12614  IncrementallySortBlocks();
12615  }
12616 
12617  // Destruction of a free block. Deferred until this point, outside of the mutex
12618  // lock, for performance reasons.
12619  if(pBlockToDelete != VMA_NULL)
12620  {
12621  VMA_DEBUG_LOG(" Deleted empty block");
12622  pBlockToDelete->Destroy(m_hAllocator);
12623  vma_delete(m_hAllocator, pBlockToDelete);
12624  }
12625 }
12626 
12627 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12628 {
12629  VkDeviceSize result = 0;
12630  for(size_t i = m_Blocks.size(); i--; )
12631  {
12632  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12633  if(result >= m_PreferredBlockSize)
12634  {
12635  break;
12636  }
12637  }
12638  return result;
12639 }
12640 
12641 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12642 {
12643  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12644  {
12645  if(m_Blocks[blockIndex] == pBlock)
12646  {
12647  VmaVectorRemove(m_Blocks, blockIndex);
12648  return;
12649  }
12650  }
12651  VMA_ASSERT(0);
12652 }
12653 
12654 void VmaBlockVector::IncrementallySortBlocks()
12655 {
12656  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12657  {
12658  // Bubble sort only until first swap.
12659  for(size_t i = 1; i < m_Blocks.size(); ++i)
12660  {
12661  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12662  {
12663  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12664  return;
12665  }
12666  }
12667  }
12668 }
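// For illustration, the same "bubble sort until first swap" idea as a
// standalone sketch (Block and GetFreeSize are stand-ins, not library names):
// each call costs at most O(n) and performs at most one swap, which keeps a
// nearly-sorted vector ordered cheaply as free sizes change incrementally.
//
//   static void IncrementallySort(std::vector<Block*>& blocks)
//   {
//       for(size_t i = 1; i < blocks.size(); ++i)
//       {
//           if(GetFreeSize(*blocks[i - 1]) > GetFreeSize(*blocks[i]))
//           {
//               std::swap(blocks[i - 1], blocks[i]);
//               return; // Stop at the first swap.
//           }
//       }
//   }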
12669 
12670 VkResult VmaBlockVector::AllocateFromBlock(
12671  VmaDeviceMemoryBlock* pBlock,
12672  uint32_t currentFrameIndex,
12673  VkDeviceSize size,
12674  VkDeviceSize alignment,
12675  VmaAllocationCreateFlags allocFlags,
12676  void* pUserData,
12677  VmaSuballocationType suballocType,
12678  uint32_t strategy,
12679  VmaAllocation* pAllocation)
12680 {
12681  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12682  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12683  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12684  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12685 
12686  VmaAllocationRequest currRequest = {};
12687  if(pBlock->m_pMetadata->CreateAllocationRequest(
12688  currentFrameIndex,
12689  m_FrameInUseCount,
12690  m_BufferImageGranularity,
12691  size,
12692  alignment,
12693  isUpperAddress,
12694  suballocType,
12695  false, // canMakeOtherLost
12696  strategy,
12697  &currRequest))
12698  {
12699  // Allocate from pBlock.
12700  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12701 
12702  if(mapped)
12703  {
12704  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12705  if(res != VK_SUCCESS)
12706  {
12707  return res;
12708  }
12709  }
12710 
12711  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12712  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12713  UpdateHasEmptyBlock();
12714  (*pAllocation)->InitBlockAllocation(
12715  pBlock,
12716  currRequest.offset,
12717  alignment,
12718  size,
12719  m_MemoryTypeIndex,
12720  suballocType,
12721  mapped,
12722  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12723  VMA_HEAVY_ASSERT(pBlock->Validate());
12724  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12725  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12726  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12727  {
12728  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12729  }
12730  if(IsCorruptionDetectionEnabled())
12731  {
12732  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12733  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12734  }
12735  return VK_SUCCESS;
12736  }
12737  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12738 }
12739 
12740 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12741 {
12742  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12743  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12744  allocInfo.allocationSize = blockSize;
12745 
12746 #if VMA_BUFFER_DEVICE_ADDRESS
12747  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
12748  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
12749  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
12750  {
12751  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
12752  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
12753  }
12754 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
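// When the flag is pushed, the resulting pNext chain is:
//   VkMemoryAllocateInfo -> VkMemoryAllocateFlagsInfoKHR{ flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR }
// so buffers bound to this block may be created with
// VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT and queried for a device address.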
12755 
12756  VkDeviceMemory mem = VK_NULL_HANDLE;
12757  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12758  if(res < 0)
12759  {
12760  return res;
12761  }
12762 
12763  // New VkDeviceMemory successfully created.
12764 
12765  // Create a new block object for it.
12766  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12767  pBlock->Init(
12768  m_hAllocator,
12769  m_hParentPool,
12770  m_MemoryTypeIndex,
12771  mem,
12772  allocInfo.allocationSize,
12773  m_NextBlockId++,
12774  m_Algorithm);
12775 
12776  m_Blocks.push_back(pBlock);
12777  if(pNewBlockIndex != VMA_NULL)
12778  {
12779  *pNewBlockIndex = m_Blocks.size() - 1;
12780  }
12781 
12782  return VK_SUCCESS;
12783 }
12784 
12785 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12786  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12787  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12788 {
12789  const size_t blockCount = m_Blocks.size();
12790  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12791 
12792  enum BLOCK_FLAG
12793  {
12794  BLOCK_FLAG_USED = 0x00000001,
12795  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12796  };
12797 
12798  struct BlockInfo
12799  {
12800  uint32_t flags;
12801  void* pMappedData;
12802  };
12803  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12804  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12805  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12806 
12807  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12808  const size_t moveCount = moves.size();
12809  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12810  {
12811  const VmaDefragmentationMove& move = moves[moveIndex];
12812  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12813  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12814  }
12815 
12816  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12817 
12818  // Go over all blocks. Get mapped pointer or map if necessary.
12819  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12820  {
12821  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12822  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12823  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12824  {
12825  currBlockInfo.pMappedData = pBlock->GetMappedData();
12826  // If it is not originally mapped, map it now.
12827  if(currBlockInfo.pMappedData == VMA_NULL)
12828  {
12829  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12830  if(pDefragCtx->res == VK_SUCCESS)
12831  {
12832  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12833  }
12834  }
12835  }
12836  }
12837 
12838  // Go over all moves. Do actual data transfer.
12839  if(pDefragCtx->res == VK_SUCCESS)
12840  {
12841  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12842  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12843 
12844  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12845  {
12846  const VmaDefragmentationMove& move = moves[moveIndex];
12847 
12848  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12849  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12850 
12851  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12852 
12853  // Invalidate source.
12854  if(isNonCoherent)
12855  {
12856  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12857  memRange.memory = pSrcBlock->GetDeviceMemory();
12858  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12859  memRange.size = VMA_MIN(
12860  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12861  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12862  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12863  }
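 // Example of the range math above: with nonCoherentAtomSize = 64,
 // srcOffset = 100 and size = 50, the invalidated range becomes
 // offset = VmaAlignDown(100, 64) = 64 and
 // size = VmaAlignUp(50 + (100 - 64), 64) = 128, clamped to the end of the block.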
12864 
12865  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12866  memmove(
12867  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12868  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12869  static_cast<size_t>(move.size));
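 // memmove (rather than memcpy) is required here: when an allocation is moved
 // within the same block, the source and destination ranges may overlap.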
12870 
12871  if(IsCorruptionDetectionEnabled())
12872  {
12873  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12874  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12875  }
12876 
12877  // Flush destination.
12878  if(isNonCoherent)
12879  {
12880  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12881  memRange.memory = pDstBlock->GetDeviceMemory();
12882  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12883  memRange.size = VMA_MIN(
12884  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12885  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12886  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12887  }
12888  }
12889  }
12890 
12891  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12892  // This is done regardless of whether pCtx->res == VK_SUCCESS.
12893  for(size_t blockIndex = blockCount; blockIndex--; )
12894  {
12895  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12896  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12897  {
12898  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12899  pBlock->Unmap(m_hAllocator, 1);
12900  }
12901  }
12902 }
12903 
12904 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12905  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12906  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12907  VkCommandBuffer commandBuffer)
12908 {
12909  const size_t blockCount = m_Blocks.size();
12910 
12911  pDefragCtx->blockContexts.resize(blockCount);
12912  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12913 
12914  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12915  const size_t moveCount = moves.size();
12916  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12917  {
12918  const VmaDefragmentationMove& move = moves[moveIndex];
12919 
12920  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
12921  {
12922  // Old-school moves still require us to map the whole block.
12923  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12924  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12925  }
12926  }
12927 
12928  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12929 
12930  // Go over all blocks. Create and bind buffer for whole block if necessary.
12931  {
12932  VkBufferCreateInfo bufCreateInfo;
12933  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12934 
12935  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12936  {
12937  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12938  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12939  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12940  {
12941  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12942  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12943  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12944  if(pDefragCtx->res == VK_SUCCESS)
12945  {
12946  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12947  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12948  }
12949  }
12950  }
12951  }
12952 
12953  // Go over all moves. Post data transfer commands to command buffer.
12954  if(pDefragCtx->res == VK_SUCCESS)
12955  {
12956  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12957  {
12958  const VmaDefragmentationMove& move = moves[moveIndex];
12959 
12960  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12961  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12962 
12963  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12964 
12965  VkBufferCopy region = {
12966  move.srcOffset,
12967  move.dstOffset,
12968  move.size };
12969  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12970  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12971  }
12972  }
12973 
12974  // Save buffers to defrag context for later destruction.
12975  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12976  {
12977  pDefragCtx->res = VK_NOT_READY;
12978  }
12979 }
12980 
12981 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12982 {
12983  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12984  {
12985  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12986  if(pBlock->m_pMetadata->IsEmpty())
12987  {
12988  if(m_Blocks.size() > m_MinBlockCount)
12989  {
12990  if(pDefragmentationStats != VMA_NULL)
12991  {
12992  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12993  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12994  }
12995 
12996  VmaVectorRemove(m_Blocks, blockIndex);
12997  pBlock->Destroy(m_hAllocator);
12998  vma_delete(m_hAllocator, pBlock);
12999  }
13000  else
13001  {
13002  break;
13003  }
13004  }
13005  }
13006  UpdateHasEmptyBlock();
13007 }
13008 
13009 void VmaBlockVector::UpdateHasEmptyBlock()
13010 {
13011  m_HasEmptyBlock = false;
13012  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13013  {
13014  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13015  if(pBlock->m_pMetadata->IsEmpty())
13016  {
13017  m_HasEmptyBlock = true;
13018  break;
13019  }
13020  }
13021 }
13022 
13023 #if VMA_STATS_STRING_ENABLED
13024 
13025 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13026 {
13027  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13028 
13029  json.BeginObject();
13030 
13031  if(IsCustomPool())
13032  {
13033  const char* poolName = m_hParentPool->GetName();
13034  if(poolName != VMA_NULL && poolName[0] != '\0')
13035  {
13036  json.WriteString("Name");
13037  json.WriteString(poolName);
13038  }
13039 
13040  json.WriteString("MemoryTypeIndex");
13041  json.WriteNumber(m_MemoryTypeIndex);
13042 
13043  json.WriteString("BlockSize");
13044  json.WriteNumber(m_PreferredBlockSize);
13045 
13046  json.WriteString("BlockCount");
13047  json.BeginObject(true);
13048  if(m_MinBlockCount > 0)
13049  {
13050  json.WriteString("Min");
13051  json.WriteNumber((uint64_t)m_MinBlockCount);
13052  }
13053  if(m_MaxBlockCount < SIZE_MAX)
13054  {
13055  json.WriteString("Max");
13056  json.WriteNumber((uint64_t)m_MaxBlockCount);
13057  }
13058  json.WriteString("Cur");
13059  json.WriteNumber((uint64_t)m_Blocks.size());
13060  json.EndObject();
13061 
13062  if(m_FrameInUseCount > 0)
13063  {
13064  json.WriteString("FrameInUseCount");
13065  json.WriteNumber(m_FrameInUseCount);
13066  }
13067 
13068  if(m_Algorithm != 0)
13069  {
13070  json.WriteString("Algorithm");
13071  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13072  }
13073  }
13074  else
13075  {
13076  json.WriteString("PreferredBlockSize");
13077  json.WriteNumber(m_PreferredBlockSize);
13078  }
13079 
13080  json.WriteString("Blocks");
13081  json.BeginObject();
13082  for(size_t i = 0; i < m_Blocks.size(); ++i)
13083  {
13084  json.BeginString();
13085  json.ContinueString(m_Blocks[i]->GetId());
13086  json.EndString();
13087 
13088  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13089  }
13090  json.EndObject();
13091 
13092  json.EndObject();
13093 }
13094 
13095 #endif // #if VMA_STATS_STRING_ENABLED
13096 
13097 void VmaBlockVector::Defragment(
13098  class VmaBlockVectorDefragmentationContext* pCtx,
13099  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13100  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13101  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13102  VkCommandBuffer commandBuffer)
13103 {
13104  pCtx->res = VK_SUCCESS;
13105 
13106  const VkMemoryPropertyFlags memPropFlags =
13107  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13108  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13109 
13110  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13111  isHostVisible;
13112  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13113  !IsCorruptionDetectionEnabled() &&
13114  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13115 
13116  // There are options to defragment this memory type.
13117  if(canDefragmentOnCpu || canDefragmentOnGpu)
13118  {
13119  bool defragmentOnGpu;
13120  // There is only one option to defragment this memory type.
13121  if(canDefragmentOnGpu != canDefragmentOnCpu)
13122  {
13123  defragmentOnGpu = canDefragmentOnGpu;
13124  }
13125  // Both options are available: Heuristics to choose the best one.
13126  else
13127  {
13128  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13129  m_hAllocator->IsIntegratedGpu();
13130  }
13131 
13132  bool overlappingMoveSupported = !defragmentOnGpu;
13133 
13134  if(m_hAllocator->m_UseMutex)
13135  {
13136  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13137  {
13138  if(!m_Mutex.TryLockWrite())
13139  {
13140  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13141  return;
13142  }
13143  }
13144  else
13145  {
13146  m_Mutex.LockWrite();
13147  pCtx->mutexLocked = true;
13148  }
13149  }
13150 
13151  pCtx->Begin(overlappingMoveSupported, flags);
13152 
13153  // Defragment.
13154 
13155  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13156  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13157  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13158 
13159  // Accumulate statistics.
13160  if(pStats != VMA_NULL)
13161  {
13162  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13163  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13164  pStats->bytesMoved += bytesMoved;
13165  pStats->allocationsMoved += allocationsMoved;
13166  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13167  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13168  if(defragmentOnGpu)
13169  {
13170  maxGpuBytesToMove -= bytesMoved;
13171  maxGpuAllocationsToMove -= allocationsMoved;
13172  }
13173  else
13174  {
13175  maxCpuBytesToMove -= bytesMoved;
13176  maxCpuAllocationsToMove -= allocationsMoved;
13177  }
13178  }
13179 
13180  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13181  {
13182  if(m_hAllocator->m_UseMutex)
13183  m_Mutex.UnlockWrite();
13184 
13185  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13186  pCtx->res = VK_NOT_READY;
13187 
13188  return;
13189  }
13190 
13191  if(pCtx->res >= VK_SUCCESS)
13192  {
13193  if(defragmentOnGpu)
13194  {
13195  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13196  }
13197  else
13198  {
13199  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13200  }
13201  }
13202  }
13203 }
13204 
13205 void VmaBlockVector::DefragmentationEnd(
13206  class VmaBlockVectorDefragmentationContext* pCtx,
13207  VmaDefragmentationStats* pStats)
13208 {
13209  // Destroy buffers.
13210  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
13211  {
13212  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
13213  if(blockCtx.hBuffer)
13214  {
13215  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
13216  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13217  }
13218  }
13219 
13220  if(pCtx->res >= VK_SUCCESS)
13221  {
13222  FreeEmptyBlocks(pStats);
13223  }
13224 
13225  if(pCtx->mutexLocked)
13226  {
13227  VMA_ASSERT(m_hAllocator->m_UseMutex);
13228  m_Mutex.UnlockWrite();
13229  }
13230 }
13231 
13232 uint32_t VmaBlockVector::ProcessDefragmentations(
13233  class VmaBlockVectorDefragmentationContext *pCtx,
13234  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13235 {
13236  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13237 
13238  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13239 
13240  for(uint32_t i = 0; i < moveCount; ++ i)
13241  {
13242  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13243 
13244  pMove->allocation = move.hAllocation;
13245  pMove->memory = move.pDstBlock->GetDeviceMemory();
13246  pMove->offset = move.dstOffset;
13247 
13248  ++ pMove;
13249  }
13250 
13251  pCtx->defragmentationMovesProcessed += moveCount;
13252 
13253  return moveCount;
13254 }
13255 
13256 void VmaBlockVector::CommitDefragmentations(
13257  class VmaBlockVectorDefragmentationContext *pCtx,
13258  VmaDefragmentationStats* pStats)
13259 {
13260  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13261 
13262  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13263  {
13264  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13265 
13266  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13267  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13268  }
13269 
13270  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13271  FreeEmptyBlocks(pStats);
13272 }
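// Illustrative driver for one incremental defragmentation pass at this level
// (maxMoves and the surrounding variables are hypothetical, not library API):
//
//   std::vector<VmaDefragmentationPassMoveInfo> passMoves(maxMoves);
//   const uint32_t count = blockVector->ProcessDefragmentations(pCtx, passMoves.data(), maxMoves);
//   // ... the caller copies each allocation's data into passMoves[i].memory at passMoves[i].offset ...
//   blockVector->CommitDefragmentations(pCtx, pStats); // free old places, rebind, drop empty blocks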
13273 
13274 size_t VmaBlockVector::CalcAllocationCount() const
13275 {
13276  size_t result = 0;
13277  for(size_t i = 0; i < m_Blocks.size(); ++i)
13278  {
13279  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13280  }
13281  return result;
13282 }
13283 
13284 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13285 {
13286  if(m_BufferImageGranularity == 1)
13287  {
13288  return false;
13289  }
13290  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13291  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13292  {
13293  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13294  VMA_ASSERT(m_Algorithm == 0);
13295  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13296  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13297  {
13298  return true;
13299  }
13300  }
13301  return false;
13302 }
13303 
13304 void VmaBlockVector::MakePoolAllocationsLost(
13305  uint32_t currentFrameIndex,
13306  size_t* pLostAllocationCount)
13307 {
13308  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13309  size_t lostAllocationCount = 0;
13310  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13311  {
13312  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13313  VMA_ASSERT(pBlock);
13314  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13315  }
13316  if(pLostAllocationCount != VMA_NULL)
13317  {
13318  *pLostAllocationCount = lostAllocationCount;
13319  }
13320 }
13321 
13322 VkResult VmaBlockVector::CheckCorruption()
13323 {
13324  if(!IsCorruptionDetectionEnabled())
13325  {
13326  return VK_ERROR_FEATURE_NOT_PRESENT;
13327  }
13328 
13329  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13330  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13331  {
13332  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13333  VMA_ASSERT(pBlock);
13334  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13335  if(res != VK_SUCCESS)
13336  {
13337  return res;
13338  }
13339  }
13340  return VK_SUCCESS;
13341 }
13342 
13343 void VmaBlockVector::AddStats(VmaStats* pStats)
13344 {
13345  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13346  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13347 
13348  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13349 
13350  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13351  {
13352  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13353  VMA_ASSERT(pBlock);
13354  VMA_HEAVY_ASSERT(pBlock->Validate());
13355  VmaStatInfo allocationStatInfo;
13356  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13357  VmaAddStatInfo(pStats->total, allocationStatInfo);
13358  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13359  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13360  }
13361 }
13362 
13363 ////////////////////////////////////////////////////////////////////////////////
13364 // VmaDefragmentationAlgorithm_Generic members definition
13365 
13366 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13367  VmaAllocator hAllocator,
13368  VmaBlockVector* pBlockVector,
13369  uint32_t currentFrameIndex,
13370  bool overlappingMoveSupported) :
13371  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13372  m_AllocationCount(0),
13373  m_AllAllocations(false),
13374  m_BytesMoved(0),
13375  m_AllocationsMoved(0),
13376  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13377 {
13378  // Create block info for each block.
13379  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13380  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13381  {
13382  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13383  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13384  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13385  m_Blocks.push_back(pBlockInfo);
13386  }
13387 
13388  // Sort them by m_pBlock pointer value.
13389  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13390 }
13391 
13392 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13393 {
13394  for(size_t i = m_Blocks.size(); i--; )
13395  {
13396  vma_delete(m_hAllocator, m_Blocks[i]);
13397  }
13398 }
13399 
13400 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13401 {
13402  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
13403  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13404  {
13405  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13406  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13407  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13408  {
13409  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13410  (*it)->m_Allocations.push_back(allocInfo);
13411  }
13412  else
13413  {
13414  VMA_ASSERT(0);
13415  }
13416 
13417  ++m_AllocationCount;
13418  }
13419 }
13420 
13421 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13422  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13423  VkDeviceSize maxBytesToMove,
13424  uint32_t maxAllocationsToMove,
13425  bool freeOldAllocations)
13426 {
13427  if(m_Blocks.empty())
13428  {
13429  return VK_SUCCESS;
13430  }
13431 
13432  // This is a choice based on research.
13433  // Option 1:
13434  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13435  // Option 2:
13436  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13437  // Option 3:
13438  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13439 
13440  size_t srcBlockMinIndex = 0;
13441  // With FAST_ALGORITHM, move allocations only out of the last of the blocks that contain non-movable allocations.
13442  /*
13443  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13444  {
13445  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13446  if(blocksWithNonMovableCount > 0)
13447  {
13448  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13449  }
13450  }
13451  */
13452 
13453  size_t srcBlockIndex = m_Blocks.size() - 1;
13454  size_t srcAllocIndex = SIZE_MAX;
13455  for(;;)
13456  {
13457  // 1. Find next allocation to move.
13458  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13459  // 1.2. Then start from last to first m_Allocations.
13460  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13461  {
13462  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13463  {
13464  // Finished: no more allocations to process.
13465  if(srcBlockIndex == srcBlockMinIndex)
13466  {
13467  return VK_SUCCESS;
13468  }
13469  else
13470  {
13471  --srcBlockIndex;
13472  srcAllocIndex = SIZE_MAX;
13473  }
13474  }
13475  else
13476  {
13477  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13478  }
13479  }
13480 
13481  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13482  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13483 
13484  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13485  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13486  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13487  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13488 
13489  // 2. Try to find new place for this allocation in preceding or current block.
13490  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13491  {
13492  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13493  VmaAllocationRequest dstAllocRequest;
13494  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13495  m_CurrentFrameIndex,
13496  m_pBlockVector->GetFrameInUseCount(),
13497  m_pBlockVector->GetBufferImageGranularity(),
13498  size,
13499  alignment,
13500  false, // upperAddress
13501  suballocType,
13502  false, // canMakeOtherLost
13503  strategy,
13504  &dstAllocRequest) &&
13505  MoveMakesSense(
13506  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13507  {
13508  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13509 
13510  // Reached limit on number of allocations or bytes to move.
13511  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13512  (m_BytesMoved + size > maxBytesToMove))
13513  {
13514  return VK_SUCCESS;
13515  }
13516 
13517  VmaDefragmentationMove move = {};
13518  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13519  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13520  move.srcOffset = srcOffset;
13521  move.dstOffset = dstAllocRequest.offset;
13522  move.size = size;
13523  move.hAllocation = allocInfo.m_hAllocation;
13524  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
13525  move.pDstBlock = pDstBlockInfo->m_pBlock;
13526 
13527  moves.push_back(move);
13528 
13529  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13530  dstAllocRequest,
13531  suballocType,
13532  size,
13533  allocInfo.m_hAllocation);
13534 
13535  if(freeOldAllocations)
13536  {
13537  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13538  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13539  }
13540 
13541  if(allocInfo.m_pChanged != VMA_NULL)
13542  {
13543  *allocInfo.m_pChanged = VK_TRUE;
13544  }
13545 
13546  ++m_AllocationsMoved;
13547  m_BytesMoved += size;
13548 
13549  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13550 
13551  break;
13552  }
13553  }
13554 
13555  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13556 
13557  if(srcAllocIndex > 0)
13558  {
13559  --srcAllocIndex;
13560  }
13561  else
13562  {
13563  if(srcBlockIndex > 0)
13564  {
13565  --srcBlockIndex;
13566  srcAllocIndex = SIZE_MAX;
13567  }
13568  else
13569  {
13570  return VK_SUCCESS;
13571  }
13572  }
13573  }
13574 }
13575 
13576 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13577 {
13578  size_t result = 0;
13579  for(size_t i = 0; i < m_Blocks.size(); ++i)
13580  {
13581  if(m_Blocks[i]->m_HasNonMovableAllocations)
13582  {
13583  ++result;
13584  }
13585  }
13586  return result;
13587 }
13588 
13589 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13590  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13591  VkDeviceSize maxBytesToMove,
13592  uint32_t maxAllocationsToMove,
13593  VmaDefragmentationFlags flags)
13594 {
13595  if(!m_AllAllocations && m_AllocationCount == 0)
13596  {
13597  return VK_SUCCESS;
13598  }
13599 
13600  const size_t blockCount = m_Blocks.size();
13601  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13602  {
13603  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13604 
13605  if(m_AllAllocations)
13606  {
13607  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13608  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13609  it != pMetadata->m_Suballocations.end();
13610  ++it)
13611  {
13612  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13613  {
13614  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13615  pBlockInfo->m_Allocations.push_back(allocInfo);
13616  }
13617  }
13618  }
13619 
13620  pBlockInfo->CalcHasNonMovableAllocations();
13621 
13622  // This is a choice based on research.
13623  // Option 1:
13624  pBlockInfo->SortAllocationsByOffsetDescending();
13625  // Option 2:
13626  //pBlockInfo->SortAllocationsBySizeDescending();
13627  }
13628 
13629  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
13630  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13631 
13632  // This is a choice based on research.
13633  const uint32_t roundCount = 2;
13634 
13635  // Execute defragmentation rounds (the main part).
13636  VkResult result = VK_SUCCESS;
13637  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13638  {
13639  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
13640  }
13641 
13642  return result;
13643 }
13644 
13645 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13646  size_t dstBlockIndex, VkDeviceSize dstOffset,
13647  size_t srcBlockIndex, VkDeviceSize srcOffset)
13648 {
13649  if(dstBlockIndex < srcBlockIndex)
13650  {
13651  return true;
13652  }
13653  if(dstBlockIndex > srcBlockIndex)
13654  {
13655  return false;
13656  }
13657  if(dstOffset < srcOffset)
13658  {
13659  return true;
13660  }
13661  return false;
13662 }
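// In effect this is a lexicographic "strictly earlier" test on the pair
// (blockIndex, offset): a move makes sense only if it brings the allocation
// closer to the front of the sorted block list, or earlier within its block.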
13663 
13664 ////////////////////////////////////////////////////////////////////////////////
13665 // VmaDefragmentationAlgorithm_Fast
13666 
13667 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13668  VmaAllocator hAllocator,
13669  VmaBlockVector* pBlockVector,
13670  uint32_t currentFrameIndex,
13671  bool overlappingMoveSupported) :
13672  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13673  m_OverlappingMoveSupported(overlappingMoveSupported),
13674  m_AllocationCount(0),
13675  m_AllAllocations(false),
13676  m_BytesMoved(0),
13677  m_AllocationsMoved(0),
13678  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13679 {
13680  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13681 
13682 }
13683 
13684 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13685 {
13686 }
13687 
13688 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13689  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13690  VkDeviceSize maxBytesToMove,
13691  uint32_t maxAllocationsToMove,
13692  VmaDefragmentationFlags flags)
13693 {
13694  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13695 
13696  const size_t blockCount = m_pBlockVector->GetBlockCount();
13697  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13698  {
13699  return VK_SUCCESS;
13700  }
13701 
13702  PreprocessMetadata();
13703 
13704  // Sort blocks so that the most "destination"-like blocks (those with the least free space) come first.
13705 
13706  m_BlockInfos.resize(blockCount);
13707  for(size_t i = 0; i < blockCount; ++i)
13708  {
13709  m_BlockInfos[i].origBlockIndex = i;
13710  }
13711 
13712  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13713  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13714  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13715  });
13716 
13717  // THE MAIN ALGORITHM
13718 
13719  FreeSpaceDatabase freeSpaceDb;
13720 
13721  size_t dstBlockInfoIndex = 0;
13722  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13723  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13724  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13725  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13726  VkDeviceSize dstOffset = 0;
13727 
13728  bool end = false;
13729  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13730  {
13731  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13732  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13733  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13734  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13735  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13736  {
13737  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13738  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13739  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13740  if(m_AllocationsMoved == maxAllocationsToMove ||
13741  m_BytesMoved + srcAllocSize > maxBytesToMove)
13742  {
13743  end = true;
13744  break;
13745  }
13746  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13747 
13748  VmaDefragmentationMove move = {};
13749  // Try to place it in one of the free spaces from the database.
13750  size_t freeSpaceInfoIndex;
13751  VkDeviceSize dstAllocOffset;
13752  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13753  freeSpaceInfoIndex, dstAllocOffset))
13754  {
13755  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13756  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13757  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13758 
13759  // Same block
13760  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13761  {
13762  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13763 
13764  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13765 
13766  VmaSuballocation suballoc = *srcSuballocIt;
13767  suballoc.offset = dstAllocOffset;
13768  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13769  m_BytesMoved += srcAllocSize;
13770  ++m_AllocationsMoved;
13771 
13772  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13773  ++nextSuballocIt;
13774  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13775  srcSuballocIt = nextSuballocIt;
13776 
13777  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13778 
13779  move.srcBlockIndex = srcOrigBlockIndex;
13780  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13781  move.srcOffset = srcAllocOffset;
13782  move.dstOffset = dstAllocOffset;
13783  move.size = srcAllocSize;
13784 
13785  moves.push_back(move);
13786  }
13787  // Different block
13788  else
13789  {
13790  // MOVE OPTION 2: Move the allocation to a different block.
13791 
13792  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13793 
13794  VmaSuballocation suballoc = *srcSuballocIt;
13795  suballoc.offset = dstAllocOffset;
13796  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13797  m_BytesMoved += srcAllocSize;
13798  ++m_AllocationsMoved;
13799 
13800  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13801  ++nextSuballocIt;
13802  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13803  srcSuballocIt = nextSuballocIt;
13804 
13805  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13806 
13807  move.srcBlockIndex = srcOrigBlockIndex;
13808  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13809  move.srcOffset = srcAllocOffset;
13810  move.dstOffset = dstAllocOffset;
13811  move.size = srcAllocSize;
13812 
13813  moves.push_back(move);
13814  }
13815  }
13816  else
13817  {
13818  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13819 
13820  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13821  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13822  dstAllocOffset + srcAllocSize > dstBlockSize)
13823  {
13824  // But before that, register the remaining free space at the end of the dst block.
13825  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13826 
13827  ++dstBlockInfoIndex;
13828  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13829  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13830  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13831  dstBlockSize = pDstMetadata->GetSize();
13832  dstOffset = 0;
13833  dstAllocOffset = 0;
13834  }
13835 
13836  // Same block
13837  if(dstBlockInfoIndex == srcBlockInfoIndex)
13838  {
13839  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13840 
13841  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13842 
13843  bool skipOver = overlap;
13844  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13845  {
13846  // If the destination and source regions overlap, skip the move if it
13847  // would shift the allocation by less than 1/64 of its size.
13848  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13849  }
13850 
13851  if(skipOver)
13852  {
13853  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13854 
13855  dstOffset = srcAllocOffset + srcAllocSize;
13856  ++srcSuballocIt;
13857  }
13858  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13859  else
13860  {
13861  srcSuballocIt->offset = dstAllocOffset;
13862  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13863  dstOffset = dstAllocOffset + srcAllocSize;
13864  m_BytesMoved += srcAllocSize;
13865  ++m_AllocationsMoved;
13866  ++srcSuballocIt;
13867 
13868  move.srcBlockIndex = srcOrigBlockIndex;
13869  move.dstBlockIndex = dstOrigBlockIndex;
13870  move.srcOffset = srcAllocOffset;
13871  move.dstOffset = dstAllocOffset;
13872  move.size = srcAllocSize;
13873 
13874  moves.push_back(move);
13875  }
13876  }
13877  // Different block
13878  else
13879  {
13880  // MOVE OPTION 2: Move the allocation to a different block.
13881 
13882  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13883  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13884 
13885  VmaSuballocation suballoc = *srcSuballocIt;
13886  suballoc.offset = dstAllocOffset;
13887  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13888  dstOffset = dstAllocOffset + srcAllocSize;
13889  m_BytesMoved += srcAllocSize;
13890  ++m_AllocationsMoved;
13891 
13892  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13893  ++nextSuballocIt;
13894  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13895  srcSuballocIt = nextSuballocIt;
13896 
13897  pDstMetadata->m_Suballocations.push_back(suballoc);
13898 
13899  move.srcBlockIndex = srcOrigBlockIndex;
13900  move.dstBlockIndex = dstOrigBlockIndex;
13901  move.srcOffset = srcAllocOffset;
13902  move.dstOffset = dstAllocOffset;
13903  move.size = srcAllocSize;
13904 
13905  moves.push_back(move);
13906  }
13907  }
13908  }
13909  }
13910 
13911  m_BlockInfos.clear();
13912 
13913  PostprocessMetadata();
13914 
13915  return VK_SUCCESS;
13916 }
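
// In short, Defragment() above performs a single compaction sweep: blocks are
// visited from most "destination" to most "source", and each movable
// suballocation is packed either at the running dstOffset cursor or into an
// earlier gap fetched from freeSpaceDb. Because the suballocation lists are
// edited in place during the sweep, PreprocessMetadata() and
// PostprocessMetadata() below temporarily strip and later rebuild the FREE
// entries and the derived counters.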
13917 
13918 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13919 {
13920  const size_t blockCount = m_pBlockVector->GetBlockCount();
13921  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13922  {
13923  VmaBlockMetadata_Generic* const pMetadata =
13924  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13925  pMetadata->m_FreeCount = 0;
13926  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13927  pMetadata->m_FreeSuballocationsBySize.clear();
13928  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13929  it != pMetadata->m_Suballocations.end(); )
13930  {
13931  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13932  {
13933  VmaSuballocationList::iterator nextIt = it;
13934  ++nextIt;
13935  pMetadata->m_Suballocations.erase(it);
13936  it = nextIt;
13937  }
13938  else
13939  {
13940  ++it;
13941  }
13942  }
13943  }
13944 }
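
// After PreprocessMetadata(), each block's suballocation list contains only
// used entries, m_FreeCount is 0, and m_SumFreeSize equals the whole block
// size. The sweep in Defragment() relies on this simplified state;
// PostprocessMetadata() below restores the real invariants.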
13945 
13946 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13947 {
13948  const size_t blockCount = m_pBlockVector->GetBlockCount();
13949  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13950  {
13951  VmaBlockMetadata_Generic* const pMetadata =
13952  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13953  const VkDeviceSize blockSize = pMetadata->GetSize();
13954 
13955  // No allocations in this block - entire area is free.
13956  if(pMetadata->m_Suballocations.empty())
13957  {
13958  pMetadata->m_FreeCount = 1;
13959  //pMetadata->m_SumFreeSize is already set to blockSize.
13960  VmaSuballocation suballoc = {
13961  0, // offset
13962  blockSize, // size
13963  VMA_NULL, // hAllocation
13964  VMA_SUBALLOCATION_TYPE_FREE };
13965  pMetadata->m_Suballocations.push_back(suballoc);
13966  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13967  }
13968  // There are some allocations in this block.
13969  else
13970  {
13971  VkDeviceSize offset = 0;
13972  VmaSuballocationList::iterator it;
13973  for(it = pMetadata->m_Suballocations.begin();
13974  it != pMetadata->m_Suballocations.end();
13975  ++it)
13976  {
13977  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13978  VMA_ASSERT(it->offset >= offset);
13979 
13980  // Need to insert preceding free space.
13981  if(it->offset > offset)
13982  {
13983  ++pMetadata->m_FreeCount;
13984  const VkDeviceSize freeSize = it->offset - offset;
13985  VmaSuballocation suballoc = {
13986  offset, // offset
13987  freeSize, // size
13988  VMA_NULL, // hAllocation
13989  VMA_SUBALLOCATION_TYPE_FREE };
13990  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13991  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13992  {
13993  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13994  }
13995  }
13996 
13997  pMetadata->m_SumFreeSize -= it->size;
13998  offset = it->offset + it->size;
13999  }
14000 
14001  // Need to insert trailing free space.
14002  if(offset < blockSize)
14003  {
14004  ++pMetadata->m_FreeCount;
14005  const VkDeviceSize freeSize = blockSize - offset;
14006  VmaSuballocation suballoc = {
14007  offset, // offset
14008  freeSize, // size
14009  VMA_NULL, // hAllocation
14010  VMA_SUBALLOCATION_TYPE_FREE };
14011  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14012  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14013  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14014  {
14015  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14016  }
14017  }
14018 
14019  VMA_SORT(
14020  pMetadata->m_FreeSuballocationsBySize.begin(),
14021  pMetadata->m_FreeSuballocationsBySize.end(),
14022  VmaSuballocationItemSizeLess());
14023  }
14024 
14025  VMA_HEAVY_ASSERT(pMetadata->Validate());
14026  }
14027 }
14028 
14029 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14030 {
14031  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14032  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14033  while(it != pMetadata->m_Suballocations.end() &&
14034  it->offset < suballoc.offset)
14035  {
14036  ++it;
14037  }
14040  pMetadata->m_Suballocations.insert(it, suballoc);
14041 }
14042 
14043 ////////////////////////////////////////////////////////////////////////////////
14044 // VmaBlockVectorDefragmentationContext
14045 
14046 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14047  VmaAllocator hAllocator,
14048  VmaPool hCustomPool,
14049  VmaBlockVector* pBlockVector,
14050  uint32_t currFrameIndex) :
14051  res(VK_SUCCESS),
14052  mutexLocked(false),
14053  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14054  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14055  defragmentationMovesProcessed(0),
14056  defragmentationMovesCommitted(0),
14057  hasDefragmentationPlan(0),
14058  m_hAllocator(hAllocator),
14059  m_hCustomPool(hCustomPool),
14060  m_pBlockVector(pBlockVector),
14061  m_CurrFrameIndex(currFrameIndex),
14062  m_pAlgorithm(VMA_NULL),
14063  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14064  m_AllAllocations(false)
14065 {
14066 }
14067 
14068 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14069 {
14070  vma_delete(m_hAllocator, m_pAlgorithm);
14071 }
14072 
14073 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14074 {
14075  AllocInfo info = { hAlloc, pChanged };
14076  m_Allocations.push_back(info);
14077 }
14078 
14079 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14080 {
14081  const bool allAllocations = m_AllAllocations ||
14082  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14083 
14084  /********************************
14085  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14086  ********************************/
14087 
14088  /*
14089  Fast algorithm is supported only when certain criteria are met:
14090  - VMA_DEBUG_MARGIN is 0.
14091  - All allocations in this block vector are moveable.
14092  - There is no possibility of image/buffer granularity conflict.
14093  - The defragmentation is not incremental
14094  */
14095  if(VMA_DEBUG_MARGIN == 0 &&
14096  allAllocations &&
14097  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14098  (flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) == 0)
14099  {
14100  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14101  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14102  }
14103  else
14104  {
14105  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14106  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14107  }
14108 
14109  if(allAllocations)
14110  {
14111  m_pAlgorithm->AddAll();
14112  }
14113  else
14114  {
14115  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14116  {
14117  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14118  }
14119  }
14120 }
14121 
14122 ////////////////////////////////////////////////////////////////////////////////
14123 // VmaDefragmentationContext
14124 
14125 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14126  VmaAllocator hAllocator,
14127  uint32_t currFrameIndex,
14128  uint32_t flags,
14129  VmaDefragmentationStats* pStats) :
14130  m_hAllocator(hAllocator),
14131  m_CurrFrameIndex(currFrameIndex),
14132  m_Flags(flags),
14133  m_pStats(pStats),
14134  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14135 {
14136  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14137 }
14138 
14139 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14140 {
14141  for(size_t i = m_CustomPoolContexts.size(); i--; )
14142  {
14143  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14144  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
14145  vma_delete(m_hAllocator, pBlockVectorCtx);
14146  }
14147  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14148  {
14149  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14150  if(pBlockVectorCtx)
14151  {
14152  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
14153  vma_delete(m_hAllocator, pBlockVectorCtx);
14154  }
14155  }
14156 }
14157 
14158 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
14159 {
14160  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14161  {
14162  VmaPool pool = pPools[poolIndex];
14163  VMA_ASSERT(pool);
14164  // Pools with an algorithm other than the default are not defragmented.
14165  if(pool->m_BlockVector.GetAlgorithm() == 0)
14166  {
14167  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14168 
14169  for(size_t i = m_CustomPoolContexts.size(); i--; )
14170  {
14171  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14172  {
14173  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14174  break;
14175  }
14176  }
14177 
14178  if(!pBlockVectorDefragCtx)
14179  {
14180  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14181  m_hAllocator,
14182  pool,
14183  &pool->m_BlockVector,
14184  m_CurrFrameIndex);
14185  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14186  }
14187 
14188  pBlockVectorDefragCtx->AddAll();
14189  }
14190  }
14191 }
14192 
14193 void VmaDefragmentationContext_T::AddAllocations(
14194  uint32_t allocationCount,
14195  VmaAllocation* pAllocations,
14196  VkBool32* pAllocationsChanged)
14197 {
14198  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
14199  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14200  {
14201  const VmaAllocation hAlloc = pAllocations[allocIndex];
14202  VMA_ASSERT(hAlloc);
14203  // DedicatedAlloc cannot be defragmented.
14204  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14205  // Lost allocation cannot be defragmented.
14206  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14207  {
14208  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14209 
14210  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14211  // This allocation belongs to a custom pool.
14212  if(hAllocPool != VK_NULL_HANDLE)
14213  {
14214  // Pools with an algorithm other than the default are not defragmented.
14215  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14216  {
14217  for(size_t i = m_CustomPoolContexts.size(); i--; )
14218  {
14219  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14220  {
14221  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14222  break;
14223  }
14224  }
14225  if(!pBlockVectorDefragCtx)
14226  {
14227  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14228  m_hAllocator,
14229  hAllocPool,
14230  &hAllocPool->m_BlockVector,
14231  m_CurrFrameIndex);
14232  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14233  }
14234  }
14235  }
14236  // This allocation belongs to the default pool.
14237  else
14238  {
14239  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14240  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14241  if(!pBlockVectorDefragCtx)
14242  {
14243  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14244  m_hAllocator,
14245  VMA_NULL, // hCustomPool
14246  m_hAllocator->m_pBlockVectors[memTypeIndex],
14247  m_CurrFrameIndex);
14248  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14249  }
14250  }
14251 
14252  if(pBlockVectorDefragCtx)
14253  {
14254  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14255  &pAllocationsChanged[allocIndex] : VMA_NULL;
14256  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14257  }
14258  }
14259  }
14260 }
14261 
14262 VkResult VmaDefragmentationContext_T::Defragment(
14263  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14264  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14265  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14266 {
14267  if(pStats)
14268  {
14269  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14270  }
14271 
14272  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14273  {
14274  // For incremental defragmentations, we just earmark how much we can move.
14275  // The real work happens in the defragmentation passes.
14276  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14277  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14278 
14279  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14280  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14281 
14282  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14283  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14284  return VK_SUCCESS;
14285 
14286  return VK_NOT_READY;
14287  }
14288 
14289  if(commandBuffer == VK_NULL_HANDLE)
14290  {
14291  maxGpuBytesToMove = 0;
14292  maxGpuAllocationsToMove = 0;
14293  }
14294 
14295  VkResult res = VK_SUCCESS;
14296 
14297  // Process default pools.
14298  for(uint32_t memTypeIndex = 0;
14299  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14300  ++memTypeIndex)
14301  {
14302  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14303  if(pBlockVectorCtx)
14304  {
14305  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14306  pBlockVectorCtx->GetBlockVector()->Defragment(
14307  pBlockVectorCtx,
14308  pStats, flags,
14309  maxCpuBytesToMove, maxCpuAllocationsToMove,
14310  maxGpuBytesToMove, maxGpuAllocationsToMove,
14311  commandBuffer);
14312  if(pBlockVectorCtx->res != VK_SUCCESS)
14313  {
14314  res = pBlockVectorCtx->res;
14315  }
14316  }
14317  }
14318 
14319  // Process custom pools.
14320  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14321  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14322  ++customCtxIndex)
14323  {
14324  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14325  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14326  pBlockVectorCtx->GetBlockVector()->Defragment(
14327  pBlockVectorCtx,
14328  pStats, flags,
14329  maxCpuBytesToMove, maxCpuAllocationsToMove,
14330  maxGpuBytesToMove, maxGpuAllocationsToMove,
14331  commandBuffer);
14332  if(pBlockVectorCtx->res != VK_SUCCESS)
14333  {
14334  res = pBlockVectorCtx->res;
14335  }
14336  }
14337 
14338  return res;
14339 }
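
/*
Illustrative sketch (not part of the library): how an application typically
reaches this Defragment() through the public API. `allocator` and `allocs`
are placeholder handles assumed to be owned by the application.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocs.size();
    defragInfo.pAllocations = allocs.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // GPU-side moves would additionally require defragInfo.commandBuffer
    // to be set, recorded, and submitted before ending.
    vmaDefragmentationEnd(allocator, defragCtx);
*/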
14340 
14341 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14342 {
14343  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14344  uint32_t movesLeft = pInfo->moveCount;
14345 
14346  // Process default pools.
14347  for(uint32_t memTypeIndex = 0;
14348  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14349  ++memTypeIndex)
14350  {
14351  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14352  if(pBlockVectorCtx)
14353  {
14354  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14355 
14356  if(!pBlockVectorCtx->hasDefragmentationPlan)
14357  {
14358  pBlockVectorCtx->GetBlockVector()->Defragment(
14359  pBlockVectorCtx,
14360  m_pStats, m_Flags,
14361  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14362  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14363  VK_NULL_HANDLE);
14364 
14365  if(pBlockVectorCtx->res < VK_SUCCESS)
14366  continue;
14367 
14368  pBlockVectorCtx->hasDefragmentationPlan = true;
14369  }
14370 
14371  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14372  pBlockVectorCtx,
14373  pCurrentMove, movesLeft);
14374 
14375  movesLeft -= processed;
14376  pCurrentMove += processed;
14377  }
14378  }
14379 
14380  // Process custom pools.
14381  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14382  customCtxIndex < customCtxCount;
14383  ++customCtxIndex)
14384  {
14385  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14386  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14387 
14388  if(!pBlockVectorCtx->hasDefragmentationPlan)
14389  {
14390  pBlockVectorCtx->GetBlockVector()->Defragment(
14391  pBlockVectorCtx,
14392  m_pStats, m_Flags,
14393  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14394  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14395  VK_NULL_HANDLE);
14396 
14397  if(pBlockVectorCtx->res < VK_SUCCESS)
14398  continue;
14399 
14400  pBlockVectorCtx->hasDefragmentationPlan = true;
14401  }
14402 
14403  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14404  pBlockVectorCtx,
14405  pCurrentMove, movesLeft);
14406 
14407  movesLeft -= processed;
14408  pCurrentMove += processed;
14409  }
14410 
14411  pInfo->moveCount = pInfo->moveCount - movesLeft;
14412 
14413  return VK_SUCCESS;
14414 }
14415 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14416 {
14417  VkResult res = VK_SUCCESS;
14418 
14419  // Process default pools.
14420  for(uint32_t memTypeIndex = 0;
14421  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14422  ++memTypeIndex)
14423  {
14424  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14425  if(pBlockVectorCtx)
14426  {
14427  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14428 
14429  if(!pBlockVectorCtx->hasDefragmentationPlan)
14430  {
14431  res = VK_NOT_READY;
14432  continue;
14433  }
14434 
14435  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14436  pBlockVectorCtx, m_pStats);
14437 
14438  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14439  res = VK_NOT_READY;
14440  }
14441  }
14442 
14443  // Process custom pools.
14444  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14445  customCtxIndex < customCtxCount;
14446  ++customCtxIndex)
14447  {
14448  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14449  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14450 
14451  if(!pBlockVectorCtx->hasDefragmentationPlan)
14452  {
14453  res = VK_NOT_READY;
14454  continue;
14455  }
14456 
14457  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14458  pBlockVectorCtx, m_pStats);
14459 
14460  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14461  res = VK_NOT_READY;
14462  }
14463 
14464  return res;
14465 }
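
/*
Illustrative sketch (not part of the library): the incremental flow served by
DefragmentPassBegin() and DefragmentPassEnd() above, via the public pass API.
`allocator` and `defragCtx` are placeholders; the move-buffer size of 64 is
an arbitrary assumption.

    VmaDefragmentationPassMoveInfo moveBuf[64];
    VmaDefragmentationPassInfo passInfo = {};
    VkResult res = VK_NOT_READY;
    while(res == VK_NOT_READY)
    {
        passInfo.moveCount = 64;
        passInfo.pMoves = moveBuf;
        vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
        for(uint32_t i = 0; i < passInfo.moveCount; ++i)
        {
            // Copy the allocation's data to passInfo.pMoves[i].memory at
            // passInfo.pMoves[i].offset, e.g. with vkCmdCopyBuffer.
        }
        res = vmaEndDefragmentationPass(allocator, defragCtx);
    }
*/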
14466 
14467 ////////////////////////////////////////////////////////////////////////////////
14468 // VmaRecorder
14469 
14470 #if VMA_RECORDING_ENABLED
14471 
14472 VmaRecorder::VmaRecorder() :
14473  m_UseMutex(true),
14474  m_Flags(0),
14475  m_File(VMA_NULL),
14476  m_Freq(INT64_MAX),
14477  m_StartCounter(INT64_MAX)
14478 {
14479 }
14480 
14481 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
14482 {
14483  m_UseMutex = useMutex;
14484  m_Flags = settings.flags;
14485 
14486  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
14487  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
14488 
14489  // Open file for writing.
14490  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
14491  if(err != 0)
14492  {
14493  return VK_ERROR_INITIALIZATION_FAILED;
14494  }
14495 
14496  // Write header.
14497  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
14498  fprintf(m_File, "%s\n", "1,8");
14499 
14500  return VK_SUCCESS;
14501 }
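
// The two header lines above identify the file as a VMA call recording and
// declare its text format version (major,minor) - here 1,8. Every Record*()
// function below then appends one CSV line of the form
// "threadId,time,frameIndex,functionName,args...".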
14502 
14503 VmaRecorder::~VmaRecorder()
14504 {
14505  if(m_File != VMA_NULL)
14506  {
14507  fclose(m_File);
14508  }
14509 }
14510 
14511 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14512 {
14513  CallParams callParams;
14514  GetBasicParams(callParams);
14515 
14516  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14517  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14518  Flush();
14519 }
14520 
14521 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14522 {
14523  CallParams callParams;
14524  GetBasicParams(callParams);
14525 
14526  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14527  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14528  Flush();
14529 }
14530 
14531 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14532 {
14533  CallParams callParams;
14534  GetBasicParams(callParams);
14535 
14536  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14537  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14538  createInfo.memoryTypeIndex,
14539  createInfo.flags,
14540  createInfo.blockSize,
14541  (uint64_t)createInfo.minBlockCount,
14542  (uint64_t)createInfo.maxBlockCount,
14543  createInfo.frameInUseCount,
14544  pool);
14545  Flush();
14546 }
14547 
14548 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14549 {
14550  CallParams callParams;
14551  GetBasicParams(callParams);
14552 
14553  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14554  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14555  pool);
14556  Flush();
14557 }
14558 
14559 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14560  const VkMemoryRequirements& vkMemReq,
14561  const VmaAllocationCreateInfo& createInfo,
14562  VmaAllocation allocation)
14563 {
14564  CallParams callParams;
14565  GetBasicParams(callParams);
14566 
14567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14568  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14569  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14570  vkMemReq.size,
14571  vkMemReq.alignment,
14572  vkMemReq.memoryTypeBits,
14573  createInfo.flags,
14574  createInfo.usage,
14575  createInfo.requiredFlags,
14576  createInfo.preferredFlags,
14577  createInfo.memoryTypeBits,
14578  createInfo.pool,
14579  allocation,
14580  userDataStr.GetString());
14581  Flush();
14582 }
14583 
14584 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14585  const VkMemoryRequirements& vkMemReq,
14586  const VmaAllocationCreateInfo& createInfo,
14587  uint64_t allocationCount,
14588  const VmaAllocation* pAllocations)
14589 {
14590  CallParams callParams;
14591  GetBasicParams(callParams);
14592 
14593  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14594  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14595  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14596  vkMemReq.size,
14597  vkMemReq.alignment,
14598  vkMemReq.memoryTypeBits,
14599  createInfo.flags,
14600  createInfo.usage,
14601  createInfo.requiredFlags,
14602  createInfo.preferredFlags,
14603  createInfo.memoryTypeBits,
14604  createInfo.pool);
14605  PrintPointerList(allocationCount, pAllocations);
14606  fprintf(m_File, ",%s\n", userDataStr.GetString());
14607  Flush();
14608 }
14609 
14610 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14611  const VkMemoryRequirements& vkMemReq,
14612  bool requiresDedicatedAllocation,
14613  bool prefersDedicatedAllocation,
14614  const VmaAllocationCreateInfo& createInfo,
14615  VmaAllocation allocation)
14616 {
14617  CallParams callParams;
14618  GetBasicParams(callParams);
14619 
14620  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14621  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14622  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14623  vkMemReq.size,
14624  vkMemReq.alignment,
14625  vkMemReq.memoryTypeBits,
14626  requiresDedicatedAllocation ? 1 : 0,
14627  prefersDedicatedAllocation ? 1 : 0,
14628  createInfo.flags,
14629  createInfo.usage,
14630  createInfo.requiredFlags,
14631  createInfo.preferredFlags,
14632  createInfo.memoryTypeBits,
14633  createInfo.pool,
14634  allocation,
14635  userDataStr.GetString());
14636  Flush();
14637 }
14638 
14639 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14640  const VkMemoryRequirements& vkMemReq,
14641  bool requiresDedicatedAllocation,
14642  bool prefersDedicatedAllocation,
14643  const VmaAllocationCreateInfo& createInfo,
14644  VmaAllocation allocation)
14645 {
14646  CallParams callParams;
14647  GetBasicParams(callParams);
14648 
14649  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14650  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14651  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14652  vkMemReq.size,
14653  vkMemReq.alignment,
14654  vkMemReq.memoryTypeBits,
14655  requiresDedicatedAllocation ? 1 : 0,
14656  prefersDedicatedAllocation ? 1 : 0,
14657  createInfo.flags,
14658  createInfo.usage,
14659  createInfo.requiredFlags,
14660  createInfo.preferredFlags,
14661  createInfo.memoryTypeBits,
14662  createInfo.pool,
14663  allocation,
14664  userDataStr.GetString());
14665  Flush();
14666 }
14667 
14668 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14669  VmaAllocation allocation)
14670 {
14671  CallParams callParams;
14672  GetBasicParams(callParams);
14673 
14674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14675  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14676  allocation);
14677  Flush();
14678 }
14679 
14680 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14681  uint64_t allocationCount,
14682  const VmaAllocation* pAllocations)
14683 {
14684  CallParams callParams;
14685  GetBasicParams(callParams);
14686 
14687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14688  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14689  PrintPointerList(allocationCount, pAllocations);
14690  fprintf(m_File, "\n");
14691  Flush();
14692 }
14693 
14694 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14695  VmaAllocation allocation,
14696  const void* pUserData)
14697 {
14698  CallParams callParams;
14699  GetBasicParams(callParams);
14700 
14701  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14702  UserDataString userDataStr(
14703  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14704  pUserData);
14705  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14706  allocation,
14707  userDataStr.GetString());
14708  Flush();
14709 }
14710 
14711 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14712  VmaAllocation allocation)
14713 {
14714  CallParams callParams;
14715  GetBasicParams(callParams);
14716 
14717  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14718  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14719  allocation);
14720  Flush();
14721 }
14722 
14723 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14724  VmaAllocation allocation)
14725 {
14726  CallParams callParams;
14727  GetBasicParams(callParams);
14728 
14729  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14730  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14731  allocation);
14732  Flush();
14733 }
14734 
14735 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14736  VmaAllocation allocation)
14737 {
14738  CallParams callParams;
14739  GetBasicParams(callParams);
14740 
14741  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14742  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14743  allocation);
14744  Flush();
14745 }
14746 
14747 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14748  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14749 {
14750  CallParams callParams;
14751  GetBasicParams(callParams);
14752 
14753  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14754  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14755  allocation,
14756  offset,
14757  size);
14758  Flush();
14759 }
14760 
14761 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14762  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14763 {
14764  CallParams callParams;
14765  GetBasicParams(callParams);
14766 
14767  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14768  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14769  allocation,
14770  offset,
14771  size);
14772  Flush();
14773 }
14774 
14775 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14776  const VkBufferCreateInfo& bufCreateInfo,
14777  const VmaAllocationCreateInfo& allocCreateInfo,
14778  VmaAllocation allocation)
14779 {
14780  CallParams callParams;
14781  GetBasicParams(callParams);
14782 
14783  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14784  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14785  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14786  bufCreateInfo.flags,
14787  bufCreateInfo.size,
14788  bufCreateInfo.usage,
14789  bufCreateInfo.sharingMode,
14790  allocCreateInfo.flags,
14791  allocCreateInfo.usage,
14792  allocCreateInfo.requiredFlags,
14793  allocCreateInfo.preferredFlags,
14794  allocCreateInfo.memoryTypeBits,
14795  allocCreateInfo.pool,
14796  allocation,
14797  userDataStr.GetString());
14798  Flush();
14799 }
14800 
14801 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14802  const VkImageCreateInfo& imageCreateInfo,
14803  const VmaAllocationCreateInfo& allocCreateInfo,
14804  VmaAllocation allocation)
14805 {
14806  CallParams callParams;
14807  GetBasicParams(callParams);
14808 
14809  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14810  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14811  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14812  imageCreateInfo.flags,
14813  imageCreateInfo.imageType,
14814  imageCreateInfo.format,
14815  imageCreateInfo.extent.width,
14816  imageCreateInfo.extent.height,
14817  imageCreateInfo.extent.depth,
14818  imageCreateInfo.mipLevels,
14819  imageCreateInfo.arrayLayers,
14820  imageCreateInfo.samples,
14821  imageCreateInfo.tiling,
14822  imageCreateInfo.usage,
14823  imageCreateInfo.sharingMode,
14824  imageCreateInfo.initialLayout,
14825  allocCreateInfo.flags,
14826  allocCreateInfo.usage,
14827  allocCreateInfo.requiredFlags,
14828  allocCreateInfo.preferredFlags,
14829  allocCreateInfo.memoryTypeBits,
14830  allocCreateInfo.pool,
14831  allocation,
14832  userDataStr.GetString());
14833  Flush();
14834 }
14835 
14836 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14837  VmaAllocation allocation)
14838 {
14839  CallParams callParams;
14840  GetBasicParams(callParams);
14841 
14842  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14843  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14844  allocation);
14845  Flush();
14846 }
14847 
14848 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14849  VmaAllocation allocation)
14850 {
14851  CallParams callParams;
14852  GetBasicParams(callParams);
14853 
14854  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14855  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14856  allocation);
14857  Flush();
14858 }
14859 
14860 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14861  VmaAllocation allocation)
14862 {
14863  CallParams callParams;
14864  GetBasicParams(callParams);
14865 
14866  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14867  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14868  allocation);
14869  Flush();
14870 }
14871 
14872 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14873  VmaAllocation allocation)
14874 {
14875  CallParams callParams;
14876  GetBasicParams(callParams);
14877 
14878  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14879  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14880  allocation);
14881  Flush();
14882 }
14883 
14884 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14885  VmaPool pool)
14886 {
14887  CallParams callParams;
14888  GetBasicParams(callParams);
14889 
14890  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14891  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14892  pool);
14893  Flush();
14894 }
14895 
14896 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14897  const VmaDefragmentationInfo2& info,
14898  VmaDefragmentationContext ctx)
14899 {
14900  CallParams callParams;
14901  GetBasicParams(callParams);
14902 
14903  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14904  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14905  info.flags);
14906  PrintPointerList(info.allocationCount, info.pAllocations);
14907  fprintf(m_File, ",");
14908  PrintPointerList(info.poolCount, info.pPools);
14909  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14910  info.maxCpuBytesToMove,
14911  info.maxCpuAllocationsToMove,
14912  info.maxGpuBytesToMove,
14913  info.maxGpuAllocationsToMove,
14914  info.commandBuffer,
14915  ctx);
14916  Flush();
14917 }
14918 
14919 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14920  VmaDefragmentationContext ctx)
14921 {
14922  CallParams callParams;
14923  GetBasicParams(callParams);
14924 
14925  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14926  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14927  ctx);
14928  Flush();
14929 }
14930 
14931 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14932  VmaPool pool,
14933  const char* name)
14934 {
14935  CallParams callParams;
14936  GetBasicParams(callParams);
14937 
14938  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14939  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14940  pool, name != VMA_NULL ? name : "");
14941  Flush();
14942 }
14943 
14944 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14945 {
14946  if(pUserData != VMA_NULL)
14947  {
14948  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14949  {
14950  m_Str = (const char*)pUserData;
14951  }
14952  else
14953  {
14954  sprintf_s(m_PtrStr, "%p", pUserData);
14955  m_Str = m_PtrStr;
14956  }
14957  }
14958  else
14959  {
14960  m_Str = "";
14961  }
14962 }
14963 
14964 void VmaRecorder::WriteConfiguration(
14965  const VkPhysicalDeviceProperties& devProps,
14966  const VkPhysicalDeviceMemoryProperties& memProps,
14967  uint32_t vulkanApiVersion,
14968  bool dedicatedAllocationExtensionEnabled,
14969  bool bindMemory2ExtensionEnabled,
14970  bool memoryBudgetExtensionEnabled,
14971  bool deviceCoherentMemoryExtensionEnabled)
14972 {
14973  fprintf(m_File, "Config,Begin\n");
14974 
14975  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
14976 
14977  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14978  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14979  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14980  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14981  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14982  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14983 
14984  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14985  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14986  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14987 
14988  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14989  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14990  {
14991  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14992  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14993  }
14994  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14995  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14996  {
14997  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14998  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14999  }
15000 
15001  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15002  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15003  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15004  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15005 
15006  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15007  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15008  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15009  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15010  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15011  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15012  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15013  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15014  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15015 
15016  fprintf(m_File, "Config,End\n");
15017 }
15018 
15019 void VmaRecorder::GetBasicParams(CallParams& outParams)
15020 {
15021  outParams.threadId = GetCurrentThreadId();
15022 
15023  LARGE_INTEGER counter;
15024  QueryPerformanceCounter(&counter);
15025  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
15026 }
15027 
15028 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15029 {
15030  if(count)
15031  {
15032  fprintf(m_File, "%p", pItems[0]);
15033  for(uint64_t i = 1; i < count; ++i)
15034  {
15035  fprintf(m_File, " %p", pItems[i]);
15036  }
15037  }
15038 }
15039 
15040 void VmaRecorder::Flush()
15041 {
15042  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15043  {
15044  fflush(m_File);
15045  }
15046 }
15047 
15048 #endif // #if VMA_RECORDING_ENABLED
15049 
15050 ////////////////////////////////////////////////////////////////////////////////
15051 // VmaAllocationObjectAllocator
15052 
15053 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15054  m_Allocator(pAllocationCallbacks, 1024)
15055 {
15056 }
15057 
15058 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15059 {
15060  VmaMutexLock mutexLock(m_Mutex);
15061  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15062 }
15063 
15064 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15065 {
15066  VmaMutexLock mutexLock(m_Mutex);
15067  m_Allocator.Free(hAlloc);
15068 }
15069 
15071 // VmaAllocator_T
15072 
15073 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15074  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15075  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15076  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15077  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15078  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15079  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15080  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15081  m_hDevice(pCreateInfo->device),
15082  m_hInstance(pCreateInfo->instance),
15083  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15084  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15085  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15086  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15087  m_HeapSizeLimitMask(0),
15088  m_PreferredLargeHeapBlockSize(0),
15089  m_PhysicalDevice(pCreateInfo->physicalDevice),
15090  m_CurrentFrameIndex(0),
15091  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15092  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15093  m_NextPoolId(0),
15094  m_GlobalMemoryTypeBits(UINT32_MAX)
15095 #if VMA_RECORDING_ENABLED
15096  ,m_pRecorder(VMA_NULL)
15097 #endif
15098 {
15099  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15100  {
15101  m_UseKhrDedicatedAllocation = false;
15102  m_UseKhrBindMemory2 = false;
15103  }
15104 
15105  if(VMA_DEBUG_DETECT_CORRUPTION)
15106  {
15107  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15108  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15109  }
15110 
15111  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
15112 
15113  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15114  {
15115 #if !(VMA_DEDICATED_ALLOCATION)
15116  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15117  {
15118  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15119  }
15120 #endif
15121 #if !(VMA_BIND_MEMORY2)
15122  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15123  {
15124  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15125  }
15126 #endif
15127  }
15128 #if !(VMA_MEMORY_BUDGET)
15129  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15130  {
15131  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15132  }
15133 #endif
15134 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15135  if(m_UseKhrBufferDeviceAddress)
15136  {
15137  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15138  }
15139 #endif
15140 #if VMA_VULKAN_VERSION < 1002000
15141  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15142  {
15143  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15144  }
15145 #endif
15146 #if VMA_VULKAN_VERSION < 1001000
15147  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15148  {
15149  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15150  }
15151 #endif
15152 
15153  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
15154  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15155  memset(&m_MemProps, 0, sizeof(m_MemProps));
15156 
15157  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15158  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15159  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15160 
15161  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15162  {
15163  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15164  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15165  }
15166 
15167  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15168 
15169  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15170  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15171 
15172  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15173  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15174  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15175  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15176 
15177  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15178  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15179 
15180  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15181 
15182  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15183  {
15184  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15185  {
15186  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15187  if(limit != VK_WHOLE_SIZE)
15188  {
15189  m_HeapSizeLimitMask |= 1u << heapIndex;
15190  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15191  {
15192  m_MemProps.memoryHeaps[heapIndex].size = limit;
15193  }
15194  }
15195  }
15196  }
15197 
15198  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15199  {
15200  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15201 
15202  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15203  this,
15204  VK_NULL_HANDLE, // hParentPool
15205  memTypeIndex,
15206  preferredBlockSize,
15207  0,
15208  SIZE_MAX,
15209  GetBufferImageGranularity(),
15210  pCreateInfo->frameInUseCount,
15211  false, // explicitBlockSize
15212  false); // linearAlgorithm
15213  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15214  // because minBlockCount is 0.
15215  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15216 
15217  }
15218 }
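
/*
Illustrative sketch (not part of the library): the minimal create-info that
drives this constructor. `physicalDevice`, `device` and `instance` are assumed
to be valid handles already created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/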
15219 
15220 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15221 {
15222  VkResult res = VK_SUCCESS;
15223 
15224  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15225  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15226  {
15227 #if VMA_RECORDING_ENABLED
15228  m_pRecorder = vma_new(this, VmaRecorder)();
15229  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15230  if(res != VK_SUCCESS)
15231  {
15232  return res;
15233  }
15234  m_pRecorder->WriteConfiguration(
15235  m_PhysicalDeviceProperties,
15236  m_MemProps,
15237  m_VulkanApiVersion,
15238  m_UseKhrDedicatedAllocation,
15239  m_UseKhrBindMemory2,
15240  m_UseExtMemoryBudget,
15241  m_UseAmdDeviceCoherentMemory);
15242  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15243 #else
15244  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15245  return VK_ERROR_FEATURE_NOT_PRESENT;
15246 #endif
15247  }
15248 
15249 #if VMA_MEMORY_BUDGET
15250  if(m_UseExtMemoryBudget)
15251  {
15252  UpdateVulkanBudget();
15253  }
15254 #endif // #if VMA_MEMORY_BUDGET
15255 
15256  return res;
15257 }
15258 
15259 VmaAllocator_T::~VmaAllocator_T()
15260 {
15261 #if VMA_RECORDING_ENABLED
15262  if(m_pRecorder != VMA_NULL)
15263  {
15264  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15265  vma_delete(this, m_pRecorder);
15266  }
15267 #endif
15268 
15269  VMA_ASSERT(m_Pools.empty());
15270 
15271  for(size_t i = GetMemoryTypeCount(); i--; )
15272  {
15273  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15274  {
15275  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15276  }
15277 
15278  vma_delete(this, m_pDedicatedAllocations[i]);
15279  vma_delete(this, m_pBlockVectors[i]);
15280  }
15281 }
15282 
15283 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15284 {
15285 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15286  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15287  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15288  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15289  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15290  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15291  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15292  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15293  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15294  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15295  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15296  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15297  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15298  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15299  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15300  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15301  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15302  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15303 #if VMA_VULKAN_VERSION >= 1001000
15304  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15305  {
15306  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
15307  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
15308  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
15309  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
15310  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
15311  m_VulkanFunctions.vkBindBufferMemory2KHR =
15312  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
15313  m_VulkanFunctions.vkBindImageMemory2KHR =
15314  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
15315  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
15316  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
15317  }
15318 #endif
15319 #if VMA_DEDICATED_ALLOCATION
15320  if(m_UseKhrDedicatedAllocation)
15321  {
15322  if(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR == nullptr)
15323  {
15324  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
15325  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
15326  }
15327  if(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR == nullptr)
15328  {
15329  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
15330  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
15331  }
15332  }
15333 #endif
15334 #if VMA_BIND_MEMORY2
15335  if(m_UseKhrBindMemory2)
15336  {
15337  if(m_VulkanFunctions.vkBindBufferMemory2KHR == nullptr)
15338  {
15339  m_VulkanFunctions.vkBindBufferMemory2KHR =
15340  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
15341  }
15342  if(m_VulkanFunctions.vkBindImageMemory2KHR == nullptr)
15343  {
15344  m_VulkanFunctions.vkBindImageMemory2KHR =
15345  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
15346  }
15347  }
15348 #endif // #if VMA_BIND_MEMORY2
15349 #if VMA_MEMORY_BUDGET
15350  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15351  {
15352  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
15353  if(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR == nullptr)
15354  {
15355  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
15356  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
15357  }
15358  }
15359 #endif // #if VMA_MEMORY_BUDGET
15360 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15361 
15362 #define VMA_COPY_IF_NOT_NULL(funcName) \
15363  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15364 
15365  if(pVulkanFunctions != VMA_NULL)
15366  {
15367  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15368  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15369  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15370  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15371  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15372  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15373  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15374  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15375  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15376  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15377  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15378  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15379  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15380  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15381  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15382  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15383  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15384 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15385  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15386  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15387 #endif
15388 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15389  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15390  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15391 #endif
15392 #if VMA_MEMORY_BUDGET
15393  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15394 #endif
15395  }
15396 
15397 #undef VMA_COPY_IF_NOT_NULL
15398 
15399  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
15400  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
15401  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15402  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15403  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15404  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15405  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15406  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15407  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15408  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15409  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15410  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15411  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15412  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15413  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15414  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15415  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15416  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15417  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15418 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15419  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15420  {
15421  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15422  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15423  }
15424 #endif
15425 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15426  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15427  {
15428  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15429  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15430  }
15431 #endif
15432 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15433  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15434  {
15435  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15436  }
15437 #endif
15438 }
15439 
15440 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15441 {
15442  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15443  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15444  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15445  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15446 }
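
// Worked example, assuming the default VMA_SMALL_HEAP_MAX_SIZE of 1 GiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB:
// - a 512 MiB heap counts as small, so preferred block size = 512 MiB / 8 = 64 MiB;
// - an 8 GiB heap counts as large, so preferred block size = 256 MiB.
// Both values are already multiples of 32, so VmaAlignUp leaves them unchanged.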
15447 
15448 VkResult VmaAllocator_T::AllocateMemoryOfType(
15449  VkDeviceSize size,
15450  VkDeviceSize alignment,
15451  bool dedicatedAllocation,
15452  VkBuffer dedicatedBuffer,
15453  VkBufferUsageFlags dedicatedBufferUsage,
15454  VkImage dedicatedImage,
15455  const VmaAllocationCreateInfo& createInfo,
15456  uint32_t memTypeIndex,
15457  VmaSuballocationType suballocType,
15458  size_t allocationCount,
15459  VmaAllocation* pAllocations)
15460 {
15461  VMA_ASSERT(pAllocations != VMA_NULL);
15462  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
15463 
15464  VmaAllocationCreateInfo finalCreateInfo = createInfo;
15465 
15466  // If memory type is not HOST_VISIBLE, disable MAPPED.
15467  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15468  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15469  {
15470  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15471  }
15472  // If memory is lazily allocated, it should always be dedicated.
15473  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15474  {
15475  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15476  }
15477 
15478  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
15479  VMA_ASSERT(blockVector);
15480 
15481  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
15482  bool preferDedicatedMemory =
15483  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
15484  dedicatedAllocation ||
15485  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
15486  size > preferredBlockSize / 2;
15487 
15488  if(preferDedicatedMemory &&
15489  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
15490  finalCreateInfo.pool == VK_NULL_HANDLE)
15491  {
15492  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15493  }
15494 
15495  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15496  {
15497  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15498  {
15499  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15500  }
15501  else
15502  {
15503  return AllocateDedicatedMemory(
15504  size,
15505  suballocType,
15506  memTypeIndex,
15507  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15508  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15509  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15510  finalCreateInfo.pUserData,
15511  dedicatedBuffer,
15512  dedicatedBufferUsage,
15513  dedicatedImage,
15514  allocationCount,
15515  pAllocations);
15516  }
15517  }
15518  else
15519  {
15520  VkResult res = blockVector->Allocate(
15521  m_CurrentFrameIndex.load(),
15522  size,
15523  alignment,
15524  finalCreateInfo,
15525  suballocType,
15526  allocationCount,
15527  pAllocations);
15528  if(res == VK_SUCCESS)
15529  {
15530  return res;
15531  }
15532 
15533  // Allocation from the block vector failed - try dedicated memory as a fallback.
15534  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15535  {
15536  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15537  }
15538  else
15539  {
15540  res = AllocateDedicatedMemory(
15541  size,
15542  suballocType,
15543  memTypeIndex,
15544  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15545  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15546  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15547  finalCreateInfo.pUserData,
15548  dedicatedBuffer,
15549  dedicatedBufferUsage,
15550  dedicatedImage,
15551  allocationCount,
15552  pAllocations);
15553  if(res == VK_SUCCESS)
15554  {
15555  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
15556  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
15557  return VK_SUCCESS;
15558  }
15559  else
15560  {
15561  // Everything failed: Return error code.
15562  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15563  return res;
15564  }
15565  }
15566  }
15567 }
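
// In short, the strategy above is: honor VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
// (forcing it for lazily allocated memory and for requests larger than half the
// preferred block size), otherwise try to suballocate from the block vector,
// and only then fall back to a dedicated allocation unless
// VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids it.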
15568 
15569 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15570  VkDeviceSize size,
15571  VmaSuballocationType suballocType,
15572  uint32_t memTypeIndex,
15573  bool withinBudget,
15574  bool map,
15575  bool isUserDataString,
15576  void* pUserData,
15577  VkBuffer dedicatedBuffer,
15578  VkBufferUsageFlags dedicatedBufferUsage,
15579  VkImage dedicatedImage,
15580  size_t allocationCount,
15581  VmaAllocation* pAllocations)
15582 {
15583  VMA_ASSERT(allocationCount > 0 && pAllocations);
15584 
15585  if(withinBudget)
15586  {
15587  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15588  VmaBudget heapBudget = {};
15589  GetBudget(&heapBudget, heapIndex, 1);
15590  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
15591  {
15592  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15593  }
15594  }
15595 
15596  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
15597  allocInfo.memoryTypeIndex = memTypeIndex;
15598  allocInfo.allocationSize = size;
15599 
15600 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15601  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
15602  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15603  {
15604  if(dedicatedBuffer != VK_NULL_HANDLE)
15605  {
15606  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
15607  dedicatedAllocInfo.buffer = dedicatedBuffer;
15608  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
15609  }
15610  else if(dedicatedImage != VK_NULL_HANDLE)
15611  {
15612  dedicatedAllocInfo.image = dedicatedImage;
15613  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
15614  }
15615  }
15616 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15617 
15618 #if VMA_BUFFER_DEVICE_ADDRESS
15619  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
15620  if(m_UseKhrBufferDeviceAddress)
15621  {
15622  bool canContainBufferWithDeviceAddress = true;
15623  if(dedicatedBuffer != VK_NULL_HANDLE)
15624  {
15625  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
15626  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
15627  }
15628  else if(dedicatedImage != VK_NULL_HANDLE)
15629  {
15630  canContainBufferWithDeviceAddress = false;
15631  }
15632  if(canContainBufferWithDeviceAddress)
15633  {
15634  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;
15635  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
15636  }
15637  }
15638 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
15639 
15640  size_t allocIndex;
15641  VkResult res = VK_SUCCESS;
15642  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15643  {
15644  res = AllocateDedicatedMemoryPage(
15645  size,
15646  suballocType,
15647  memTypeIndex,
15648  allocInfo,
15649  map,
15650  isUserDataString,
15651  pUserData,
15652  pAllocations + allocIndex);
15653  if(res != VK_SUCCESS)
15654  {
15655  break;
15656  }
15657  }
15658 
15659  if(res == VK_SUCCESS)
15660  {
15661  // Register them in m_pDedicatedAllocations.
15662  {
15663  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15664  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15665  VMA_ASSERT(pDedicatedAllocations);
15666  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15667  {
15668  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
15669  }
15670  }
15671 
15672  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
15673  }
15674  else
15675  {
15676  // Free all already created allocations.
15677  while(allocIndex--)
15678  {
15679  VmaAllocation currAlloc = pAllocations[allocIndex];
15680  VkDeviceMemory hMemory = currAlloc->GetMemory();
15681 
15682  /*
15683  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15684  before vkFreeMemory.
15685 
15686  if(currAlloc->GetMappedData() != VMA_NULL)
15687  {
15688  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15689  }
15690  */
15691 
15692  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
15693  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
15694  currAlloc->SetUserData(this, VMA_NULL);
15695  m_AllocationObjectAllocator.Free(currAlloc);
15696  }
15697 
15698  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15699  }
15700 
15701  return res;
15702 }
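
// The withinBudget gate above implements VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT.
// For example, with budget = 1024 MiB and usage = 900 MiB, a request for
// 2 x 64 MiB (allocationCount = 2) fails with VK_ERROR_OUT_OF_DEVICE_MEMORY,
// because 900 MiB + 128 MiB exceeds the budget.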
15703 
15704 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15705  VkDeviceSize size,
15706  VmaSuballocationType suballocType,
15707  uint32_t memTypeIndex,
15708  const VkMemoryAllocateInfo& allocInfo,
15709  bool map,
15710  bool isUserDataString,
15711  void* pUserData,
15712  VmaAllocation* pAllocation)
15713 {
15714  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15715  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15716  if(res < 0)
15717  {
15718  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15719  return res;
15720  }
15721 
15722  void* pMappedData = VMA_NULL;
15723  if(map)
15724  {
15725  res = (*m_VulkanFunctions.vkMapMemory)(
15726  m_hDevice,
15727  hMemory,
15728  0,
15729  VK_WHOLE_SIZE,
15730  0,
15731  &pMappedData);
15732  if(res < 0)
15733  {
15734  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15735  FreeVulkanMemory(memTypeIndex, size, hMemory);
15736  return res;
15737  }
15738  }
15739 
15740  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
15741  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15742  (*pAllocation)->SetUserData(this, pUserData);
15743  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15744  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15745  {
15746  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15747  }
15748 
15749  return VK_SUCCESS;
15750 }
15751 
15752 void VmaAllocator_T::GetBufferMemoryRequirements(
15753  VkBuffer hBuffer,
15754  VkMemoryRequirements& memReq,
15755  bool& requiresDedicatedAllocation,
15756  bool& prefersDedicatedAllocation) const
15757 {
15758 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15759  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15760  {
15761  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15762  memReqInfo.buffer = hBuffer;
15763 
15764  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15765 
15766  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15767  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15768 
15769  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15770 
15771  memReq = memReq2.memoryRequirements;
15772  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15773  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15774  }
15775  else
15776 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15777  {
15778  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15779  requiresDedicatedAllocation = false;
15780  prefersDedicatedAllocation = false;
15781  }
15782 }
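
// A sketch of the equivalent raw Vulkan 1.1 pNext-chain pattern used above.
// Illustrative only: `device` and `buf` are hypothetical handles.
//
//   VkBufferMemoryRequirementsInfo2 info = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 };
//   info.buffer = buf;
//   VkMemoryDedicatedRequirements dedicated = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
//   VkMemoryRequirements2 req2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
//   req2.pNext = &dedicated;
//   vkGetBufferMemoryRequirements2(device, &info, &req2);
//   // dedicated.requiresDedicatedAllocation / prefersDedicatedAllocation are now valid.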
15783 
15784 void VmaAllocator_T::GetImageMemoryRequirements(
15785  VkImage hImage,
15786  VkMemoryRequirements& memReq,
15787  bool& requiresDedicatedAllocation,
15788  bool& prefersDedicatedAllocation) const
15789 {
15790 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15791  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15792  {
15793  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15794  memReqInfo.image = hImage;
15795 
15796  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15797 
15798  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15799  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15800 
15801  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15802 
15803  memReq = memReq2.memoryRequirements;
15804  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15805  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15806  }
15807  else
15808 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15809  {
15810  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15811  requiresDedicatedAllocation = false;
15812  prefersDedicatedAllocation = false;
15813  }
15814 }
15815 
15816 VkResult VmaAllocator_T::AllocateMemory(
15817  const VkMemoryRequirements& vkMemReq,
15818  bool requiresDedicatedAllocation,
15819  bool prefersDedicatedAllocation,
15820  VkBuffer dedicatedBuffer,
15821  VkBufferUsageFlags dedicatedBufferUsage,
15822  VkImage dedicatedImage,
15823  const VmaAllocationCreateInfo& createInfo,
15824  VmaSuballocationType suballocType,
15825  size_t allocationCount,
15826  VmaAllocation* pAllocations)
15827 {
15828  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15829 
15830  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15831 
15832  if(vkMemReq.size == 0)
15833  {
15834  return VK_ERROR_VALIDATION_FAILED_EXT;
15835  }
15836  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15837  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15838  {
15839  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15840  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15841  }
15842  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15843  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
15844  {
15845  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15846  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15847  }
15848  if(requiresDedicatedAllocation)
15849  {
15850  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15851  {
15852  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15853  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15854  }
15855  if(createInfo.pool != VK_NULL_HANDLE)
15856  {
15857  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15858  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15859  }
15860  }
15861  if((createInfo.pool != VK_NULL_HANDLE) &&
15862  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15863  {
15864  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15865  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15866  }
15867 
15868  if(createInfo.pool != VK_NULL_HANDLE)
15869  {
15870  const VkDeviceSize alignmentForPool = VMA_MAX(
15871  vkMemReq.alignment,
15872  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15873 
15874  VmaAllocationCreateInfo createInfoForPool = createInfo;
15875  // If memory type is not HOST_VISIBLE, disable MAPPED.
15876  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15877  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15878  {
15879  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15880  }
15881 
15882  return createInfo.pool->m_BlockVector.Allocate(
15883  m_CurrentFrameIndex.load(),
15884  vkMemReq.size,
15885  alignmentForPool,
15886  createInfoForPool,
15887  suballocType,
15888  allocationCount,
15889  pAllocations);
15890  }
15891  else
15892  {
15893  // Bit mask of Vulkan memory types acceptable for this allocation.
15894  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15895  uint32_t memTypeIndex = UINT32_MAX;
15896  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15897  if(res == VK_SUCCESS)
15898  {
15899  VkDeviceSize alignmentForMemType = VMA_MAX(
15900  vkMemReq.alignment,
15901  GetMemoryTypeMinAlignment(memTypeIndex));
15902 
15903  res = AllocateMemoryOfType(
15904  vkMemReq.size,
15905  alignmentForMemType,
15906  requiresDedicatedAllocation || prefersDedicatedAllocation,
15907  dedicatedBuffer,
15908  dedicatedBufferUsage,
15909  dedicatedImage,
15910  createInfo,
15911  memTypeIndex,
15912  suballocType,
15913  allocationCount,
15914  pAllocations);
15915  // Succeeded on first try.
15916  if(res == VK_SUCCESS)
15917  {
15918  return res;
15919  }
15920  // Allocation from this memory type failed. Try other compatible memory types.
15921  else
15922  {
15923  for(;;)
15924  {
15925  // Remove old memTypeIndex from list of possibilities.
15926  memoryTypeBits &= ~(1u << memTypeIndex);
15927  // Find alternative memTypeIndex.
15928  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15929  if(res == VK_SUCCESS)
15930  {
15931  alignmentForMemType = VMA_MAX(
15932  vkMemReq.alignment,
15933  GetMemoryTypeMinAlignment(memTypeIndex));
15934 
15935  res = AllocateMemoryOfType(
15936  vkMemReq.size,
15937  alignmentForMemType,
15938  requiresDedicatedAllocation || prefersDedicatedAllocation,
15939  dedicatedBuffer,
15940  dedicatedBufferUsage,
15941  dedicatedImage,
15942  createInfo,
15943  memTypeIndex,
15944  suballocType,
15945  allocationCount,
15946  pAllocations);
15947  // Allocation from this alternative memory type succeeded.
15948  if(res == VK_SUCCESS)
15949  {
15950  return res;
15951  }
15952  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15953  }
15954  // No other matching memory type index could be found.
15955  else
15956  {
15957  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15958  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15959  }
15960  }
15961  }
15962  }
15963  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15964  else
15965  return res;
15966  }
15967 }
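
// The same memory type lookup is available to client code. A sketch, with
// hypothetical `allocator` and `memReq` variables:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   uint32_t memTypeIndex = UINT32_MAX;
//   VkResult r = vmaFindMemoryTypeIndex(
//       allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);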
15968 
15969 void VmaAllocator_T::FreeMemory(
15970  size_t allocationCount,
15971  const VmaAllocation* pAllocations)
15972 {
15973  VMA_ASSERT(pAllocations);
15974 
15975  for(size_t allocIndex = allocationCount; allocIndex--; )
15976  {
15977  VmaAllocation allocation = pAllocations[allocIndex];
15978 
15979  if(allocation != VK_NULL_HANDLE)
15980  {
15981  if(TouchAllocation(allocation))
15982  {
15983  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15984  {
15985  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15986  }
15987 
15988  switch(allocation->GetType())
15989  {
15990  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15991  {
15992  VmaBlockVector* pBlockVector = VMA_NULL;
15993  VmaPool hPool = allocation->GetBlock()->GetParentPool();
15994  if(hPool != VK_NULL_HANDLE)
15995  {
15996  pBlockVector = &hPool->m_BlockVector;
15997  }
15998  else
15999  {
16000  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16001  pBlockVector = m_pBlockVectors[memTypeIndex];
16002  }
16003  pBlockVector->Free(allocation);
16004  }
16005  break;
16006  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16007  FreeDedicatedMemory(allocation);
16008  break;
16009  default:
16010  VMA_ASSERT(0);
16011  }
16012  }
16013 
16014  // Do this regardless of whether the allocation is lost. Lost allocations still count toward m_Budget.m_AllocationBytes.
16015  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16016  allocation->SetUserData(this, VMA_NULL);
16017  m_AllocationObjectAllocator.Free(allocation);
16018  }
16019  }
16020 }
16021 
16022 VkResult VmaAllocator_T::ResizeAllocation(
16023  const VmaAllocation alloc,
16024  VkDeviceSize newSize)
16025 {
16026  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16027  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16028  {
16029  return VK_ERROR_VALIDATION_FAILED_EXT;
16030  }
16031  if(newSize == alloc->GetSize())
16032  {
16033  return VK_SUCCESS;
16034  }
16035  return VK_ERROR_OUT_OF_POOL_MEMORY;
16036 }
16037 
16038 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16039 {
16040  // Initialize.
16041  InitStatInfo(pStats->total);
16042  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16043  InitStatInfo(pStats->memoryType[i]);
16044  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16045  InitStatInfo(pStats->memoryHeap[i]);
16046 
16047  // Process default pools.
16048  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16049  {
16050  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16051  VMA_ASSERT(pBlockVector);
16052  pBlockVector->AddStats(pStats);
16053  }
16054 
16055  // Process custom pools.
16056  {
16057  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16058  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16059  {
16060  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16061  }
16062  }
16063 
16064  // Process dedicated allocations.
16065  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16066  {
16067  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16068  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16069  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16070  VMA_ASSERT(pDedicatedAllocVector);
16071  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16072  {
16073  VmaStatInfo allocationStatInfo;
16074  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16075  VmaAddStatInfo(pStats->total, allocationStatInfo);
16076  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16077  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16078  }
16079  }
16080 
16081  // Postprocess.
16082  VmaPostprocessCalcStatInfo(pStats->total);
16083  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16084  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16085  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16086  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16087 }
16088 
16089 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16090 {
16091 #if VMA_MEMORY_BUDGET
16092  if(m_UseExtMemoryBudget)
16093  {
16094  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16095  {
16096  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16097  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16098  {
16099  const uint32_t heapIndex = firstHeap + i;
16100 
16101  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16102  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16103 
16104  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16105  {
16106  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16107  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16108  }
16109  else
16110  {
16111  outBudget->usage = 0;
16112  }
16113 
16114  // Have to take MIN with the heap size, because an explicit HeapSizeLimit has already been applied to m_MemProps heap sizes.
16115  outBudget->budget = VMA_MIN(
16116  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16117  }
16118  }
16119  else
16120  {
16121  UpdateVulkanBudget(); // Outside of mutex lock
16122  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16123  }
16124  }
16125  else
16126 #endif
16127  {
16128  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16129  {
16130  const uint32_t heapIndex = firstHeap + i;
16131 
16132  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16133  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16134 
16135  outBudget->usage = outBudget->blockBytes;
16136  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
16137  }
16138  }
16139 }
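
// Worked example for the VMA_MEMORY_BUDGET path above: if the driver reported
// usage = 2048 MiB when the budget was last fetched and blockBytes has since
// grown from 1536 MiB to 1740 MiB, the estimated usage is
// 2048 + (1740 - 1536) = 2252 MiB. Without the extension, usage is
// approximated by blockBytes and budget by 80% of the heap size.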
16140 
16141 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002 - AMD's PCI vendor ID.
16142 
16143 VkResult VmaAllocator_T::DefragmentationBegin(
16144  const VmaDefragmentationInfo2& info,
16145  VmaDefragmentationStats* pStats,
16146  VmaDefragmentationContext* pContext)
16147 {
16148  if(info.pAllocationsChanged != VMA_NULL)
16149  {
16150  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16151  }
16152 
16153  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16154  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16155 
16156  (*pContext)->AddPools(info.poolCount, info.pPools);
16157  (*pContext)->AddAllocations(
16158  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16159 
16160  VkResult res = (*pContext)->Defragment(
16161  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16162  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16163  info.commandBuffer, pStats, info.flags);
16164 
16165  if(res != VK_NOT_READY)
16166  {
16167  vma_delete(this, *pContext);
16168  *pContext = VMA_NULL;
16169  }
16170 
16171  return res;
16172 }
16173 
16174 VkResult VmaAllocator_T::DefragmentationEnd(
16175  VmaDefragmentationContext context)
16176 {
16177  vma_delete(this, context);
16178  return VK_SUCCESS;
16179 }
16180 
16181 VkResult VmaAllocator_T::DefragmentationPassBegin(
16182  VmaDefragmentationPassInfo* pInfo,
16183  VmaDefragmentationContext context)
16184 {
16185  return context->DefragmentPassBegin(pInfo);
16186 }
16187 VkResult VmaAllocator_T::DefragmentationPassEnd(
16188  VmaDefragmentationContext context)
16189 {
16190  return context->DefragmentPassEnd();
16191 
16192 }
16193 
16194 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16195 {
16196  if(hAllocation->CanBecomeLost())
16197  {
16198  /*
16199  Warning: This is a carefully designed algorithm.
16200  Do not modify unless you really know what you're doing :)
16201  */
16202  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16203  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16204  for(;;)
16205  {
16206  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16207  {
16208  pAllocationInfo->memoryType = UINT32_MAX;
16209  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16210  pAllocationInfo->offset = 0;
16211  pAllocationInfo->size = hAllocation->GetSize();
16212  pAllocationInfo->pMappedData = VMA_NULL;
16213  pAllocationInfo->pUserData = hAllocation->GetUserData();
16214  return;
16215  }
16216  else if(localLastUseFrameIndex == localCurrFrameIndex)
16217  {
16218  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16219  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16220  pAllocationInfo->offset = hAllocation->GetOffset();
16221  pAllocationInfo->size = hAllocation->GetSize();
16222  pAllocationInfo->pMappedData = VMA_NULL;
16223  pAllocationInfo->pUserData = hAllocation->GetUserData();
16224  return;
16225  }
16226  else // Last use time earlier than current time.
16227  {
16228  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16229  {
16230  localLastUseFrameIndex = localCurrFrameIndex;
16231  }
16232  }
16233  }
16234  }
16235  else
16236  {
16237 #if VMA_STATS_STRING_ENABLED
16238  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16239  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16240  for(;;)
16241  {
16242  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16243  if(localLastUseFrameIndex == localCurrFrameIndex)
16244  {
16245  break;
16246  }
16247  else // Last use time earlier than current time.
16248  {
16249  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16250  {
16251  localLastUseFrameIndex = localCurrFrameIndex;
16252  }
16253  }
16254  }
16255 #endif
16256 
16257  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16258  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16259  pAllocationInfo->offset = hAllocation->GetOffset();
16260  pAllocationInfo->size = hAllocation->GetSize();
16261  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16262  pAllocationInfo->pUserData = hAllocation->GetUserData();
16263  }
16264 }
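
// The loops above implement a lock-free "touch": they retry
// CompareExchangeLastUseFrameIndex until the allocation's last-use frame
// equals the current frame or the allocation is observed as lost, so this
// hot path needs no mutex.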
16265 
16266 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16267 {
16268  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16269  if(hAllocation->CanBecomeLost())
16270  {
16271  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16272  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16273  for(;;)
16274  {
16275  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16276  {
16277  return false;
16278  }
16279  else if(localLastUseFrameIndex == localCurrFrameIndex)
16280  {
16281  return true;
16282  }
16283  else // Last use time earlier than current time.
16284  {
16285  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16286  {
16287  localLastUseFrameIndex = localCurrFrameIndex;
16288  }
16289  }
16290  }
16291  }
16292  else
16293  {
16294 #if VMA_STATS_STRING_ENABLED
16295  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16296  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16297  for(;;)
16298  {
16299  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16300  if(localLastUseFrameIndex == localCurrFrameIndex)
16301  {
16302  break;
16303  }
16304  else // Last use time earlier than current time.
16305  {
16306  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16307  {
16308  localLastUseFrameIndex = localCurrFrameIndex;
16309  }
16310  }
16311  }
16312 #endif
16313 
16314  return true;
16315  }
16316 }
16317 
16318 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16319 {
16320  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16321 
16322  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16323 
16324  if(newCreateInfo.maxBlockCount == 0)
16325  {
16326  newCreateInfo.maxBlockCount = SIZE_MAX;
16327  }
16328  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16329  {
16330  return VK_ERROR_INITIALIZATION_FAILED;
16331  }
16332  // Memory type index out of range or forbidden.
16333  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16334  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16335  {
16336  return VK_ERROR_FEATURE_NOT_PRESENT;
16337  }
16338 
16339  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16340 
16341  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16342 
16343  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16344  if(res != VK_SUCCESS)
16345  {
16346  vma_delete(this, *pPool);
16347  *pPool = VMA_NULL;
16348  return res;
16349  }
16350 
16351  // Add to m_Pools.
16352  {
16353  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16354  (*pPool)->SetId(m_NextPoolId++);
16355  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16356  }
16357 
16358  return VK_SUCCESS;
16359 }
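
// A sketch of pool creation through the public API, with hypothetical
// `allocator` and `memTypeIndex` variables:
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 0 would mean default size.
//   poolCreateInfo.minBlockCount = 1;
//   VmaPool pool;
//   VkResult r = vmaCreatePool(allocator, &poolCreateInfo, &pool);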
16360 
16361 void VmaAllocator_T::DestroyPool(VmaPool pool)
16362 {
16363  // Remove from m_Pools.
16364  {
16365  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16366  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16367  VMA_ASSERT(success && "Pool not found in Allocator.");
16368  }
16369 
16370  vma_delete(this, pool);
16371 }
16372 
16373 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16374 {
16375  pool->m_BlockVector.GetPoolStats(pPoolStats);
16376 }
16377 
16378 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16379 {
16380  m_CurrentFrameIndex.store(frameIndex);
16381 
16382 #if VMA_MEMORY_BUDGET
16383  if(m_UseExtMemoryBudget)
16384  {
16385  UpdateVulkanBudget();
16386  }
16387 #endif // #if VMA_MEMORY_BUDGET
16388 }
16389 
16390 void VmaAllocator_T::MakePoolAllocationsLost(
16391  VmaPool hPool,
16392  size_t* pLostAllocationCount)
16393 {
16394  hPool->m_BlockVector.MakePoolAllocationsLost(
16395  m_CurrentFrameIndex.load(),
16396  pLostAllocationCount);
16397 }
16398 
16399 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
16400 {
16401  return hPool->m_BlockVector.CheckCorruption();
16402 }
16403 
16404 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16405 {
16406  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16407 
16408  // Process default pools.
16409  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16410  {
16411  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16412  {
16413  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16414  VMA_ASSERT(pBlockVector);
16415  VkResult localRes = pBlockVector->CheckCorruption();
16416  switch(localRes)
16417  {
16418  case VK_ERROR_FEATURE_NOT_PRESENT:
16419  break;
16420  case VK_SUCCESS:
16421  finalRes = VK_SUCCESS;
16422  break;
16423  default:
16424  return localRes;
16425  }
16426  }
16427  }
16428 
16429  // Process custom pools.
16430  {
16431  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16432  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16433  {
16434  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16435  {
16436  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16437  switch(localRes)
16438  {
16439  case VK_ERROR_FEATURE_NOT_PRESENT:
16440  break;
16441  case VK_SUCCESS:
16442  finalRes = VK_SUCCESS;
16443  break;
16444  default:
16445  return localRes;
16446  }
16447  }
16448  }
16449  }
16450 
16451  return finalRes;
16452 }
16453 
16454 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
16455 {
16456  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
16457  (*pAllocation)->InitLost();
16458 }
16459 
16460 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
16461 {
16462  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
16463 
16464  // HeapSizeLimit is in effect for this heap.
16465  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16466  {
16467  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16468  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
16469  for(;;)
16470  {
16471  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
16472  if(blockBytesAfterAllocation > heapSize)
16473  {
16474  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16475  }
16476  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
16477  {
16478  break;
16479  }
16480  }
16481  }
16482  else
16483  {
16484  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
16485  }
16486 
16487  // VULKAN CALL vkAllocateMemory.
16488  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
16489 
16490  if(res == VK_SUCCESS)
16491  {
16492 #if VMA_MEMORY_BUDGET
16493  ++m_Budget.m_OperationsSinceBudgetFetch;
16494 #endif
16495 
16496  // Informative callback.
16497  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
16498  {
16499  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
16500  }
16501  }
16502  else
16503  {
16504  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
16505  }
16506 
16507  return res;
16508 }
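
// Note that the compare_exchange_strong loop above atomically reserves the
// new block bytes against the user-imposed heap size limit, so two threads
// cannot both pass the check and jointly exceed the limit. The reservation
// is rolled back if vkAllocateMemory fails.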
16509 
16510 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
16511 {
16512  // Informative callback.
16513  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
16514  {
16515  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
16516  }
16517 
16518  // VULKAN CALL vkFreeMemory.
16519  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
16520 
16521  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
16522 }
16523 
16524 VkResult VmaAllocator_T::BindVulkanBuffer(
16525  VkDeviceMemory memory,
16526  VkDeviceSize memoryOffset,
16527  VkBuffer buffer,
16528  const void* pNext)
16529 {
16530  if(pNext != VMA_NULL)
16531  {
16532 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16533  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16534  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
16535  {
16536  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
16537  bindBufferMemoryInfo.pNext = pNext;
16538  bindBufferMemoryInfo.buffer = buffer;
16539  bindBufferMemoryInfo.memory = memory;
16540  bindBufferMemoryInfo.memoryOffset = memoryOffset;
16541  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
16542  }
16543  else
16544 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16545  {
16546  return VK_ERROR_EXTENSION_NOT_PRESENT;
16547  }
16548  }
16549  else
16550  {
16551  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
16552  }
16553 }
16554 
16555 VkResult VmaAllocator_T::BindVulkanImage(
16556  VkDeviceMemory memory,
16557  VkDeviceSize memoryOffset,
16558  VkImage image,
16559  const void* pNext)
16560 {
16561  if(pNext != VMA_NULL)
16562  {
16563 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16564  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16565  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
16566  {
16567  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
16568  bindImageMemoryInfo.pNext = pNext;
16569  bindImageMemoryInfo.image = image;
16570  bindImageMemoryInfo.memory = memory;
16571  bindImageMemoryInfo.memoryOffset = memoryOffset;
16572  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
16573  }
16574  else
16575 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16576  {
16577  return VK_ERROR_EXTENSION_NOT_PRESENT;
16578  }
16579  }
16580  else
16581  {
16582  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
16583  }
16584 }
16585 
16586 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
16587 {
16588  if(hAllocation->CanBecomeLost())
16589  {
16590  return VK_ERROR_MEMORY_MAP_FAILED;
16591  }
16592 
16593  switch(hAllocation->GetType())
16594  {
16595  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16596  {
16597  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16598  char *pBytes = VMA_NULL;
16599  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
16600  if(res == VK_SUCCESS)
16601  {
16602  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
16603  hAllocation->BlockAllocMap();
16604  }
16605  return res;
16606  }
16607  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16608  return hAllocation->DedicatedAllocMap(this, ppData);
16609  default:
16610  VMA_ASSERT(0);
16611  return VK_ERROR_MEMORY_MAP_FAILED;
16612  }
16613 }
16614 
16615 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16616 {
16617  switch(hAllocation->GetType())
16618  {
16619  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16620  {
16621  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16622  hAllocation->BlockAllocUnmap();
16623  pBlock->Unmap(this, 1);
16624  }
16625  break;
16626  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16627  hAllocation->DedicatedAllocUnmap(this);
16628  break;
16629  default:
16630  VMA_ASSERT(0);
16631  }
16632 }
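
// A sketch of the public mapping API that funnels into Map()/Unmap() above,
// with hypothetical `allocator`, `allocation`, `src` and `size` variables:
//
//   void* mapped = VMA_NULL;
//   if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
//   {
//       memcpy(mapped, src, size);
//       vmaUnmapMemory(allocator, allocation);
//   }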
16633 
16634 VkResult VmaAllocator_T::BindBufferMemory(
16635  VmaAllocation hAllocation,
16636  VkDeviceSize allocationLocalOffset,
16637  VkBuffer hBuffer,
16638  const void* pNext)
16639 {
16640  VkResult res = VK_SUCCESS;
16641  switch(hAllocation->GetType())
16642  {
16643  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16644  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16645  break;
16646  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16647  {
16648  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16649  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16650  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16651  break;
16652  }
16653  default:
16654  VMA_ASSERT(0);
16655  }
16656  return res;
16657 }
16658 
16659 VkResult VmaAllocator_T::BindImageMemory(
16660  VmaAllocation hAllocation,
16661  VkDeviceSize allocationLocalOffset,
16662  VkImage hImage,
16663  const void* pNext)
16664 {
16665  VkResult res = VK_SUCCESS;
16666  switch(hAllocation->GetType())
16667  {
16668  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16669  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16670  break;
16671  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16672  {
16673  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16674  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16675  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16676  break;
16677  }
16678  default:
16679  VMA_ASSERT(0);
16680  }
16681  return res;
16682 }
16683 
16684 void VmaAllocator_T::FlushOrInvalidateAllocation(
16685  VmaAllocation hAllocation,
16686  VkDeviceSize offset, VkDeviceSize size,
16687  VMA_CACHE_OPERATION op)
16688 {
16689  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
16690  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
16691  {
16692  const VkDeviceSize allocationSize = hAllocation->GetSize();
16693  VMA_ASSERT(offset <= allocationSize);
16694 
16695  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
16696 
16697  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
16698  memRange.memory = hAllocation->GetMemory();
16699 
16700  switch(hAllocation->GetType())
16701  {
16702  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16703  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16704  if(size == VK_WHOLE_SIZE)
16705  {
16706  memRange.size = allocationSize - memRange.offset;
16707  }
16708  else
16709  {
16710  VMA_ASSERT(offset + size <= allocationSize);
16711  memRange.size = VMA_MIN(
16712  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
16713  allocationSize - memRange.offset);
16714  }
16715  break;
16716 
16717  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16718  {
16719  // 1. Still within this allocation.
16720  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16721  if(size == VK_WHOLE_SIZE)
16722  {
16723  size = allocationSize - offset;
16724  }
16725  else
16726  {
16727  VMA_ASSERT(offset + size <= allocationSize);
16728  }
16729  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
16730 
16731  // 2. Adjust to whole block.
16732  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
16733  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
16734  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
16735  memRange.offset += allocationOffset;
16736  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
16737 
16738  break;
16739  }
16740 
16741  default:
16742  VMA_ASSERT(0);
16743  }
16744 
16745  switch(op)
16746  {
16747  case VMA_CACHE_FLUSH:
16748  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
16749  break;
16750  case VMA_CACHE_INVALIDATE:
16751  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
16752  break;
16753  default:
16754  VMA_ASSERT(0);
16755  }
16756  }
16757  // else: Just ignore this call.
16758 }
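
// Worked example for the non-coherent alignment above: with
// nonCoherentAtomSize = 64, offset = 70 and size = 100, a dedicated
// allocation gets memRange.offset = VmaAlignDown(70, 64) = 64 and
// memRange.size = VmaAlignUp(100 + (70 - 64), 64) = 128, clamped so the
// range does not run past the end of the allocation.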
16759 
16760 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16761 {
16762  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16763 
16764  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16765  {
16766  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16767  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16768  VMA_ASSERT(pDedicatedAllocations);
16769  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16770  VMA_ASSERT(success);
16771  }
16772 
16773  VkDeviceMemory hMemory = allocation->GetMemory();
16774 
16775  /*
16776  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16777  before vkFreeMemory.
16778 
16779  if(allocation->GetMappedData() != VMA_NULL)
16780  {
16781  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16782  }
16783  */
16784 
16785  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16786 
16787  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16788 }
16789 
16790 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16791 {
16792  VkBufferCreateInfo dummyBufCreateInfo;
16793  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16794 
16795  uint32_t memoryTypeBits = 0;
16796 
16797  // Create buffer.
16798  VkBuffer buf = VK_NULL_HANDLE;
16799  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16800  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16801  if(res == VK_SUCCESS)
16802  {
16803  // Query for supported memory types.
16804  VkMemoryRequirements memReq;
16805  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16806  memoryTypeBits = memReq.memoryTypeBits;
16807 
16808  // Destroy buffer.
16809  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16810  }
16811 
16812  return memoryTypeBits;
16813 }
16814 
16815 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
16816 {
16817  // Make sure memory information is already fetched.
16818  VMA_ASSERT(GetMemoryTypeCount() > 0);
16819 
16820  uint32_t memoryTypeBits = UINT32_MAX;
16821 
16822  if(!m_UseAmdDeviceCoherentMemory)
16823  {
16824  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
16825  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16826  {
16827  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
16828  {
16829  memoryTypeBits &= ~(1u << memTypeIndex);
16830  }
16831  }
16832  }
16833 
16834  return memoryTypeBits;
16835 }
16836 
16837 #if VMA_MEMORY_BUDGET
16838 
16839 void VmaAllocator_T::UpdateVulkanBudget()
16840 {
16841  VMA_ASSERT(m_UseExtMemoryBudget);
16842 
16843  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16844 
16845  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16846  VmaPnextChainPushFront(&memProps, &budgetProps);
16847 
16848  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16849 
16850  {
16851  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16852 
16853  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16854  {
16855  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16856  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16857  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16858 
16859  // Some buggy drivers return the budget incorrectly, e.g. as 0 or as much larger than the heap size.
16860  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
16861  {
16862  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16863  }
16864  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
16865  {
16866  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
16867  }
16868  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
16869  {
16870  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16871  }
16872  }
16873  m_Budget.m_OperationsSinceBudgetFetch = 0;
16874  }
16875 }
16876 
16877 #endif // #if VMA_MEMORY_BUDGET
16878 
16879 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16880 {
16881  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16882  !hAllocation->CanBecomeLost() &&
16883  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16884  {
16885  void* pData = VMA_NULL;
16886  VkResult res = Map(hAllocation, &pData);
16887  if(res == VK_SUCCESS)
16888  {
16889  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16890  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16891  Unmap(hAllocation);
16892  }
16893  else
16894  {
16895  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16896  }
16897  }
16898 }
16899 
16900 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16901 {
16902  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16903  if(memoryTypeBits == UINT32_MAX)
16904  {
16905  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16906  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16907  }
16908  return memoryTypeBits;
16909 }
16910 
16911 #if VMA_STATS_STRING_ENABLED
16912 
16913 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16914 {
16915  bool dedicatedAllocationsStarted = false;
16916  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16917  {
16918  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16919  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16920  VMA_ASSERT(pDedicatedAllocVector);
16921  if(pDedicatedAllocVector->empty() == false)
16922  {
16923  if(dedicatedAllocationsStarted == false)
16924  {
16925  dedicatedAllocationsStarted = true;
16926  json.WriteString("DedicatedAllocations");
16927  json.BeginObject();
16928  }
16929 
16930  json.BeginString("Type ");
16931  json.ContinueString(memTypeIndex);
16932  json.EndString();
16933 
16934  json.BeginArray();
16935 
16936  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
16937  {
16938  json.BeginObject(true);
16939  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
16940  hAlloc->PrintParameters(json);
16941  json.EndObject();
16942  }
16943 
16944  json.EndArray();
16945  }
16946  }
16947  if(dedicatedAllocationsStarted)
16948  {
16949  json.EndObject();
16950  }
16951 
16952  {
16953  bool allocationsStarted = false;
16954  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16955  {
16956  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
16957  {
16958  if(allocationsStarted == false)
16959  {
16960  allocationsStarted = true;
16961  json.WriteString("DefaultPools");
16962  json.BeginObject();
16963  }
16964 
16965  json.BeginString("Type ");
16966  json.ContinueString(memTypeIndex);
16967  json.EndString();
16968 
16969  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
16970  }
16971  }
16972  if(allocationsStarted)
16973  {
16974  json.EndObject();
16975  }
16976  }
16977 
16978  // Custom pools
16979  {
16980  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16981  const size_t poolCount = m_Pools.size();
16982  if(poolCount > 0)
16983  {
16984  json.WriteString("Pools");
16985  json.BeginObject();
16986  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
16987  {
16988  json.BeginString();
16989  json.ContinueString(m_Pools[poolIndex]->GetId());
16990  json.EndString();
16991 
16992  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
16993  }
16994  json.EndObject();
16995  }
16996  }
16997 }
16998 
16999 #endif // #if VMA_STATS_STRING_ENABLED
17000 
17001 ////////////////////////////////////////////////////////////////////////////////
17002 // Public interface
17003 
17004 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17005  const VmaAllocatorCreateInfo* pCreateInfo,
17006  VmaAllocator* pAllocator)
17007 {
17008  VMA_ASSERT(pCreateInfo && pAllocator);
17009  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17010  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17011  VMA_DEBUG_LOG("vmaCreateAllocator");
17012  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17013  return (*pAllocator)->Init(pCreateInfo);
17014 }
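/*
Editor's note: a minimal usage sketch for the function above (not part of
the header). Assumes you already have a valid VkInstance, VkPhysicalDevice
and VkDevice; error handling omitted for brevity:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/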
17015 
17016 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17017  VmaAllocator allocator)
17018 {
17019  if(allocator != VK_NULL_HANDLE)
17020  {
17021  VMA_DEBUG_LOG("vmaDestroyAllocator");
17022  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17023  vma_delete(&allocationCallbacks, allocator);
17024  }
17025 }
17026 
17027 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17028 {
17029  VMA_ASSERT(allocator && pAllocatorInfo);
17030  pAllocatorInfo->instance = allocator->m_hInstance;
17031  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17032  pAllocatorInfo->device = allocator->m_hDevice;
17033 }
17034 
17035 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17036  VmaAllocator allocator,
17037  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17038 {
17039  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17040  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17041 }
17042 
17043 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17044  VmaAllocator allocator,
17045  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17046 {
17047  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17048  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17049 }
17050 
17051 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17052  VmaAllocator allocator,
17053  uint32_t memoryTypeIndex,
17054  VkMemoryPropertyFlags* pFlags)
17055 {
17056  VMA_ASSERT(allocator && pFlags);
17057  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17058  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17059 }
17060 
17061 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17062  VmaAllocator allocator,
17063  uint32_t frameIndex)
17064 {
17065  VMA_ASSERT(allocator);
17066  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17067 
17068  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17069 
17070  allocator->SetCurrentFrameIndex(frameIndex);
17071 }
17072 
17073 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17074  VmaAllocator allocator,
17075  VmaStats* pStats)
17076 {
17077  VMA_ASSERT(allocator && pStats);
17078  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17079  allocator->CalculateStats(pStats);
17080 }
17081 
17082 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17083  VmaAllocator allocator,
17084  VmaBudget* pBudget)
17085 {
17086  VMA_ASSERT(allocator && pBudget);
17087  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17088  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17089 }
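/*
Editor's note: a sketch of querying the per-heap budget with the function
above (variable names are hypothetical). vmaGetBudget fills one VmaBudget
entry per memory heap:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);

    const VkPhysicalDeviceMemoryProperties* memProps = NULL;
    vmaGetMemoryProperties(allocator, &memProps);
    for(uint32_t heap = 0; heap < memProps->memoryHeapCount; ++heap)
    {
        printf("Heap %u: usage %llu / budget %llu bytes\n",
            heap,
            (unsigned long long)budgets[heap].usage,
            (unsigned long long)budgets[heap].budget);
    }
*/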
17090 
17091 #if VMA_STATS_STRING_ENABLED
17092 
17093 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17094  VmaAllocator allocator,
17095  char** ppStatsString,
17096  VkBool32 detailedMap)
17097 {
17098  VMA_ASSERT(allocator && ppStatsString);
17099  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17100 
17101  VmaStringBuilder sb(allocator);
17102  {
17103  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17104  json.BeginObject();
17105 
17106  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17107  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17108 
17109  VmaStats stats;
17110  allocator->CalculateStats(&stats);
17111 
17112  json.WriteString("Total");
17113  VmaPrintStatInfo(json, stats.total);
17114 
17115  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17116  {
17117  json.BeginString("Heap ");
17118  json.ContinueString(heapIndex);
17119  json.EndString();
17120  json.BeginObject();
17121 
17122  json.WriteString("Size");
17123  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17124 
17125  json.WriteString("Flags");
17126  json.BeginArray(true);
17127  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17128  {
17129  json.WriteString("DEVICE_LOCAL");
17130  }
17131  json.EndArray();
17132 
17133  json.WriteString("Budget");
17134  json.BeginObject();
17135  {
17136  json.WriteString("BlockBytes");
17137  json.WriteNumber(budget[heapIndex].blockBytes);
17138  json.WriteString("AllocationBytes");
17139  json.WriteNumber(budget[heapIndex].allocationBytes);
17140  json.WriteString("Usage");
17141  json.WriteNumber(budget[heapIndex].usage);
17142  json.WriteString("Budget");
17143  json.WriteNumber(budget[heapIndex].budget);
17144  }
17145  json.EndObject();
17146 
17147  if(stats.memoryHeap[heapIndex].blockCount > 0)
17148  {
17149  json.WriteString("Stats");
17150  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17151  }
17152 
17153  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17154  {
17155  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17156  {
17157  json.BeginString("Type ");
17158  json.ContinueString(typeIndex);
17159  json.EndString();
17160 
17161  json.BeginObject();
17162 
17163  json.WriteString("Flags");
17164  json.BeginArray(true);
17165  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17166  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17167  {
17168  json.WriteString("DEVICE_LOCAL");
17169  }
17170  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17171  {
17172  json.WriteString("HOST_VISIBLE");
17173  }
17174  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17175  {
17176  json.WriteString("HOST_COHERENT");
17177  }
17178  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17179  {
17180  json.WriteString("HOST_CACHED");
17181  }
17182  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17183  {
17184  json.WriteString("LAZILY_ALLOCATED");
17185  }
17186  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17187  {
17188  json.WriteString("PROTECTED");
17189  }
17190  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17191  {
17192  json.WriteString("DEVICE_COHERENT");
17193  }
17194  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17195  {
17196  json.WriteString("DEVICE_UNCACHED");
17197  }
17198  json.EndArray();
17199 
17200  if(stats.memoryType[typeIndex].blockCount > 0)
17201  {
17202  json.WriteString("Stats");
17203  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17204  }
17205 
17206  json.EndObject();
17207  }
17208  }
17209 
17210  json.EndObject();
17211  }
17212  if(detailedMap == VK_TRUE)
17213  {
17214  allocator->PrintDetailedMap(json);
17215  }
17216 
17217  json.EndObject();
17218  }
17219 
17220  const size_t len = sb.GetLength();
17221  char* const pChars = vma_new_array(allocator, char, len + 1);
17222  if(len > 0)
17223  {
17224  memcpy(pChars, sb.GetData(), len);
17225  }
17226  pChars[len] = '\0';
17227  *ppStatsString = pChars;
17228 }
17229 
17230 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17231  VmaAllocator allocator,
17232  char* pStatsString)
17233 {
17234  if(pStatsString != VMA_NULL)
17235  {
17236  VMA_ASSERT(allocator);
17237  size_t len = strlen(pStatsString);
17238  vma_delete_array(allocator, pStatsString, len + 1);
17239  }
17240 }
17241 
17242 #endif // #if VMA_STATS_STRING_ENABLED
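/*
Editor's note: typical use of the pair above (a sketch; only available when
VMA_STATS_STRING_ENABLED is nonzero):

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    puts(statsString); // or write the JSON to a file
    vmaFreeStatsString(allocator, statsString);
*/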
17243 
17244 /*
17245 This function is not protected by any mutex because it just reads immutable data.
17246 */
17247 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17248  VmaAllocator allocator,
17249  uint32_t memoryTypeBits,
17250  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17251  uint32_t* pMemoryTypeIndex)
17252 {
17253  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17254  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17255  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17256 
17257  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17258 
17259  if(pAllocationCreateInfo->memoryTypeBits != 0)
17260  {
17261  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17262  }
17263 
17264  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17265  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17266  uint32_t notPreferredFlags = 0;
17267 
17268  // Convert usage to requiredFlags and preferredFlags.
17269  switch(pAllocationCreateInfo->usage)
17270  {
17271  case VMA_MEMORY_USAGE_UNKNOWN:
17272  break;
17273  case VMA_MEMORY_USAGE_GPU_ONLY:
17274  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17275  {
17276  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17277  }
17278  break;
17279  case VMA_MEMORY_USAGE_CPU_ONLY:
17280  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17281  break;
17282  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17283  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17284  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17285  {
17286  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17287  }
17288  break;
17289  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17290  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17291  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17292  break;
17293  case VMA_MEMORY_USAGE_CPU_COPY:
17294  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17295  break;
17296  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17297  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17298  break;
17299  default:
17300  VMA_ASSERT(0);
17301  break;
17302  }
17303 
17304  // Avoid DEVICE_COHERENT unless explicitly requested.
17305  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17306  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17307  {
17308  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17309  }
17310 
17311  *pMemoryTypeIndex = UINT32_MAX;
17312  uint32_t minCost = UINT32_MAX;
17313  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17314  memTypeIndex < allocator->GetMemoryTypeCount();
17315  ++memTypeIndex, memTypeBit <<= 1)
17316  {
17317  // This memory type is acceptable according to memoryTypeBits bitmask.
17318  if((memTypeBit & memoryTypeBits) != 0)
17319  {
17320  const VkMemoryPropertyFlags currFlags =
17321  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17322  // This memory type contains requiredFlags.
17323  if((requiredFlags & ~currFlags) == 0)
17324  {
17325  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17326  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17327  VmaCountBitsSet(currFlags & notPreferredFlags);
17328  // Remember memory type with lowest cost.
17329  if(currCost < minCost)
17330  {
17331  *pMemoryTypeIndex = memTypeIndex;
17332  if(currCost == 0)
17333  {
17334  return VK_SUCCESS;
17335  }
17336  minCost = currCost;
17337  }
17338  }
17339  }
17340  }
17341  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17342 }
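/*
Editor's note: a sketch of calling the cost-based search above directly.
The memoryTypeBits mask (memReq is a hypothetical variable here) would
normally come from vkGetBufferMemoryRequirements or
vkGetImageMemoryRequirements:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // res == VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.
*/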
17343 
17344 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17345  VmaAllocator allocator,
17346  const VkBufferCreateInfo* pBufferCreateInfo,
17347  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17348  uint32_t* pMemoryTypeIndex)
17349 {
17350  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17351  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17352  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17353  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17354 
17355  const VkDevice hDev = allocator->m_hDevice;
17356  VkBuffer hBuffer = VK_NULL_HANDLE;
17357  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17358  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17359  if(res == VK_SUCCESS)
17360  {
17361  VkMemoryRequirements memReq = {};
17362  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17363  hDev, hBuffer, &memReq);
17364 
17365  res = vmaFindMemoryTypeIndex(
17366  allocator,
17367  memReq.memoryTypeBits,
17368  pAllocationCreateInfo,
17369  pMemoryTypeIndex);
17370 
17371  allocator->GetVulkanFunctions().vkDestroyBuffer(
17372  hDev, hBuffer, allocator->GetAllocationCallbacks());
17373  }
17374  return res;
17375 }
17376 
17377 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17378  VmaAllocator allocator,
17379  const VkImageCreateInfo* pImageCreateInfo,
17380  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17381  uint32_t* pMemoryTypeIndex)
17382 {
17383  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17384  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17385  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17386  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17387 
17388  const VkDevice hDev = allocator->m_hDevice;
17389  VkImage hImage = VK_NULL_HANDLE;
17390  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17391  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17392  if(res == VK_SUCCESS)
17393  {
17394  VkMemoryRequirements memReq = {};
17395  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17396  hDev, hImage, &memReq);
17397 
17398  res = vmaFindMemoryTypeIndex(
17399  allocator,
17400  memReq.memoryTypeBits,
17401  pAllocationCreateInfo,
17402  pMemoryTypeIndex);
17403 
17404  allocator->GetVulkanFunctions().vkDestroyImage(
17405  hDev, hImage, allocator->GetAllocationCallbacks());
17406  }
17407  return res;
17408 }
17409 
17410 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
17411  VmaAllocator allocator,
17412  const VmaPoolCreateInfo* pCreateInfo,
17413  VmaPool* pPool)
17414 {
17415  VMA_ASSERT(allocator && pCreateInfo && pPool);
17416 
17417  VMA_DEBUG_LOG("vmaCreatePool");
17418 
17419  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17420 
17421  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17422 
17423 #if VMA_RECORDING_ENABLED
17424  if(allocator->GetRecorder() != VMA_NULL)
17425  {
17426  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17427  }
17428 #endif
17429 
17430  return res;
17431 }
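/*
Editor's note: a sketch of creating a custom pool (sizes are hypothetical).
The memory type index is usually obtained with one of the
vmaFindMemoryTypeIndex* helpers above:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations then set VmaAllocationCreateInfo::pool = pool.
    // ...
    vmaDestroyPool(allocator, pool);
*/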
17432 
17433 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17434  VmaAllocator allocator,
17435  VmaPool pool)
17436 {
17437  VMA_ASSERT(allocator);
17438 
17439  if(pool == VK_NULL_HANDLE)
17440  {
17441  return;
17442  }
17443 
17444  VMA_DEBUG_LOG("vmaDestroyPool");
17445 
17446  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17447 
17448 #if VMA_RECORDING_ENABLED
17449  if(allocator->GetRecorder() != VMA_NULL)
17450  {
17451  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
17452  }
17453 #endif
17454 
17455  allocator->DestroyPool(pool);
17456 }
17457 
17458 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
17459  VmaAllocator allocator,
17460  VmaPool pool,
17461  VmaPoolStats* pPoolStats)
17462 {
17463  VMA_ASSERT(allocator && pool && pPoolStats);
17464 
17465  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17466 
17467  allocator->GetPoolStats(pool, pPoolStats);
17468 }
17469 
17470 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
17471  VmaAllocator allocator,
17472  VmaPool pool,
17473  size_t* pLostAllocationCount)
17474 {
17475  VMA_ASSERT(allocator && pool);
17476 
17477  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17478 
17479 #if VMA_RECORDING_ENABLED
17480  if(allocator->GetRecorder() != VMA_NULL)
17481  {
17482  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
17483  }
17484 #endif
17485 
17486  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
17487 }
17488 
17489 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
17490 {
17491  VMA_ASSERT(allocator && pool);
17492 
17493  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17494 
17495  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
17496 
17497  return allocator->CheckPoolCorruption(pool);
17498 }
17499 
17500 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
17501  VmaAllocator allocator,
17502  VmaPool pool,
17503  const char** ppName)
17504 {
17505  VMA_ASSERT(allocator && pool);
17506 
17507  VMA_DEBUG_LOG("vmaGetPoolName");
17508 
17509  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17510 
17511  *ppName = pool->GetName();
17512 }
17513 
17514 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
17515  VmaAllocator allocator,
17516  VmaPool pool,
17517  const char* pName)
17518 {
17519  VMA_ASSERT(allocator && pool);
17520 
17521  VMA_DEBUG_LOG("vmaSetPoolName");
17522 
17523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17524 
17525  pool->SetName(pName);
17526 
17527 #if VMA_RECORDING_ENABLED
17528  if(allocator->GetRecorder() != VMA_NULL)
17529  {
17530  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
17531  }
17532 #endif
17533 }
17534 
17535 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
17536  VmaAllocator allocator,
17537  const VkMemoryRequirements* pVkMemoryRequirements,
17538  const VmaAllocationCreateInfo* pCreateInfo,
17539  VmaAllocation* pAllocation,
17540  VmaAllocationInfo* pAllocationInfo)
17541 {
17542  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
17543 
17544  VMA_DEBUG_LOG("vmaAllocateMemory");
17545 
17546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17547 
17548  VkResult result = allocator->AllocateMemory(
17549  *pVkMemoryRequirements,
17550  false, // requiresDedicatedAllocation
17551  false, // prefersDedicatedAllocation
17552  VK_NULL_HANDLE, // dedicatedBuffer
17553  UINT32_MAX, // dedicatedBufferUsage
17554  VK_NULL_HANDLE, // dedicatedImage
17555  *pCreateInfo,
17556  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17557  1, // allocationCount
17558  pAllocation);
17559 
17560 #if VMA_RECORDING_ENABLED
17561  if(allocator->GetRecorder() != VMA_NULL)
17562  {
17563  allocator->GetRecorder()->RecordAllocateMemory(
17564  allocator->GetCurrentFrameIndex(),
17565  *pVkMemoryRequirements,
17566  *pCreateInfo,
17567  *pAllocation);
17568  }
17569 #endif
17570 
17571  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17572  {
17573  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17574  }
17575 
17576  return result;
17577 }
17578 
17579 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
17580  VmaAllocator allocator,
17581  const VkMemoryRequirements* pVkMemoryRequirements,
17582  const VmaAllocationCreateInfo* pCreateInfo,
17583  size_t allocationCount,
17584  VmaAllocation* pAllocations,
17585  VmaAllocationInfo* pAllocationInfo)
17586 {
17587  if(allocationCount == 0)
17588  {
17589  return VK_SUCCESS;
17590  }
17591 
17592  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
17593 
17594  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
17595 
17596  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17597 
17598  VkResult result = allocator->AllocateMemory(
17599  *pVkMemoryRequirements,
17600  false, // requiresDedicatedAllocation
17601  false, // prefersDedicatedAllocation
17602  VK_NULL_HANDLE, // dedicatedBuffer
17603  UINT32_MAX, // dedicatedBufferUsage
17604  VK_NULL_HANDLE, // dedicatedImage
17605  *pCreateInfo,
17606  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17607  allocationCount,
17608  pAllocations);
17609 
17610 #if VMA_RECORDING_ENABLED
17611  if(allocator->GetRecorder() != VMA_NULL)
17612  {
17613  allocator->GetRecorder()->RecordAllocateMemoryPages(
17614  allocator->GetCurrentFrameIndex(),
17615  *pVkMemoryRequirements,
17616  *pCreateInfo,
17617  (uint64_t)allocationCount,
17618  pAllocations);
17619  }
17620 #endif
17621 
17622  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17623  {
17624  for(size_t i = 0; i < allocationCount; ++i)
17625  {
17626  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
17627  }
17628  }
17629 
17630  return result;
17631 }
17632 
17633 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
17634  VmaAllocator allocator,
17635  VkBuffer buffer,
17636  const VmaAllocationCreateInfo* pCreateInfo,
17637  VmaAllocation* pAllocation,
17638  VmaAllocationInfo* pAllocationInfo)
17639 {
17640  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17641 
17642  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
17643 
17644  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17645 
17646  VkMemoryRequirements vkMemReq = {};
17647  bool requiresDedicatedAllocation = false;
17648  bool prefersDedicatedAllocation = false;
17649  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
17650  requiresDedicatedAllocation,
17651  prefersDedicatedAllocation);
17652 
17653  VkResult result = allocator->AllocateMemory(
17654  vkMemReq,
17655  requiresDedicatedAllocation,
17656  prefersDedicatedAllocation,
17657  buffer, // dedicatedBuffer
17658  UINT32_MAX, // dedicatedBufferUsage
17659  VK_NULL_HANDLE, // dedicatedImage
17660  *pCreateInfo,
17661  VMA_SUBALLOCATION_TYPE_BUFFER,
17662  1, // allocationCount
17663  pAllocation);
17664 
17665 #if VMA_RECORDING_ENABLED
17666  if(allocator->GetRecorder() != VMA_NULL)
17667  {
17668  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
17669  allocator->GetCurrentFrameIndex(),
17670  vkMemReq,
17671  requiresDedicatedAllocation,
17672  prefersDedicatedAllocation,
17673  *pCreateInfo,
17674  *pAllocation);
17675  }
17676 #endif
17677 
17678  if(pAllocationInfo && result == VK_SUCCESS)
17679  {
17680  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17681  }
17682 
17683  return result;
17684 }
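/*
Editor's note: a sketch of the create-allocate-bind sequence this function
supports, for a buffer created manually with vkCreateBuffer:

    VkBuffer buffer; // assumed already created with vkCreateBuffer

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemoryForBuffer(
        allocator, buffer, &allocCreateInfo, &allocation, NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, allocation, buffer);
    }
*/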
17685 
17686 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17687  VmaAllocator allocator,
17688  VkImage image,
17689  const VmaAllocationCreateInfo* pCreateInfo,
17690  VmaAllocation* pAllocation,
17691  VmaAllocationInfo* pAllocationInfo)
17692 {
17693  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17694 
17695  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17696 
17697  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17698 
17699  VkMemoryRequirements vkMemReq = {};
17700  bool requiresDedicatedAllocation = false;
17701  bool prefersDedicatedAllocation = false;
17702  allocator->GetImageMemoryRequirements(image, vkMemReq,
17703  requiresDedicatedAllocation, prefersDedicatedAllocation);
17704 
17705  VkResult result = allocator->AllocateMemory(
17706  vkMemReq,
17707  requiresDedicatedAllocation,
17708  prefersDedicatedAllocation,
17709  VK_NULL_HANDLE, // dedicatedBuffer
17710  UINT32_MAX, // dedicatedBufferUsage
17711  image, // dedicatedImage
17712  *pCreateInfo,
17713  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17714  1, // allocationCount
17715  pAllocation);
17716 
17717 #if VMA_RECORDING_ENABLED
17718  if(allocator->GetRecorder() != VMA_NULL)
17719  {
17720  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17721  allocator->GetCurrentFrameIndex(),
17722  vkMemReq,
17723  requiresDedicatedAllocation,
17724  prefersDedicatedAllocation,
17725  *pCreateInfo,
17726  *pAllocation);
17727  }
17728 #endif
17729 
17730  if(pAllocationInfo && result == VK_SUCCESS)
17731  {
17732  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17733  }
17734 
17735  return result;
17736 }
17737 
17738 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17739  VmaAllocator allocator,
17740  VmaAllocation allocation)
17741 {
17742  VMA_ASSERT(allocator);
17743 
17744  if(allocation == VK_NULL_HANDLE)
17745  {
17746  return;
17747  }
17748 
17749  VMA_DEBUG_LOG("vmaFreeMemory");
17750 
17751  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17752 
17753 #if VMA_RECORDING_ENABLED
17754  if(allocator->GetRecorder() != VMA_NULL)
17755  {
17756  allocator->GetRecorder()->RecordFreeMemory(
17757  allocator->GetCurrentFrameIndex(),
17758  allocation);
17759  }
17760 #endif
17761 
17762  allocator->FreeMemory(
17763  1, // allocationCount
17764  &allocation);
17765 }
17766 
17767 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17768  VmaAllocator allocator,
17769  size_t allocationCount,
17770  VmaAllocation* pAllocations)
17771 {
17772  if(allocationCount == 0)
17773  {
17774  return;
17775  }
17776 
17777  VMA_ASSERT(allocator);
17778 
17779  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17780 
17781  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17782 
17783 #if VMA_RECORDING_ENABLED
17784  if(allocator->GetRecorder() != VMA_NULL)
17785  {
17786  allocator->GetRecorder()->RecordFreeMemoryPages(
17787  allocator->GetCurrentFrameIndex(),
17788  (uint64_t)allocationCount,
17789  pAllocations);
17790  }
17791 #endif
17792 
17793  allocator->FreeMemory(allocationCount, pAllocations);
17794 }
17795 
17796 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17797  VmaAllocator allocator,
17798  VmaAllocation allocation,
17799  VkDeviceSize newSize)
17800 {
17801  VMA_ASSERT(allocator && allocation);
17802 
17803  VMA_DEBUG_LOG("vmaResizeAllocation");
17804 
17805  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17806 
17807  return allocator->ResizeAllocation(allocation, newSize);
17808 }
17809 
17810 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17811  VmaAllocator allocator,
17812  VmaAllocation allocation,
17813  VmaAllocationInfo* pAllocationInfo)
17814 {
17815  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17816 
17817  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17818 
17819 #if VMA_RECORDING_ENABLED
17820  if(allocator->GetRecorder() != VMA_NULL)
17821  {
17822  allocator->GetRecorder()->RecordGetAllocationInfo(
17823  allocator->GetCurrentFrameIndex(),
17824  allocation);
17825  }
17826 #endif
17827 
17828  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17829 }
17830 
17831 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17832  VmaAllocator allocator,
17833  VmaAllocation allocation)
17834 {
17835  VMA_ASSERT(allocator && allocation);
17836 
17837  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17838 
17839 #if VMA_RECORDING_ENABLED
17840  if(allocator->GetRecorder() != VMA_NULL)
17841  {
17842  allocator->GetRecorder()->RecordTouchAllocation(
17843  allocator->GetCurrentFrameIndex(),
17844  allocation);
17845  }
17846 #endif
17847 
17848  return allocator->TouchAllocation(allocation);
17849 }
17850 
17851 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17852  VmaAllocator allocator,
17853  VmaAllocation allocation,
17854  void* pUserData)
17855 {
17856  VMA_ASSERT(allocator && allocation);
17857 
17858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17859 
17860  allocation->SetUserData(allocator, pUserData);
17861 
17862 #if VMA_RECORDING_ENABLED
17863  if(allocator->GetRecorder() != VMA_NULL)
17864  {
17865  allocator->GetRecorder()->RecordSetAllocationUserData(
17866  allocator->GetCurrentFrameIndex(),
17867  allocation,
17868  pUserData);
17869  }
17870 #endif
17871 }
17872 
17873 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17874  VmaAllocator allocator,
17875  VmaAllocation* pAllocation)
17876 {
17877  VMA_ASSERT(allocator && pAllocation);
17878 
17879  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17880 
17881  allocator->CreateLostAllocation(pAllocation);
17882 
17883 #if VMA_RECORDING_ENABLED
17884  if(allocator->GetRecorder() != VMA_NULL)
17885  {
17886  allocator->GetRecorder()->RecordCreateLostAllocation(
17887  allocator->GetCurrentFrameIndex(),
17888  *pAllocation);
17889  }
17890 #endif
17891 }
17892 
17893 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17894  VmaAllocator allocator,
17895  VmaAllocation allocation,
17896  void** ppData)
17897 {
17898  VMA_ASSERT(allocator && allocation && ppData);
17899 
17900  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17901 
17902  VkResult res = allocator->Map(allocation, ppData);
17903 
17904 #if VMA_RECORDING_ENABLED
17905  if(allocator->GetRecorder() != VMA_NULL)
17906  {
17907  allocator->GetRecorder()->RecordMapMemory(
17908  allocator->GetCurrentFrameIndex(),
17909  allocation);
17910  }
17911 #endif
17912 
17913  return res;
17914 }
17915 
17916 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17917  VmaAllocator allocator,
17918  VmaAllocation allocation)
17919 {
17920  VMA_ASSERT(allocator && allocation);
17921 
17922  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17923 
17924 #if VMA_RECORDING_ENABLED
17925  if(allocator->GetRecorder() != VMA_NULL)
17926  {
17927  allocator->GetRecorder()->RecordUnmapMemory(
17928  allocator->GetCurrentFrameIndex(),
17929  allocation);
17930  }
17931 #endif
17932 
17933  allocator->Unmap(allocation);
17934 }
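/*
Editor's note: a sketch of a map-write-unmap round trip using the two
functions above (srcData/srcDataSize are hypothetical). Map/Unmap calls
are reference-counted per allocation, so nested pairs are allowed:

    void* mappedData = NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/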
17935 
17936 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17937 {
17938  VMA_ASSERT(allocator && allocation);
17939 
17940  VMA_DEBUG_LOG("vmaFlushAllocation");
17941 
17942  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17943 
17944  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17945 
17946 #if VMA_RECORDING_ENABLED
17947  if(allocator->GetRecorder() != VMA_NULL)
17948  {
17949  allocator->GetRecorder()->RecordFlushAllocation(
17950  allocator->GetCurrentFrameIndex(),
17951  allocation, offset, size);
17952  }
17953 #endif
17954 }
17955 
17956 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17957 {
17958  VMA_ASSERT(allocator && allocation);
17959 
17960  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17961 
17962  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17963 
17964  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17965 
17966 #if VMA_RECORDING_ENABLED
17967  if(allocator->GetRecorder() != VMA_NULL)
17968  {
17969  allocator->GetRecorder()->RecordInvalidateAllocation(
17970  allocator->GetCurrentFrameIndex(),
17971  allocation, offset, size);
17972  }
17973 #endif
17974 }
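/*
Editor's note: flush/invalidate are only required for memory types lacking
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; on HOST_COHERENT types the calls
above degenerate to no-ops (see "else: Just ignore this call" in
FlushOrInvalidateAllocation). A sketch for a possibly non-coherent type:

    memcpy(mappedData, srcData, srcDataSize); // hypothetical source
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    // ... and before the CPU reads data written by the GPU:
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/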
17975 
17976 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17977 {
17978  VMA_ASSERT(allocator);
17979 
17980  VMA_DEBUG_LOG("vmaCheckCorruption");
17981 
17982  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17983 
17984  return allocator->CheckCorruption(memoryTypeBits);
17985 }
17986 
17987 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17988  VmaAllocator allocator,
17989  VmaAllocation* pAllocations,
17990  size_t allocationCount,
17991  VkBool32* pAllocationsChanged,
17992  const VmaDefragmentationInfo *pDefragmentationInfo,
17993  VmaDefragmentationStats* pDefragmentationStats)
17994 {
17995  // Deprecated interface, reimplemented using the new one.
17996 
17997  VmaDefragmentationInfo2 info2 = {};
17998  info2.allocationCount = (uint32_t)allocationCount;
17999  info2.pAllocations = pAllocations;
18000  info2.pAllocationsChanged = pAllocationsChanged;
18001  if(pDefragmentationInfo != VMA_NULL)
18002  {
18003  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18004  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18005  }
18006  else
18007  {
18008  info2.maxCpuAllocationsToMove = UINT32_MAX;
18009  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18010  }
18011  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18012 
18012 
18013  VmaDefragmentationContext ctx;
18014  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18015  if(res == VK_NOT_READY)
18016  {
18017  res = vmaDefragmentationEnd(allocator, ctx);
18018  }
18019  return res;
18020 }
18021 
18022 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18023  VmaAllocator allocator,
18024  const VmaDefragmentationInfo2* pInfo,
18025  VmaDefragmentationStats* pStats,
18026  VmaDefragmentationContext *pContext)
18027 {
18028  VMA_ASSERT(allocator && pInfo && pContext);
18029 
18030  // Degenerate case: Nothing to defragment.
18031  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18032  {
18033  return VK_SUCCESS;
18034  }
18035 
18036  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18037  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18038  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18039  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18040 
18041  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18042 
18043  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18044 
18045  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18046 
18047 #if VMA_RECORDING_ENABLED
18048  if(allocator->GetRecorder() != VMA_NULL)
18049  {
18050  allocator->GetRecorder()->RecordDefragmentationBegin(
18051  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18052  }
18053 #endif
18054 
18055  return res;
18056 }
18057 
18058 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18059  VmaAllocator allocator,
18060  VmaDefragmentationContext context)
18061 {
18062  VMA_ASSERT(allocator);
18063 
18064  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18065 
18066  if(context != VK_NULL_HANDLE)
18067  {
18068  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18069 
18070 #if VMA_RECORDING_ENABLED
18071  if(allocator->GetRecorder() != VMA_NULL)
18072  {
18073  allocator->GetRecorder()->RecordDefragmentationEnd(
18074  allocator->GetCurrentFrameIndex(), context);
18075  }
18076 #endif
18077 
18078  return allocator->DefragmentationEnd(context);
18079  }
18080  else
18081  {
18082  return VK_SUCCESS;
18083  }
18084 }
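/*
Editor's note: a sketch of the begin/end pair for CPU-side defragmentation
(allocations/allocCount are hypothetical). With commandBuffer left null,
only HOST_VISIBLE memory can be moved, since moves are done with memcpy:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/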
18085 
18086 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18087  VmaAllocator allocator,
18088  VmaDefragmentationContext context,
18089  VmaDefragmentationPassInfo* pInfo
18090  )
18091 {
18092  VMA_ASSERT(allocator);
18093  VMA_ASSERT(pInfo);
18094  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
18095 
18096  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18097 
18098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18099 
18100  if(context == VK_NULL_HANDLE)
18101  {
18102  pInfo->moveCount = 0;
18103  return VK_SUCCESS;
18104  }
18105 
18106  return allocator->DefragmentationPassBegin(pInfo, context);
18107 }
18108 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18109  VmaAllocator allocator,
18110  VmaDefragmentationContext context)
18111 {
18112  VMA_ASSERT(allocator);
18113 
18114  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18115  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18116 
18117  if(context == VK_NULL_HANDLE)
18118  return VK_SUCCESS;
18119 
18120  return allocator->DefragmentationPassEnd(context);
18121 }
18122 
18123 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18124  VmaAllocator allocator,
18125  VmaAllocation allocation,
18126  VkBuffer buffer)
18127 {
18128  VMA_ASSERT(allocator && allocation && buffer);
18129 
18130  VMA_DEBUG_LOG("vmaBindBufferMemory");
18131 
18132  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18133 
18134  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18135 }
18136 
18137 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18138  VmaAllocator allocator,
18139  VmaAllocation allocation,
18140  VkDeviceSize allocationLocalOffset,
18141  VkBuffer buffer,
18142  const void* pNext)
18143 {
18144  VMA_ASSERT(allocator && allocation && buffer);
18145 
18146  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18147 
18148  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18149 
18150  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18151 }
18152 
18153 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18154  VmaAllocator allocator,
18155  VmaAllocation allocation,
18156  VkImage image)
18157 {
18158  VMA_ASSERT(allocator && allocation && image);
18159 
18160  VMA_DEBUG_LOG("vmaBindImageMemory");
18161 
18162  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18163 
18164  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18165 }
18166 
18167 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18168  VmaAllocator allocator,
18169  VmaAllocation allocation,
18170  VkDeviceSize allocationLocalOffset,
18171  VkImage image,
18172  const void* pNext)
18173 {
18174  VMA_ASSERT(allocator && allocation && image);
18175 
18176  VMA_DEBUG_LOG("vmaBindImageMemory2");
18177 
18178  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18179 
18180  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18181 }
18182 
18183 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18184  VmaAllocator allocator,
18185  const VkBufferCreateInfo* pBufferCreateInfo,
18186  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18187  VkBuffer* pBuffer,
18188  VmaAllocation* pAllocation,
18189  VmaAllocationInfo* pAllocationInfo)
18190 {
18191  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18192 
18193  if(pBufferCreateInfo->size == 0)
18194  {
18195  return VK_ERROR_VALIDATION_FAILED_EXT;
18196  }
18197  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18198  !allocator->m_UseKhrBufferDeviceAddress)
18199  {
18200  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18201  return VK_ERROR_VALIDATION_FAILED_EXT;
18202  }
18203 
18204  VMA_DEBUG_LOG("vmaCreateBuffer");
18205 
18206  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18207 
18208  *pBuffer = VK_NULL_HANDLE;
18209  *pAllocation = VK_NULL_HANDLE;
18210 
18211  // 1. Create VkBuffer.
18212  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18213  allocator->m_hDevice,
18214  pBufferCreateInfo,
18215  allocator->GetAllocationCallbacks(),
18216  pBuffer);
18217  if(res >= 0)
18218  {
18219  // 2. vkGetBufferMemoryRequirements.
18220  VkMemoryRequirements vkMemReq = {};
18221  bool requiresDedicatedAllocation = false;
18222  bool prefersDedicatedAllocation = false;
18223  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18224  requiresDedicatedAllocation, prefersDedicatedAllocation);
18225 
18226  // 3. Allocate memory using allocator.
18227  res = allocator->AllocateMemory(
18228  vkMemReq,
18229  requiresDedicatedAllocation,
18230  prefersDedicatedAllocation,
18231  *pBuffer, // dedicatedBuffer
18232  pBufferCreateInfo->usage, // dedicatedBufferUsage
18233  VK_NULL_HANDLE, // dedicatedImage
18234  *pAllocationCreateInfo,
18235  VMA_SUBALLOCATION_TYPE_BUFFER,
18236  1, // allocationCount
18237  pAllocation);
18238 
18239 #if VMA_RECORDING_ENABLED
18240  if(allocator->GetRecorder() != VMA_NULL)
18241  {
18242  allocator->GetRecorder()->RecordCreateBuffer(
18243  allocator->GetCurrentFrameIndex(),
18244  *pBufferCreateInfo,
18245  *pAllocationCreateInfo,
18246  *pAllocation);
18247  }
18248 #endif
18249 
18250  if(res >= 0)
18251  {
18252  // 4. Bind buffer with memory.
18253  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18254  {
18255  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18256  }
18257  if(res >= 0)
18258  {
18259  // All steps succeeded.
18260  #if VMA_STATS_STRING_ENABLED
18261  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
18262  #endif
18263  if(pAllocationInfo != VMA_NULL)
18264  {
18265  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18266  }
18267 
18268  return VK_SUCCESS;
18269  }
18270  allocator->FreeMemory(
18271  1, // allocationCount
18272  pAllocation);
18273  *pAllocation = VK_NULL_HANDLE;
18274  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18275  *pBuffer = VK_NULL_HANDLE;
18276  return res;
18277  }
18278  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18279  *pBuffer = VK_NULL_HANDLE;
18280  return res;
18281  }
18282  return res;
18283 }
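/*
Editor's note: the canonical quick-start sketch for the function above,
creating a GPU-only buffer in one call and destroying it together with
its memory:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/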
18284 
18285 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18286  VmaAllocator allocator,
18287  VkBuffer buffer,
18288  VmaAllocation allocation)
18289 {
18290  VMA_ASSERT(allocator);
18291 
18292  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18293  {
18294  return;
18295  }
18296 
18297  VMA_DEBUG_LOG("vmaDestroyBuffer");
18298 
18299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18300 
18301 #if VMA_RECORDING_ENABLED
18302  if(allocator->GetRecorder() != VMA_NULL)
18303  {
18304  allocator->GetRecorder()->RecordDestroyBuffer(
18305  allocator->GetCurrentFrameIndex(),
18306  allocation);
18307  }
18308 #endif
18309 
18310  if(buffer != VK_NULL_HANDLE)
18311  {
18312  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18313  }
18314 
18315  if(allocation != VK_NULL_HANDLE)
18316  {
18317  allocator->FreeMemory(
18318  1, // allocationCount
18319  &allocation);
18320  }
18321 }
18322 
18323 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
18324  VmaAllocator allocator,
18325  const VkImageCreateInfo* pImageCreateInfo,
18326  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18327  VkImage* pImage,
18328  VmaAllocation* pAllocation,
18329  VmaAllocationInfo* pAllocationInfo)
18330 {
18331  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
18332 
18333  if(pImageCreateInfo->extent.width == 0 ||
18334  pImageCreateInfo->extent.height == 0 ||
18335  pImageCreateInfo->extent.depth == 0 ||
18336  pImageCreateInfo->mipLevels == 0 ||
18337  pImageCreateInfo->arrayLayers == 0)
18338  {
18339  return VK_ERROR_VALIDATION_FAILED_EXT;
18340  }
18341 
18342  VMA_DEBUG_LOG("vmaCreateImage");
18343 
18344  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18345 
18346  *pImage = VK_NULL_HANDLE;
18347  *pAllocation = VK_NULL_HANDLE;
18348 
18349  // 1. Create VkImage.
18350  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
18351  allocator->m_hDevice,
18352  pImageCreateInfo,
18353  allocator->GetAllocationCallbacks(),
18354  pImage);
18355  if(res >= 0)
18356  {
18357  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
18358  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
18359  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
18360 
18361  // 2. Allocate memory using allocator.
18362  VkMemoryRequirements vkMemReq = {};
18363  bool requiresDedicatedAllocation = false;
18364  bool prefersDedicatedAllocation = false;
18365  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
18366  requiresDedicatedAllocation, prefersDedicatedAllocation);
18367 
18368  res = allocator->AllocateMemory(
18369  vkMemReq,
18370  requiresDedicatedAllocation,
18371  prefersDedicatedAllocation,
18372  VK_NULL_HANDLE, // dedicatedBuffer
18373  UINT32_MAX, // dedicatedBufferUsage
18374  *pImage, // dedicatedImage
18375  *pAllocationCreateInfo,
18376  suballocType,
18377  1, // allocationCount
18378  pAllocation);
18379 
18380 #if VMA_RECORDING_ENABLED
18381  if(allocator->GetRecorder() != VMA_NULL)
18382  {
18383  allocator->GetRecorder()->RecordCreateImage(
18384  allocator->GetCurrentFrameIndex(),
18385  *pImageCreateInfo,
18386  *pAllocationCreateInfo,
18387  *pAllocation);
18388  }
18389 #endif
18390 
18391  if(res >= 0)
18392  {
18393  // 3. Bind image with memory.
18394  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18395  {
18396  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
18397  }
18398  if(res >= 0)
18399  {
18400  // All steps succeeded.
18401  #if VMA_STATS_STRING_ENABLED
18402  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
18403  #endif
18404  if(pAllocationInfo != VMA_NULL)
18405  {
18406  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18407  }
18408 
18409  return VK_SUCCESS;
18410  }
18411  allocator->FreeMemory(
18412  1, // allocationCount
18413  pAllocation);
18414  *pAllocation = VK_NULL_HANDLE;
18415  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18416  *pImage = VK_NULL_HANDLE;
18417  return res;
18418  }
18419  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18420  *pImage = VK_NULL_HANDLE;
18421  return res;
18422  }
18423  return res;
18424 }
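/*
Editor's note: the analogous sketch for images. Note how the code above
derives VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL vs ..._LINEAR from the tiling,
to keep linear and optimal resources apart per bufferImageGranularity:

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent.width = 1024;
    imageInfo.extent.height = 1024;
    imageInfo.extent.depth = 1;
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/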
18425 
18426 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
18427  VmaAllocator allocator,
18428  VkImage image,
18429  VmaAllocation allocation)
18430 {
18431  VMA_ASSERT(allocator);
18432 
18433  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18434  {
18435  return;
18436  }
18437 
18438  VMA_DEBUG_LOG("vmaDestroyImage");
18439 
18440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18441 
18442 #if VMA_RECORDING_ENABLED
18443  if(allocator->GetRecorder() != VMA_NULL)
18444  {
18445  allocator->GetRecorder()->RecordDestroyImage(
18446  allocator->GetCurrentFrameIndex(),
18447  allocation);
18448  }
18449 #endif
18450 
18451  if(image != VK_NULL_HANDLE)
18452  {
18453  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
18454  }
18455  if(allocation != VK_NULL_HANDLE)
18456  {
18457  allocator->FreeMemory(
18458  1, // allocationCount
18459  &allocation);
18460  }
18461 }
18462 
18463 #endif // #ifdef VMA_IMPLEMENTATION
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2135
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2093
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1983
VMA_RECORD_FLAG_BITS_MAX_ENUM
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2130
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2091
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1960
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2156
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2760
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3311
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2182
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2044
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2361
VmaDefragmentationPassMoveInfo::memory
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3379
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2506
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2832
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2589
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1992
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2838
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2569
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2122
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1979
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2556
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2162
VMA_RECORD_FLUSH_AFTER_CALL_BIT
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2128
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2096
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2975
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2329
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2663
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2335
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2095
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1875
VmaDefragmentationPassMoveInfo::offset
VkDeviceSize offset
Definition: vk_mem_alloc.h:3380
VmaDefragmentationPassInfo::pMoves
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3389
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2600
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaAllocatorInfo::instance
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2261
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2386
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2107
VmaAllocatorCreateInfo
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:2150
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2530
VmaAllocatorInfo::device
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2271
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3297
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2851
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2593
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2017
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
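A sketch of how this is typically called once per frame, assuming a valid `allocator` and a hypothetical application-maintained counter `frameIndex`:
// Advance the allocator's notion of the current frame (assumed counter `frameIndex`).
vmaSetCurrentFrameIndex(allocator, ++frameIndex);
// Together with VmaPoolCreateInfo::frameInUseCount, this index determines when
// allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT may be made lost.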
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:3406
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2584
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2444
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2325
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2788
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2800
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Number of pools in the pPools array.
Definition: vk_mem_alloc.h:3329
VmaDefragmentationPassMoveInfo
Definition: vk_mem_alloc.h:3377
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a string in JSON format.
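A minimal sketch, assuming a valid `allocator`; the returned string must be released with vmaFreeStatsString():
char* statsJson = nullptr;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE requests the detailed map
// ... write statsJson to a log or file of your choice ...
vmaFreeStatsString(allocator, statsJson); // free with the same allocator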
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2841
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2084
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
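A sketch of allocating memory for an externally created buffer, assuming valid `device` and `allocator` handles and an already filled VkBufferCreateInfo named `bufCreateInfo` (all hypothetical); note that binding goes through vmaBindBufferMemory() rather than vkBindBufferMemory():
VkBuffer buf = VK_NULL_HANDLE;
vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
if(res == VK_SUCCESS)
    vmaBindBufferMemory(allocator, alloc, buf); // bind through VMA, not vkBindBufferMemory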
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2082
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3295
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2980
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2512
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2092
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2781
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in lost state from the beginning.
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2656
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2343
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2519
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2032
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3308
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2100
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3360
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3410
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2835
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2094
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2132
VMA_MEMORY_USAGE_CPU_ONLY
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2476
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3345
VmaAllocation
Represents a single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2498
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
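A sketch of attaching a debug name, assuming hypothetical `allocator` and `allocation` handles, the allocation created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set:
vmaSetAllocationUserData(allocator, allocation, (void*)"MeshVertexBuffer");
VmaAllocationInfo info;
vmaGetAllocationInfo(allocator, allocation, &info);
// With the COPY_STRING bit set, info.pUserData points to an internal copy of the string.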
VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3296
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2226
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2099
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2101
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3374
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1966
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2339
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2805
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2241
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2322
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3414
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationPassInfo::moveCount
uint32_t moveCount
Definition: vk_mem_alloc.h:3388
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2466
vmaBeginDefragmentationPass
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
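A sketch of the usual two-step pattern, assuming a valid `allocator`: first query a memory type index from an example resource description, then create the pool for that type. All sizes below are illustrative only.
// Describe an example buffer that future pool allocations will resemble.
VkBufferCreateInfo sampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufInfo.size = 1024;
sampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo sampleAllocInfo = {};
sampleAllocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex = 0;
vmaFindMemoryTypeIndexForBufferInfo(allocator, &sampleBufInfo, &sampleAllocInfo, &memTypeIndex);

VmaPoolCreateInfo poolInfo = {};
poolInfo.memoryTypeIndex = memTypeIndex;
poolInfo.blockSize = 16ull * 1024 * 1024; // illustrative: 16 MiB per block
poolInfo.maxBlockCount = 4;               // illustrative: cap the pool at 64 MiB

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
// ... later: vmaDestroyPool(allocator, pool);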
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2333
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2080
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
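A sketch, assuming a valid `allocator`: every allocation made in one call shares the same VkMemoryRequirements and create info, and the whole batch is later freed with vmaFreeMemoryPages(). The requirements below are illustrative.
VkMemoryRequirements memReq = {};
memReq.size = 256 * 1024;            // illustrative size per allocation
memReq.alignment = 256;              // illustrative alignment
memReq.memoryTypeBits = UINT32_MAX;  // accept any memory type

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocs[8] = {};
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, allocs, nullptr);
// ... later:
vmaFreeMemoryPages(allocator, 8, allocs);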
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2331
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2165
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1987
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2813
VmaPoolCreateInfo
Describes parameters of a VmaPool to be created.
Definition: vk_mem_alloc.h:2785
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1981
VmaPool
Represents a custom memory pool.
VMA_MEMORY_USAGE_GPU_TO_CPU
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2492
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2563
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2791
VMA_MEMORY_USAGE_MAX_ENUM
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2508
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:2327
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2098
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3305
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3401
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2365
VmaAllocatorInfo
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2255
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2779
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2637
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2610
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2334
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2168
VMA_ALLOCATION_CREATE_STRATEGY_MASK
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2614
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2159
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
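A sketch of a transient CPU write, assuming <cstring> for memcpy, a valid `allocator`, an `allocation` residing in a HOST_VISIBLE memory type, and hypothetical `data`/`dataSize` to upload:
void* mapped = nullptr;
if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, data, dataSize);
    // Required only when the memory type lacks HOST_COHERENT; harmless otherwise.
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}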
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2207
VmaDefragmentationPassMoveInfo::allocation
VmaAllocation allocation
Definition: vk_mem_alloc.h:3378
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
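A sketch mirroring the buffer case, assuming a valid `allocator`; the image description is illustrative only:
VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgInfo.imageType = VK_IMAGE_TYPE_2D;
imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgInfo.extent = { 1024, 1024, 1 };
imgInfo.mipLevels = 1;
imgInfo.arrayLayers = 1;
imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgInfo, &allocCreateInfo, &image, &allocation, nullptr);
// ... later: vmaDestroyImage(allocator, image, allocation);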
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2397
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VmaDefragmentationPassInfo
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2090
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2994
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2153
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3299
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
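A short sketch, assuming valid `allocator` and `pool` handles:
VmaPoolStats poolStats = {};
vmaGetPoolStats(allocator, pool, &poolStats);
// poolStats.size vs. poolStats.unusedSize shows how full the pool is;
// poolStats.unusedRangeSizeMax is the largest allocation that would still fit.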
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2105
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2335
VMA_MEMORY_USAGE_CPU_TO_GPU
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2483
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2607
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2604
VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2062
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2632
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2334
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2985
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2138
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2097
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2999
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2743
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
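A sketch of the one-call path that creates a buffer, allocates memory for it, and binds the two together, assuming a valid `allocator` (size and usage flags are illustrative):
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
// ... later: vmaDestroyBuffer(allocator, buffer, allocation);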
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2342
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
Definition: vk_mem_alloc.h:2219
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2854
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
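A minimal initialization sketch, assuming vk_mem_alloc.h is included (with VMA_IMPLEMENTATION defined in exactly one .cpp file) and that hypothetical `instance`, `physicalDevice`, and `device` handles were created beforehand:
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0; // or higher, if enabled on the device
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator for the lifetime of the device ...
vmaDestroyAllocator(allocator);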
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
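A short sketch, assuming a valid `allocator`; this only does real work when the library was compiled with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // check every memory type
// VK_SUCCESS: margins verified, no corruption found.
// VK_ERROR_FEATURE_NOT_PRESENT: corruption detection not enabled at compile time.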
VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3387
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2621
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2341
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2232
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VMA_MEMORY_USAGE_UNKNOWN
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2449
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3365
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2104
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2827
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2106
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3350
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a VmaPool to be created.
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2966
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:2543
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2103
vmaGetAllocatorInfo
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about an existing VmaAllocator object: handle to Vulkan device etc.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2844
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2725
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3412
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2335
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2574
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaDefragmentationPassMoveInfo
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2626
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2102
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
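A sketch of a per-heap budget check, assuming a valid `allocator`; the pointer passed here must reference an array with one VmaBudget element per memory heap:
VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(allocator, budgets);

const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
vmaGetMemoryProperties(allocator, &memProps);
for(uint32_t heap = 0; heap < memProps->memoryHeapCount; ++heap)
{
    if(budgets[heap].usage >= budgets[heap].budget)
    {
        // Over budget on this heap: defer new allocations or release resources.
    }
}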
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2623
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2642
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
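A sketch of a CPU-side defragmentation pass, assuming a valid `allocator` plus a hypothetical `allocations` array of `allocCount` elements that are not currently in use by the GPU; the count goes into VmaDefragmentationInfo2::allocationCount:
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
// commandBuffer left null: only CPU-side (host-visible) moves are performed.

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to allocations that moved must be recreated and rebound.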
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
vmaEndDefragmentationPass
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3326
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3416
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2650
VmaAllocatorInfo::physicalDevice
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2266
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3418
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2146
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2334
VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2961
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2771
VmaAllocatorInfo
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2376
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2619
VmaDefragmentationContext
Opaque object that represents a started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3320
VMA_POOL_CREATE_ALGORITHM_MASK
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2775
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3355
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3396
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2580
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.