Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1860 /*
1861 Define this macro to 0/1 to disable/enable support for recording functionality,
1862 available through VmaAllocatorCreateInfo::pRecordSettings.
1863 */
1864 #ifndef VMA_RECORDING_ENABLED
1865  #define VMA_RECORDING_ENABLED 0
1866 #endif
1867 
1868 #ifndef NOMINMAX
1869  #define NOMINMAX // For windows.h
1870 #endif
1871 
1872 #ifndef VULKAN_H_
1873  #include <vulkan/vulkan.h>
1874 #endif
1875 
1876 #if VMA_RECORDING_ENABLED
1877  #include <windows.h>
1878 #endif
1879 
1880 // Define this macro to declare the maximum supported Vulkan version in the format AAABBBCCC,
1881 // where AAA = major, BBB = minor, CCC = patch.
1882 // If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1883 #if !defined(VMA_VULKAN_VERSION)
1884  #if defined(VK_VERSION_1_2)
1885  #define VMA_VULKAN_VERSION 1002000
1886  #elif defined(VK_VERSION_1_1)
1887  #define VMA_VULKAN_VERSION 1001000
1888  #else
1889  #define VMA_VULKAN_VERSION 1000000
1890  #endif
1891 #endif
1892 
1893 #if !defined(VMA_DEDICATED_ALLOCATION)
1894  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1895  #define VMA_DEDICATED_ALLOCATION 1
1896  #else
1897  #define VMA_DEDICATED_ALLOCATION 0
1898  #endif
1899 #endif
1900 
1901 #if !defined(VMA_BIND_MEMORY2)
1902  #if VK_KHR_bind_memory2
1903  #define VMA_BIND_MEMORY2 1
1904  #else
1905  #define VMA_BIND_MEMORY2 0
1906  #endif
1907 #endif
1908 
1909 #if !defined(VMA_MEMORY_BUDGET)
1910  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1911  #define VMA_MEMORY_BUDGET 1
1912  #else
1913  #define VMA_MEMORY_BUDGET 0
1914  #endif
1915 #endif
1916 
1917 // Defined to 1 when the VK_KHR_buffer_device_address device extension or the equivalent core Vulkan 1.2 feature is defined in the Vulkan headers.
1918 #if !defined(VMA_BUFFER_DEVICE_ADDRESS)
1919  #if VK_KHR_buffer_device_address || VK_EXT_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
1920  #define VMA_BUFFER_DEVICE_ADDRESS 1
1921  #else
1922  #define VMA_BUFFER_DEVICE_ADDRESS 0
1923  #endif
1924 #endif
1925 
1926 // Define these macros to decorate all public functions with additional code,
1927 // before and after the returned type, respectively. This may be useful for
1928 // exporting the functions when compiling VMA as a separate library. Example:
1929 // #define VMA_CALL_PRE __declspec(dllexport)
1930 // #define VMA_CALL_POST __cdecl
1931 #ifndef VMA_CALL_PRE
1932  #define VMA_CALL_PRE
1933 #endif
1934 #ifndef VMA_CALL_POST
1935  #define VMA_CALL_POST
1936 #endif
1937 
1947 VK_DEFINE_HANDLE(VmaAllocator)
1948 
1949 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1951  VmaAllocator allocator,
1952  uint32_t memoryType,
1953  VkDeviceMemory memory,
1954  VkDeviceSize size,
1955  void* pUserData);
1957 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1958  VmaAllocator allocator,
1959  uint32_t memoryType,
1960  VkDeviceMemory memory,
1961  VkDeviceSize size,
1962  void* pUserData);
1963 
1971 typedef struct VmaDeviceMemoryCallbacks {
1973  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1975  PFN_vmaFreeDeviceMemoryFunction pfnFree;
1977  void* pUserData;
1978 } VmaDeviceMemoryCallbacks;
1979 
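/*
Example (a minimal sketch, not part of the header): hooking the device memory
callbacks to log every vkAllocateMemory/vkFreeMemory the library makes. The
callback function name is hypothetical.

    static void VKAPI_PTR MyVmaAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
    {
        printf("VMA allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }

    VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
    deviceMemoryCallbacks.pfnAllocate = MyVmaAllocateCallback;
    // deviceMemoryCallbacks.pfnFree = ...; // analogous
    // Then assign it to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
*/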
1986 typedef enum VmaAllocatorCreateFlagBits {
1992  VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
2014  VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
2025  VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
2035  VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
2045  VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
2070  VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
2074  VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2075 } VmaAllocatorCreateFlagBits;
2078 typedef VkFlags VmaAllocatorCreateFlags;
2079 
2084 typedef struct VmaVulkanFunctions {
2085  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
2086  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
2087  PFN_vkAllocateMemory vkAllocateMemory;
2088  PFN_vkFreeMemory vkFreeMemory;
2089  PFN_vkMapMemory vkMapMemory;
2090  PFN_vkUnmapMemory vkUnmapMemory;
2091  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
2092  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
2093  PFN_vkBindBufferMemory vkBindBufferMemory;
2094  PFN_vkBindImageMemory vkBindImageMemory;
2095  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
2096  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
2097  PFN_vkCreateBuffer vkCreateBuffer;
2098  PFN_vkDestroyBuffer vkDestroyBuffer;
2099  PFN_vkCreateImage vkCreateImage;
2100  PFN_vkDestroyImage vkDestroyImage;
2101  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
2102 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
2103  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
2104  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
2105 #endif
2106 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
2107  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
2108  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
2109 #endif
2110 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
2111  PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
2112 #endif
2113 } VmaVulkanFunctions;
2114 
2116 typedef enum VmaRecordFlagBits {
2121  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
2122  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2123 } VmaRecordFlagBits;
2126 typedef VkFlags VmaRecordFlags;
2127 
2129 typedef struct VmaRecordSettings
2130 {
2132  VmaRecordFlags flags;
2140  const char* pFilePath;
2141 } VmaRecordSettings;
2142 
2144 typedef struct VmaAllocatorCreateInfo
2145 {
2147  VmaAllocatorCreateFlags flags;
2150  VkPhysicalDevice physicalDevice;
2153  VkDevice device;
2156  VkDeviceSize preferredLargeHeapBlockSize;
2159  const VkAllocationCallbacks* pAllocationCallbacks;
2162  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
2178  uint32_t frameInUseCount;
2201  const VkDeviceSize* pHeapSizeLimit;
2209  const VmaVulkanFunctions* pVulkanFunctions;
2215  const VmaRecordSettings* pRecordSettings;
2222  VkInstance instance;
2231  uint32_t vulkanApiVersion;
2232 } VmaAllocatorCreateInfo;
2233 
2235 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2236  const VmaAllocatorCreateInfo* pCreateInfo,
2237  VmaAllocator* pAllocator);
2238 
2240 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2241  VmaAllocator allocator);
2242 
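/*
Typical usage (a minimal sketch; physicalDevice, device, and instance are
assumed to be valid Vulkan handles already created by the application):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images, allocations ...
    vmaDestroyAllocator(allocator);
*/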
2245 typedef struct VmaAllocatorInfo
2246 {
2251  VkInstance instance;
2256  VkPhysicalDevice physicalDevice;
2261  VkDevice device;
2262 } VmaAllocatorInfo;
2263 
2269 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo);
2270 
2275 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2276  VmaAllocator allocator,
2277  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
2278 
2283 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2284  VmaAllocator allocator,
2285  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
2286 
2293 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2294  VmaAllocator allocator,
2295  uint32_t memoryTypeIndex,
2296  VkMemoryPropertyFlags* pFlags);
2297 
2306 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2307  VmaAllocator allocator,
2308  uint32_t frameIndex);
2309 
2312 typedef struct VmaStatInfo
2313 {
2315  uint32_t blockCount;
2317  uint32_t allocationCount;
2319  uint32_t unusedRangeCount;
2321  VkDeviceSize usedBytes;
2323  VkDeviceSize unusedBytes;
2324  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
2325  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
2326 } VmaStatInfo;
2327 
2329 typedef struct VmaStats
2330 {
2331  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2332  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2333  VmaStatInfo total;
2334 } VmaStats;
2335 
2345 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2346  VmaAllocator allocator,
2347  VmaStats* pStats);
2348 
2351 typedef struct VmaBudget
2352 {
2355  VkDeviceSize blockBytes;
2356 
2366  VkDeviceSize allocationBytes;
2367 
2376  VkDeviceSize usage;
2377 
2387  VkDeviceSize budget;
2388 } VmaBudget;
2389 
2400 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2401  VmaAllocator allocator,
2402  VmaBudget* pBudget);
2403 
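/*
Example (a sketch): checking the current budget of heap 0 before making a large
allocation. neededBytes is a hypothetical application value.

    VmaBudget budget[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budget);
    if(budget[0].usage + neededBytes <= budget[0].budget)
    {
        // Allocating neededBytes from a memory type in heap 0 should fit in budget.
    }
*/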
2404 #ifndef VMA_STATS_STRING_ENABLED
2405 #define VMA_STATS_STRING_ENABLED 1
2406 #endif
2407 
2408 #if VMA_STATS_STRING_ENABLED
2409 
2411 
2413 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2414  VmaAllocator allocator,
2415  char** ppStatsString,
2416  VkBool32 detailedMap);
2417 
2418 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2419  VmaAllocator allocator,
2420  char* pStatsString);
2421 
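/*
Example (a sketch): dumping detailed statistics as a JSON string, e.g. to attach
to a bug report, then releasing it with the matching free function.

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/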
2422 #endif // #if VMA_STATS_STRING_ENABLED
2423 
2432 VK_DEFINE_HANDLE(VmaPool)
2433 
2434 typedef enum VmaMemoryUsage
2435 {
2440  VMA_MEMORY_USAGE_UNKNOWN = 0,
2449  VMA_MEMORY_USAGE_GPU_ONLY = 1,
2458  VMA_MEMORY_USAGE_CPU_ONLY = 2,
2466  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
2475  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
2482  VMA_MEMORY_USAGE_CPU_COPY = 5,
2492  VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
2497  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
2499 } VmaMemoryUsage;
2500 
2512 typedef enum VmaAllocationCreateFlagBits {
2521  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
2527  VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
2533  VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
2546  VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
2550  VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
2555  VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
2559  VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
2562  VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
2565  VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
2575  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
2580  VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
2585  VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
2588  VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
2591  VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2594  VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
2601  VMA_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT | VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT | VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
2608  VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2610 } VmaAllocationCreateFlagBits;
2611 typedef VkFlags VmaAllocationCreateFlags;
2612 
2613 typedef struct VmaAllocationCreateInfo
2614 {
2616  VmaAllocationCreateFlags flags;
2622  VmaMemoryUsage usage;
2627  VkMemoryPropertyFlags requiredFlags;
2632  VkMemoryPropertyFlags preferredFlags;
2640  uint32_t memoryTypeBits;
2646  VmaPool pool;
2653  void* pUserData;
2654 } VmaAllocationCreateInfo;
2655 
2672 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2673  VmaAllocator allocator,
2674  uint32_t memoryTypeBits,
2675  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2676  uint32_t* pMemoryTypeIndex);
2677 
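/*
Example (a sketch): finding a memory type for a staging buffer that must be
HOST_VISIBLE and is preferably also HOST_COHERENT.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // UINT32_MAX: don't exclude any memory types up front.
*/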
2690 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2691  VmaAllocator allocator,
2692  const VkBufferCreateInfo* pBufferCreateInfo,
2693  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2694  uint32_t* pMemoryTypeIndex);
2695 
2708 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2709  VmaAllocator allocator,
2710  const VkImageCreateInfo* pImageCreateInfo,
2711  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2712  uint32_t* pMemoryTypeIndex);
2713 
2737 typedef enum VmaPoolCreateFlagBits {
2750  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2761  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2767  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2768  VMA_POOL_CREATE_ALGORITHM_MASK = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
2769  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2770 } VmaPoolCreateFlagBits;
2771 typedef VkFlags VmaPoolCreateFlags;
2772 
2775 typedef struct VmaPoolCreateInfo {
2778  uint32_t memoryTypeIndex;
2781  VmaPoolCreateFlags flags;
2790  VkDeviceSize blockSize;
2795  size_t minBlockCount;
2800  size_t maxBlockCount;
2817  uint32_t frameInUseCount;
2818 } VmaPoolCreateInfo;
2819 
2822 typedef struct VmaPoolStats {
2825  VkDeviceSize size;
2828  VkDeviceSize unusedSize;
2831  size_t allocationCount;
2834  size_t unusedRangeCount;
2841  VkDeviceSize unusedRangeSizeMax;
2844  size_t blockCount;
2845 } VmaPoolStats;
2846 
2853 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2854  VmaAllocator allocator,
2855  const VmaPoolCreateInfo* pCreateInfo,
2856  VmaPool* pPool);
2857 
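/*
Example (a sketch): creating a custom pool for a specific memory type.
memTypeIndex is assumed to come from e.g. vmaFindMemoryTypeIndex().

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB blocks.
    poolCreateInfo.maxBlockCount = 2; // Cap the pool at 256 MiB total.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Then set VmaAllocationCreateInfo::pool to allocate from it.
*/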
2860 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2861  VmaAllocator allocator,
2862  VmaPool pool);
2863 
2870 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2871  VmaAllocator allocator,
2872  VmaPool pool,
2873  VmaPoolStats* pPoolStats);
2874 
2881 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
2882  VmaAllocator allocator,
2883  VmaPool pool,
2884  size_t* pLostAllocationCount);
2885 
2900 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2901 
2908 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
2909  VmaAllocator allocator,
2910  VmaPool pool,
2911  const char** ppName);
2912 
2918 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
2919  VmaAllocator allocator,
2920  VmaPool pool,
2921  const char* pName);
2922 
2947 VK_DEFINE_HANDLE(VmaAllocation)
2948 
2949 
2951 typedef struct VmaAllocationInfo {
2956  uint32_t memoryType;
2965  VkDeviceMemory deviceMemory;
2970  VkDeviceSize offset;
2975  VkDeviceSize size;
2982  void* pMappedData;
2989  void* pUserData;
2990 } VmaAllocationInfo;
2991 
3002 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
3003  VmaAllocator allocator,
3004  const VkMemoryRequirements* pVkMemoryRequirements,
3005  const VmaAllocationCreateInfo* pCreateInfo,
3006  VmaAllocation* pAllocation,
3007  VmaAllocationInfo* pAllocationInfo);
3008 
3028 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
3029  VmaAllocator allocator,
3030  const VkMemoryRequirements* pVkMemoryRequirements,
3031  const VmaAllocationCreateInfo* pCreateInfo,
3032  size_t allocationCount,
3033  VmaAllocation* pAllocations,
3034  VmaAllocationInfo* pAllocationInfo);
3035 
3042 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
3043  VmaAllocator allocator,
3044  VkBuffer buffer,
3045  const VmaAllocationCreateInfo* pCreateInfo,
3046  VmaAllocation* pAllocation,
3047  VmaAllocationInfo* pAllocationInfo);
3048 
3050 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
3051  VmaAllocator allocator,
3052  VkImage image,
3053  const VmaAllocationCreateInfo* pCreateInfo,
3054  VmaAllocation* pAllocation,
3055  VmaAllocationInfo* pAllocationInfo);
3056 
3061 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
3062  VmaAllocator allocator,
3063  VmaAllocation allocation);
3064 
3075 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
3076  VmaAllocator allocator,
3077  size_t allocationCount,
3078  VmaAllocation* pAllocations);
3079 
3087 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
3088  VmaAllocator allocator,
3089  VmaAllocation allocation,
3090  VkDeviceSize newSize);
3091 
3108 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
3109  VmaAllocator allocator,
3110  VmaAllocation allocation,
3111  VmaAllocationInfo* pAllocationInfo);
3112 
3127 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
3128  VmaAllocator allocator,
3129  VmaAllocation allocation);
3130 
3144 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
3145  VmaAllocator allocator,
3146  VmaAllocation allocation,
3147  void* pUserData);
3148 
3159 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
3160  VmaAllocator allocator,
3161  VmaAllocation* pAllocation);
3162 
3201 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3202  VmaAllocator allocator,
3203  VmaAllocation allocation,
3204  void** ppData);
3205 
3214 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3215  VmaAllocator allocator,
3216  VmaAllocation allocation);
3217 
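/*
Example (a sketch): uploading data to a host-visible allocation with a paired
map/unmap. srcData and srcSize are hypothetical application values.

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, (size_t)srcSize);
        vmaUnmapMemory(allocator, allocation);
        // For non-HOST_COHERENT memory types, also call vmaFlushAllocation().
    }
*/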
3236 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3237 
3256 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3257 
3274 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
3275 
3282 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3283 
3284 typedef enum VmaDefragmentationFlagBits {
3286  VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
3287  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
3288 } VmaDefragmentationFlagBits;
3289 typedef VkFlags VmaDefragmentationFlags;
3290 
3295 typedef struct VmaDefragmentationInfo2 {
3299  VmaDefragmentationFlags flags;
3302  uint32_t allocationCount;
3309  VmaAllocation* pAllocations;
3315  VkBool32* pAllocationsChanged;
3319  uint32_t poolCount;
3331  VmaPool* pPools;
3340  VkDeviceSize maxCpuBytesToMove;
3345  uint32_t maxCpuAllocationsToMove;
3350  VkDeviceSize maxGpuBytesToMove;
3355  uint32_t maxGpuAllocationsToMove;
3364  VkCommandBuffer commandBuffer;
3365 } VmaDefragmentationInfo2;
3366 
3367 typedef struct VmaDefragmentationPassMoveInfo {
3368  VmaAllocation allocation;
3369  VkDeviceMemory memory;
3370  VkDeviceSize offset;
3371 } VmaDefragmentationPassMoveInfo;
3372 
3377 typedef struct VmaDefragmentationPassInfo {
3378  uint32_t moveCount;
3379  VmaDefragmentationPassMoveInfo* pMoves;
3380 } VmaDefragmentationPassInfo;
3381 
3386 typedef struct VmaDefragmentationInfo {
3391  VkDeviceSize maxBytesToMove;
3396  uint32_t maxAllocationsToMove;
3397 } VmaDefragmentationInfo;
3398 
3400 typedef struct VmaDefragmentationStats {
3402  VkDeviceSize bytesMoved;
3404  VkDeviceSize bytesFreed;
3406  uint32_t allocationsMoved;
3408  uint32_t deviceMemoryBlocksFreed;
3409 } VmaDefragmentationStats;
3410 
3440 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3441  VmaAllocator allocator,
3442  const VmaDefragmentationInfo2* pInfo,
3443  VmaDefragmentationStats* pStats,
3444  VmaDefragmentationContext *pContext);
3445 
3451 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3452  VmaAllocator allocator,
3453  VmaDefragmentationContext context);
3454 
3455 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
3456  VmaAllocator allocator,
3457  VmaDefragmentationContext context,
3458  VmaDefragmentationPassInfo* pInfo
3459 );
3460 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
3461  VmaAllocator allocator,
3462  VmaDefragmentationContext context
3463 );
3464 
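/*
Example (a sketch): CPU-side defragmentation of a set of allocations using the
VmaDefragmentationInfo2-based API. allocations and allocCount are hypothetical.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No byte limit.
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No count limit.

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must then be destroyed, recreated,
    // and bound again with vmaBindBufferMemory()/vmaBindImageMemory().
*/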
3505 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3506  VmaAllocator allocator,
3507  VmaAllocation* pAllocations,
3508  size_t allocationCount,
3509  VkBool32* pAllocationsChanged,
3510  const VmaDefragmentationInfo *pDefragmentationInfo,
3511  VmaDefragmentationStats* pDefragmentationStats);
3512 
3525 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3526  VmaAllocator allocator,
3527  VmaAllocation allocation,
3528  VkBuffer buffer);
3529 
3540 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3541  VmaAllocator allocator,
3542  VmaAllocation allocation,
3543  VkDeviceSize allocationLocalOffset,
3544  VkBuffer buffer,
3545  const void* pNext);
3546 
3559 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3560  VmaAllocator allocator,
3561  VmaAllocation allocation,
3562  VkImage image);
3563 
3574 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3575  VmaAllocator allocator,
3576  VmaAllocation allocation,
3577  VkDeviceSize allocationLocalOffset,
3578  VkImage image,
3579  const void* pNext);
3580 
3607 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3608  VmaAllocator allocator,
3609  const VkBufferCreateInfo* pBufferCreateInfo,
3610  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3611  VkBuffer* pBuffer,
3612  VmaAllocation* pAllocation,
3613  VmaAllocationInfo* pAllocationInfo);
3614 
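/*
Typical usage (a sketch): creating a device-local vertex buffer together with
its memory allocation in one call.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/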
3626 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3627  VmaAllocator allocator,
3628  VkBuffer buffer,
3629  VmaAllocation allocation);
3630 
3632 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3633  VmaAllocator allocator,
3634  const VkImageCreateInfo* pImageCreateInfo,
3635  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3636  VkImage* pImage,
3637  VmaAllocation* pAllocation,
3638  VmaAllocationInfo* pAllocationInfo);
3639 
3651 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3652  VmaAllocator allocator,
3653  VkImage image,
3654  VmaAllocation allocation);
3655 
3656 #ifdef __cplusplus
3657 }
3658 #endif
3659 
3660 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3661 
3662 // For Visual Studio IntelliSense.
3663 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3664 #define VMA_IMPLEMENTATION
3665 #endif
3666 
3667 #ifdef VMA_IMPLEMENTATION
3668 #undef VMA_IMPLEMENTATION
3669 
3670 #include <cstdint>
3671 #include <cstdlib>
3672 #include <cstring>
3673 #include <utility>
3674 #include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers below
3675 /*******************************************************************************
3676 CONFIGURATION SECTION
3677 
3678 Define some of these macros before each #include of this header, or change them
3679 here if you need behavior other than the default, depending on your environment.
3680 */
3681 
3682 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3683 //#define VMA_USE_STL_CONTAINERS 1
3684 
3685 /* Set this macro to 1 to make the library include and use STL containers:
3686 std::pair, std::vector, std::list, std::unordered_map.
3687 
3688 Set it to 0 or leave it undefined to make the library use its own implementation
3689 of the containers.
3690 */
3691 #if VMA_USE_STL_CONTAINERS
3692  #define VMA_USE_STL_VECTOR 1
3693  #define VMA_USE_STL_UNORDERED_MAP 1
3694  #define VMA_USE_STL_LIST 1
3695 #endif
3696 
3697 #ifndef VMA_USE_STL_SHARED_MUTEX
3698  // Compiler conforms to C++17.
3699  #if __cplusplus >= 201703L
3700  #define VMA_USE_STL_SHARED_MUTEX 1
3701  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
3702  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
3703  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3704  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3705  #define VMA_USE_STL_SHARED_MUTEX 1
3706  #else
3707  #define VMA_USE_STL_SHARED_MUTEX 0
3708  #endif
3709 #endif
3710 
3711 /*
3712 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3713 The library has its own container implementation.
3714 */
3715 #if VMA_USE_STL_VECTOR
3716  #include <vector>
3717 #endif
3718 
3719 #if VMA_USE_STL_UNORDERED_MAP
3720  #include <unordered_map>
3721 #endif
3722 
3723 #if VMA_USE_STL_LIST
3724  #include <list>
3725 #endif
3726 
3727 /*
3728 The following headers are used in this CONFIGURATION section only, so feel free to
3729 remove them if not needed.
3730 */
3731 #include <cassert> // for assert
3732 #include <algorithm> // for min, max
3733 #include <mutex>
3734 
3735 #ifndef VMA_NULL
3736  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3737  #define VMA_NULL nullptr
3738 #endif
3739 
3740 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3741 #include <cstdlib>
3742 void *aligned_alloc(size_t alignment, size_t size)
3743 {
3744  // alignment must be >= sizeof(void*)
3745  if(alignment < sizeof(void*))
3746  {
3747  alignment = sizeof(void*);
3748  }
3749 
3750  return memalign(alignment, size);
3751 }
3752 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3753 #include <cstdlib>
3754 void *aligned_alloc(size_t alignment, size_t size)
3755 {
3756  // alignment must be >= sizeof(void*)
3757  if(alignment < sizeof(void*))
3758  {
3759  alignment = sizeof(void*);
3760  }
3761 
3762  void *pointer;
3763  if(posix_memalign(&pointer, alignment, size) == 0)
3764  return pointer;
3765  return VMA_NULL;
3766 }
3767 #endif
3768 
3769 // If your compiler is not compatible with C++11 and the definition of the
3770 // aligned_alloc() function is missing, uncommenting the following line may help:
3771 
3772 //#include <malloc.h>
3773 
3774 // Normal assert to check for programmer errors, especially in the Debug configuration.
3775 #ifndef VMA_ASSERT
3776  #ifdef NDEBUG
3777  #define VMA_ASSERT(expr)
3778  #else
3779  #define VMA_ASSERT(expr) assert(expr)
3780  #endif
3781 #endif
3782 
3783 // Assert that will be called very often, e.g. inside data structures like operator[].
3784 // Making it non-empty can make the program slow.
3785 #ifndef VMA_HEAVY_ASSERT
3786  #ifdef NDEBUG
3787  #define VMA_HEAVY_ASSERT(expr)
3788  #else
3789  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3790  #endif
3791 #endif
3792 
3793 #ifndef VMA_ALIGN_OF
3794  #define VMA_ALIGN_OF(type) (__alignof(type))
3795 #endif
3796 
3797 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3798  #if defined(_WIN32)
3799  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3800  #else
3801  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3802  #endif
3803 #endif
3804 
3805 #ifndef VMA_SYSTEM_FREE
3806  #if defined(_WIN32)
3807  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3808  #else
3809  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3810  #endif
3811 #endif
3812 
3813 #ifndef VMA_MIN
3814  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3815 #endif
3816 
3817 #ifndef VMA_MAX
3818  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3819 #endif
3820 
3821 #ifndef VMA_SWAP
3822  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3823 #endif
3824 
3825 #ifndef VMA_SORT
3826  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3827 #endif
3828 
3829 #ifndef VMA_DEBUG_LOG
3830  #define VMA_DEBUG_LOG(format, ...)
3831  /*
3832  #define VMA_DEBUG_LOG(format, ...) do { \
3833  printf(format, __VA_ARGS__); \
3834  printf("\n"); \
3835  } while(false)
3836  */
3837 #endif
3838 
3839 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3840 #if VMA_STATS_STRING_ENABLED
3841  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3842  {
3843  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3844  }
3845  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3846  {
3847  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3848  }
3849  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3850  {
3851  snprintf(outStr, strLen, "%p", ptr);
3852  }
3853 #endif
3854 
3855 #ifndef VMA_MUTEX
3856  class VmaMutex
3857  {
3858  public:
3859  void Lock() { m_Mutex.lock(); }
3860  void Unlock() { m_Mutex.unlock(); }
3861  bool TryLock() { return m_Mutex.try_lock(); }
3862  private:
3863  std::mutex m_Mutex;
3864  };
3865  #define VMA_MUTEX VmaMutex
3866 #endif
3867 
3868 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3869 #ifndef VMA_RW_MUTEX
3870  #if VMA_USE_STL_SHARED_MUTEX
3871  // Use std::shared_mutex from C++17.
3872  #include <shared_mutex>
3873  class VmaRWMutex
3874  {
3875  public:
3876  void LockRead() { m_Mutex.lock_shared(); }
3877  void UnlockRead() { m_Mutex.unlock_shared(); }
3878  bool TryLockRead() { return m_Mutex.try_lock_shared(); }
3879  void LockWrite() { m_Mutex.lock(); }
3880  void UnlockWrite() { m_Mutex.unlock(); }
3881  bool TryLockWrite() { return m_Mutex.try_lock(); }
3882  private:
3883  std::shared_mutex m_Mutex;
3884  };
3885  #define VMA_RW_MUTEX VmaRWMutex
3886  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3887  // Use SRWLOCK from WinAPI.
3888  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3889  class VmaRWMutex
3890  {
3891  public:
3892  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3893  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3894  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3895  bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
3896  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3897  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3898  bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
3899  private:
3900  SRWLOCK m_Lock;
3901  };
3902  #define VMA_RW_MUTEX VmaRWMutex
3903  #else
3904  // Less efficient fallback: Use normal mutex.
3905  class VmaRWMutex
3906  {
3907  public:
3908  void LockRead() { m_Mutex.Lock(); }
3909  void UnlockRead() { m_Mutex.Unlock(); }
3910  bool TryLockRead() { return m_Mutex.TryLock(); }
3911  void LockWrite() { m_Mutex.Lock(); }
3912  void UnlockWrite() { m_Mutex.Unlock(); }
3913  bool TryLockWrite() { return m_Mutex.TryLock(); }
3914  private:
3915  VMA_MUTEX m_Mutex;
3916  };
3917  #define VMA_RW_MUTEX VmaRWMutex
3918  #endif // #if VMA_USE_STL_SHARED_MUTEX
3919 #endif // #ifndef VMA_RW_MUTEX
3920 
3921 /*
3922 If providing your own implementation, you need to implement a subset of std::atomic.
3923 */
3924 #ifndef VMA_ATOMIC_UINT32
3925  #include <atomic>
3926  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3927 #endif
3928 
3929 #ifndef VMA_ATOMIC_UINT64
3930  #include <atomic>
3931  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3932 #endif
3933 
3934 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3935 
3939  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3940 #endif
3941 
3942 #ifndef VMA_DEBUG_ALIGNMENT
3943 
3947  #define VMA_DEBUG_ALIGNMENT (1)
3948 #endif
3949 
3950 #ifndef VMA_DEBUG_MARGIN
3951 
3955  #define VMA_DEBUG_MARGIN (0)
3956 #endif
3957 
3958 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3959 
3963  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3964 #endif
3965 
3966 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3967 
3972  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3973 #endif
3974 
3975 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3976 
3980  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3981 #endif
3982 
3983 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3984 
3988  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3989 #endif
3990 
3991 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3992  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3994 #endif
3995 
3996 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3997  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3999 #endif
4000 
4001 #ifndef VMA_CLASS_NO_COPY
4002  #define VMA_CLASS_NO_COPY(className) \
4003  private: \
4004  className(const className&) = delete; \
4005  className& operator=(const className&) = delete;
4006 #endif
4007 
4008 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
4009 
4010 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
4011 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
4012 
4013 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
4014 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
4015 
4016 /*******************************************************************************
4017 END OF CONFIGURATION
4018 */
4019 
4020 // # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
4021 
4022 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
4023 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
4024 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
4025 
4026 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
4027 
4028 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
4029  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
4030 
4031 // Returns the number of bits set to 1 in (v).
4032 static inline uint32_t VmaCountBitsSet(uint32_t v)
4033 {
4034  uint32_t c = v - ((v >> 1) & 0x55555555);
4035  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
4036  c = ((c >> 4) + c) & 0x0F0F0F0F;
4037  c = ((c >> 8) + c) & 0x00FF00FF;
4038  c = ((c >> 16) + c) & 0x0000FFFF;
4039  return c;
4040 }
4041 
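// Note (added for clarity): the function above is a classic SWAR ("SIMD within a
// register") popcount. Each step sums bit counts in progressively wider fields
// (2, 4, 8, 16, then 32 bits), so e.g. VmaCountBitsSet(0x0000000F) == 4 and
// VmaCountBitsSet(0xFFFFFFFF) == 32.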
4042 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
4043 // Use types like uint32_t, uint64_t as T.
4044 template <typename T>
4045 static inline T VmaAlignUp(T val, T align)
4046 {
4047  return (val + align - 1) / align * align;
4048 }
4049 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
4050 // Use types like uint32_t, uint64_t as T.
4051 template <typename T>
4052 static inline T VmaAlignDown(T val, T align)
4053 {
4054  return val / align * align;
4055 }
4056 
4057 // Division with mathematical rounding to nearest number.
4058 template <typename T>
4059 static inline T VmaRoundDiv(T x, T y)
4060 {
4061  return (x + (y / (T)2)) / y;
4062 }
4063 
4064 /*
4065 Returns true if given number is a power of two.
4066 T must be an unsigned integer, or a signed integer that is always nonnegative.
4067 For 0 it returns true.
4068 */
4069 template <typename T>
4070 inline bool VmaIsPow2(T x)
4071 {
4072  return (x & (x-1)) == 0;
4073 }
4074 
4075 // Returns the smallest power of 2 greater than or equal to v.
4076 static inline uint32_t VmaNextPow2(uint32_t v)
4077 {
4078  v--;
4079  v |= v >> 1;
4080  v |= v >> 2;
4081  v |= v >> 4;
4082  v |= v >> 8;
4083  v |= v >> 16;
4084  v++;
4085  return v;
4086 }
4087 static inline uint64_t VmaNextPow2(uint64_t v)
4088 {
4089  v--;
4090  v |= v >> 1;
4091  v |= v >> 2;
4092  v |= v >> 4;
4093  v |= v >> 8;
4094  v |= v >> 16;
4095  v |= v >> 32;
4096  v++;
4097  return v;
4098 }
4099 
4100 // Returns the largest power of 2 less than or equal to v.
4101 static inline uint32_t VmaPrevPow2(uint32_t v)
4102 {
4103  v |= v >> 1;
4104  v |= v >> 2;
4105  v |= v >> 4;
4106  v |= v >> 8;
4107  v |= v >> 16;
4108  v = v ^ (v >> 1);
4109  return v;
4110 }
4111 static inline uint64_t VmaPrevPow2(uint64_t v)
4112 {
4113  v |= v >> 1;
4114  v |= v >> 2;
4115  v |= v >> 4;
4116  v |= v >> 8;
4117  v |= v >> 16;
4118  v |= v >> 32;
4119  v = v ^ (v >> 1);
4120  return v;
4121 }
4122 
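// Worked examples: VmaNextPow2(13) == 16 and VmaNextPow2(16) == 16;
// VmaPrevPow2(13) == 8 and VmaPrevPow2(16) == 16.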
4123 static inline bool VmaStrIsEmpty(const char* pStr)
4124 {
4125  return pStr == VMA_NULL || *pStr == '\0';
4126 }
4127 
4128 #if VMA_STATS_STRING_ENABLED
4129 
4130 static const char* VmaAlgorithmToStr(uint32_t algorithm)
4131 {
4132  switch(algorithm)
4133  {
4134  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
4135  return "Linear";
4136  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
4137  return "Buddy";
4138  case 0:
4139  return "Default";
4140  default:
4141  VMA_ASSERT(0);
4142  return "";
4143  }
4144 }
4145 
4146 #endif // #if VMA_STATS_STRING_ENABLED
4147 
4148 #ifndef VMA_SORT
4149 
4150 template<typename Iterator, typename Compare>
4151 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
4152 {
4153  Iterator centerValue = end; --centerValue;
4154  Iterator insertIndex = beg;
4155  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
4156  {
4157  if(cmp(*memTypeIndex, *centerValue))
4158  {
4159  if(insertIndex != memTypeIndex)
4160  {
4161  VMA_SWAP(*memTypeIndex, *insertIndex);
4162  }
4163  ++insertIndex;
4164  }
4165  }
4166  if(insertIndex != centerValue)
4167  {
4168  VMA_SWAP(*insertIndex, *centerValue);
4169  }
4170  return insertIndex;
4171 }
4172 
4173 template<typename Iterator, typename Compare>
4174 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
4175 {
4176  if(beg < end)
4177  {
4178  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
4179  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
4180  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
4181  }
4182 }
4183 
4184 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
4185 
4186 #endif // #ifndef VMA_SORT
4187 
4188 /*
4189 Returns true if two memory blocks occupy overlapping pages.
4190 ResourceA must be at a lower memory offset than ResourceB.
4191 
4192 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
4193 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
4194 */
4195 static inline bool VmaBlocksOnSamePage(
4196  VkDeviceSize resourceAOffset,
4197  VkDeviceSize resourceASize,
4198  VkDeviceSize resourceBOffset,
4199  VkDeviceSize pageSize)
4200 {
4201  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
4202  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
4203  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
4204  VkDeviceSize resourceBStart = resourceBOffset;
4205  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
4206  return resourceAEndPage == resourceBStartPage;
4207 }
4208 
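// Worked example (added for clarity), pageSize = 4096: a resource at [0, 4000)
// ends on page 0 and a resource starting at offset 4000 also starts on page 0,
// so the function returns true (they share a page); if the second resource
// started at offset 4096 instead, it would return false.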
4209 enum VmaSuballocationType
4210 {
4211  VMA_SUBALLOCATION_TYPE_FREE = 0,
4212  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
4213  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
4214  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
4215  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
4216  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
4217  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
4218 };
4219 
4220 /*
4221 Returns true if given suballocation types could conflict and must respect
4222 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4223 or linear image and the other one is an optimal image. If a type is unknown, behave
4224 conservatively.
4225 */
4226 static inline bool VmaIsBufferImageGranularityConflict(
4227  VmaSuballocationType suballocType1,
4228  VmaSuballocationType suballocType2)
4229 {
4230  if(suballocType1 > suballocType2)
4231  {
4232  VMA_SWAP(suballocType1, suballocType2);
4233  }
4234 
4235  switch(suballocType1)
4236  {
4237  case VMA_SUBALLOCATION_TYPE_FREE:
4238  return false;
4239  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4240  return true;
4241  case VMA_SUBALLOCATION_TYPE_BUFFER:
4242  return
4243  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4244  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4245  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4246  return
4247  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4248  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4249  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4250  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4251  return
4252  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4253  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4254  return false;
4255  default:
4256  VMA_ASSERT(0);
4257  return true;
4258  }
4259 }
4260 
4261 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
4262 {
4263 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4264  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
4265  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4266  for(size_t i = 0; i < numberCount; ++i, ++pDst)
4267  {
4268  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
4269  }
4270 #else
4271  // no-op
4272 #endif
4273 }
4274 
4275 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
4276 {
4277 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4278  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
4279  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4280  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4281  {
4282  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4283  {
4284  return false;
4285  }
4286  }
4287 #endif
4288  return true;
4289 }
4290 
4291 /*
4292 Fills structure with parameters of an example buffer to be used for transfers
4293 during GPU memory defragmentation.
4294 */
4295 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4296 {
4297  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4298  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4299  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4300  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4301 }
4302 
4303 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4304 struct VmaMutexLock
4305 {
4306  VMA_CLASS_NO_COPY(VmaMutexLock)
4307 public:
4308  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4309  m_pMutex(useMutex ? &mutex : VMA_NULL)
4310  { if(m_pMutex) { m_pMutex->Lock(); } }
4311  ~VmaMutexLock()
4312  { if(m_pMutex) { m_pMutex->Unlock(); } }
4313 private:
4314  VMA_MUTEX* m_pMutex;
4315 };
4316 
4317 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4318 struct VmaMutexLockRead
4319 {
4320  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4321 public:
4322  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4323  m_pMutex(useMutex ? &mutex : VMA_NULL)
4324  { if(m_pMutex) { m_pMutex->LockRead(); } }
4325  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4326 private:
4327  VMA_RW_MUTEX* m_pMutex;
4328 };
4329 
4330 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4331 struct VmaMutexLockWrite
4332 {
4333  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4334 public:
4335  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4336  m_pMutex(useMutex ? &mutex : VMA_NULL)
4337  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4338  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4339 private:
4340  VMA_RW_MUTEX* m_pMutex;
4341 };
4342 
4343 #if VMA_DEBUG_GLOBAL_MUTEX
4344  static VMA_MUTEX gDebugGlobalMutex;
4345  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4346 #else
4347  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4348 #endif
4349 
4350 // Minimum size of a free suballocation to register it in the free suballocation collection.
4351 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4352 
4353 /*
4354 Performs binary search and returns an iterator to the first element that is greater
4355 than or equal to (key), according to comparison (cmp).
4356 
4357 Cmp should return true if the first argument is less than the second argument.
4358 
4359 The returned iterator points to the found element, if present in the collection,
4360 or to the place where a new element with value (key) should be inserted.
4361 */
4362 template <typename CmpLess, typename IterT, typename KeyT>
4363 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
4364 {
4365  size_t down = 0, up = (end - beg);
4366  while(down < up)
4367  {
4368  const size_t mid = (down + up) / 2;
4369  if(cmp(*(beg+mid), key))
4370  {
4371  down = mid + 1;
4372  }
4373  else
4374  {
4375  up = mid;
4376  }
4377  }
4378  return beg + down;
4379 }
4380 
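// Example (added for clarity): on a sorted array {1, 3, 3, 7} with cmp = operator<,
// searching for key 3 returns an iterator to the first 3 (index 1); searching for
// key 4 returns an iterator to 7 (index 3), the insertion point that keeps the
// array sorted.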
4381 template<typename CmpLess, typename IterT, typename KeyT>
4382 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4383 {
4384  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4385  beg, end, value, cmp);
4386  if(it == end ||
4387  (!cmp(*it, value) && !cmp(value, *it)))
4388  {
4389  return it;
4390  }
4391  return end;
4392 }
4393 
4394 /*
4395 Returns true if all pointers in the array are non-null and unique.
4396 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4397 T must be a pointer type, e.g. VmaAllocation, VmaPool.
4398 */
4399 template<typename T>
4400 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4401 {
4402  for(uint32_t i = 0; i < count; ++i)
4403  {
4404  const T iPtr = arr[i];
4405  if(iPtr == VMA_NULL)
4406  {
4407  return false;
4408  }
4409  for(uint32_t j = i + 1; j < count; ++j)
4410  {
4411  if(iPtr == arr[j])
4412  {
4413  return false;
4414  }
4415  }
4416  }
4417  return true;
4418 }
4419 
4420 template<typename MainT, typename NewT>
4421 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
4422 {
4423  newStruct->pNext = mainStruct->pNext;
4424  mainStruct->pNext = newStruct;
4425 }
4426 
4428 // Memory allocation
4429 
4430 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4431 {
4432  if((pAllocationCallbacks != VMA_NULL) &&
4433  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4434  {
4435  return (*pAllocationCallbacks->pfnAllocation)(
4436  pAllocationCallbacks->pUserData,
4437  size,
4438  alignment,
4439  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4440  }
4441  else
4442  {
4443  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4444  }
4445 }
4446 
4447 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4448 {
4449  if((pAllocationCallbacks != VMA_NULL) &&
4450  (pAllocationCallbacks->pfnFree != VMA_NULL))
4451  {
4452  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4453  }
4454  else
4455  {
4456  VMA_SYSTEM_FREE(ptr);
4457  }
4458 }
4459 
4460 template<typename T>
4461 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4462 {
4463  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4464 }
4465 
4466 template<typename T>
4467 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4468 {
4469  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4470 }
4471 
4472 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4473 
4474 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4475 
4476 template<typename T>
4477 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4478 {
4479  ptr->~T();
4480  VmaFree(pAllocationCallbacks, ptr);
4481 }
4482 
4483 template<typename T>
4484 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4485 {
4486  if(ptr != VMA_NULL)
4487  {
4488  for(size_t i = count; i--; )
4489  {
4490  ptr[i].~T();
4491  }
4492  VmaFree(pAllocationCallbacks, ptr);
4493  }
4494 }
4495 
4496 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4497 {
4498  if(srcStr != VMA_NULL)
4499  {
4500  const size_t len = strlen(srcStr);
4501  char* const result = vma_new_array(allocs, char, len + 1);
4502  memcpy(result, srcStr, len + 1);
4503  return result;
4504  }
4505  else
4506  {
4507  return VMA_NULL;
4508  }
4509 }
4510 
4511 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4512 {
4513  if(str != VMA_NULL)
4514  {
4515  const size_t len = strlen(str);
4516  vma_delete_array(allocs, str, len + 1);
4517  }
4518 }
4519 
4520 // STL-compatible allocator.
4521 template<typename T>
4522 class VmaStlAllocator
4523 {
4524 public:
4525  const VkAllocationCallbacks* const m_pCallbacks;
4526  typedef T value_type;
4527 
4528  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4529  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4530 
4531  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4532  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4533 
4534  template<typename U>
4535  bool operator==(const VmaStlAllocator<U>& rhs) const
4536  {
4537  return m_pCallbacks == rhs.m_pCallbacks;
4538  }
4539  template<typename U>
4540  bool operator!=(const VmaStlAllocator<U>& rhs) const
4541  {
4542  return m_pCallbacks != rhs.m_pCallbacks;
4543  }
4544 
4545  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4546 };
4547 
4548 #if VMA_USE_STL_VECTOR
4549 
4550 #define VmaVector std::vector
4551 
4552 template<typename T, typename allocatorT>
4553 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4554 {
4555  vec.insert(vec.begin() + index, item);
4556 }
4557 
4558 template<typename T, typename allocatorT>
4559 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4560 {
4561  vec.erase(vec.begin() + index);
4562 }
4563 
4564 #else // #if VMA_USE_STL_VECTOR
4565 
4566 /* Class with interface compatible with a subset of std::vector.
4567 T must be POD because constructors and destructors are not called and memcpy is
4568 used to copy these objects. */
4569 template<typename T, typename AllocatorT>
4570 class VmaVector
4571 {
4572 public:
4573  typedef T value_type;
4574 
4575  VmaVector(const AllocatorT& allocator) :
4576  m_Allocator(allocator),
4577  m_pArray(VMA_NULL),
4578  m_Count(0),
4579  m_Capacity(0)
4580  {
4581  }
4582 
4583  VmaVector(size_t count, const AllocatorT& allocator) :
4584  m_Allocator(allocator),
4585  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4586  m_Count(count),
4587  m_Capacity(count)
4588  {
4589  }
4590 
4591  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4592  // value is unused.
4593  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4594  : VmaVector(count, allocator) {}
4595 
4596  VmaVector(const VmaVector<T, AllocatorT>& src) :
4597  m_Allocator(src.m_Allocator),
4598  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4599  m_Count(src.m_Count),
4600  m_Capacity(src.m_Count)
4601  {
4602  if(m_Count != 0)
4603  {
4604  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4605  }
4606  }
4607 
4608  ~VmaVector()
4609  {
4610  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4611  }
4612 
4613  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4614  {
4615  if(&rhs != this)
4616  {
4617  resize(rhs.m_Count);
4618  if(m_Count != 0)
4619  {
4620  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4621  }
4622  }
4623  return *this;
4624  }
4625 
4626  bool empty() const { return m_Count == 0; }
4627  size_t size() const { return m_Count; }
4628  T* data() { return m_pArray; }
4629  const T* data() const { return m_pArray; }
4630 
4631  T& operator[](size_t index)
4632  {
4633  VMA_HEAVY_ASSERT(index < m_Count);
4634  return m_pArray[index];
4635  }
4636  const T& operator[](size_t index) const
4637  {
4638  VMA_HEAVY_ASSERT(index < m_Count);
4639  return m_pArray[index];
4640  }
4641 
4642  T& front()
4643  {
4644  VMA_HEAVY_ASSERT(m_Count > 0);
4645  return m_pArray[0];
4646  }
4647  const T& front() const
4648  {
4649  VMA_HEAVY_ASSERT(m_Count > 0);
4650  return m_pArray[0];
4651  }
4652  T& back()
4653  {
4654  VMA_HEAVY_ASSERT(m_Count > 0);
4655  return m_pArray[m_Count - 1];
4656  }
4657  const T& back() const
4658  {
4659  VMA_HEAVY_ASSERT(m_Count > 0);
4660  return m_pArray[m_Count - 1];
4661  }
4662 
4663  void reserve(size_t newCapacity, bool freeMemory = false)
4664  {
4665  newCapacity = VMA_MAX(newCapacity, m_Count);
4666 
4667  if((newCapacity < m_Capacity) && !freeMemory)
4668  {
4669  newCapacity = m_Capacity;
4670  }
4671 
4672  if(newCapacity != m_Capacity)
4673  {
4674  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4675  if(m_Count != 0)
4676  {
4677  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4678  }
4679  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4680  m_Capacity = newCapacity;
4681  m_pArray = newArray;
4682  }
4683  }
4684 
4685  void resize(size_t newCount, bool freeMemory = false)
4686  {
4687  size_t newCapacity = m_Capacity;
4688  if(newCount > m_Capacity)
4689  {
4690  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4691  }
4692  else if(freeMemory)
4693  {
4694  newCapacity = newCount;
4695  }
4696 
4697  if(newCapacity != m_Capacity)
4698  {
4699  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4700  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4701  if(elementsToCopy != 0)
4702  {
4703  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4704  }
4705  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4706  m_Capacity = newCapacity;
4707  m_pArray = newArray;
4708  }
4709 
4710  m_Count = newCount;
4711  }
4712 
4713  void clear(bool freeMemory = false)
4714  {
4715  resize(0, freeMemory);
4716  }
4717 
4718  void insert(size_t index, const T& src)
4719  {
4720  VMA_HEAVY_ASSERT(index <= m_Count);
4721  const size_t oldCount = size();
4722  resize(oldCount + 1);
4723  if(index < oldCount)
4724  {
4725  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4726  }
4727  m_pArray[index] = src;
4728  }
4729 
4730  void remove(size_t index)
4731  {
4732  VMA_HEAVY_ASSERT(index < m_Count);
4733  const size_t oldCount = size();
4734  if(index < oldCount - 1)
4735  {
4736  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4737  }
4738  resize(oldCount - 1);
4739  }
4740 
4741  void push_back(const T& src)
4742  {
4743  const size_t newIndex = size();
4744  resize(newIndex + 1);
4745  m_pArray[newIndex] = src;
4746  }
4747 
4748  void pop_back()
4749  {
4750  VMA_HEAVY_ASSERT(m_Count > 0);
4751  resize(size() - 1);
4752  }
4753 
4754  void push_front(const T& src)
4755  {
4756  insert(0, src);
4757  }
4758 
4759  void pop_front()
4760  {
4761  VMA_HEAVY_ASSERT(m_Count > 0);
4762  remove(0);
4763  }
4764 
4765  typedef T* iterator;
4766 
4767  iterator begin() { return m_pArray; }
4768  iterator end() { return m_pArray + m_Count; }
4769 
4770 private:
4771  AllocatorT m_Allocator;
4772  T* m_pArray;
4773  size_t m_Count;
4774  size_t m_Capacity;
4775 };
4776 
4777 template<typename T, typename allocatorT>
4778 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4779 {
4780  vec.insert(index, item);
4781 }
4782 
4783 template<typename T, typename allocatorT>
4784 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4785 {
4786  vec.remove(index);
4787 }
4788 
4789 #endif // #if VMA_USE_STL_VECTOR
4790 
4791 template<typename CmpLess, typename VectorT>
4792 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4793 {
4794  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4795  vector.data(),
4796  vector.data() + vector.size(),
4797  value,
4798  CmpLess()) - vector.data();
4799  VmaVectorInsert(vector, indexToInsert, value);
4800  return indexToInsert;
4801 }
4802 
4803 template<typename CmpLess, typename VectorT>
4804 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4805 {
4806  CmpLess comparator;
4807  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4808  vector.begin(),
4809  vector.end(),
4810  value,
4811  comparator);
4812  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4813  {
4814  size_t indexToRemove = it - vector.begin();
4815  VmaVectorRemove(vector, indexToRemove);
4816  return true;
4817  }
4818  return false;
4819 }
4820 
4822 // class VmaPoolAllocator
4823 
4824 /*
4825 Allocator for objects of type T using a list of arrays (pools) to speed up
4826 allocation. The number of elements that can be allocated is not bounded, because
4827 the allocator can create multiple blocks.
4828 */
4829 template<typename T>
4830 class VmaPoolAllocator
4831 {
4832  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4833 public:
4834  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4835  ~VmaPoolAllocator();
4836  template<typename... Types> T* Alloc(Types... args);
4837  void Free(T* ptr);
4838 
4839 private:
4840  union Item
4841  {
4842  uint32_t NextFreeIndex;
4843  alignas(T) char Value[sizeof(T)];
4844  };
4845 
4846  struct ItemBlock
4847  {
4848  Item* pItems;
4849  uint32_t Capacity;
4850  uint32_t FirstFreeIndex;
4851  };
4852 
4853  const VkAllocationCallbacks* m_pAllocationCallbacks;
4854  const uint32_t m_FirstBlockCapacity;
4855  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4856 
4857  ItemBlock& CreateNewBlock();
4858 };
4859 
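/*
Usage sketch (hypothetical type Foo): the pool hands out constructed objects
from its blocks and recycles freed slots through the intrusive free list.

    VmaPoolAllocator<Foo> fooAllocator(allocationCallbacks, 32); // first block: 32 items
    Foo* foo = fooAllocator.Alloc(arg1, arg2); // forwards args to Foo's constructor
    fooAllocator.Free(foo); // calls ~Foo() and returns the slot to the free list
*/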
4860 template<typename T>
4861 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4862  m_pAllocationCallbacks(pAllocationCallbacks),
4863  m_FirstBlockCapacity(firstBlockCapacity),
4864  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4865 {
4866  VMA_ASSERT(m_FirstBlockCapacity > 1);
4867 }
4868 
4869 template<typename T>
4870 VmaPoolAllocator<T>::~VmaPoolAllocator()
4871 {
4872  for(size_t i = m_ItemBlocks.size(); i--; )
4873  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4874  m_ItemBlocks.clear();
4875 }
4876 
4877 template<typename T>
4878 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
4879 {
4880  for(size_t i = m_ItemBlocks.size(); i--; )
4881  {
4882  ItemBlock& block = m_ItemBlocks[i];
4883  // This block has some free items: Use first one.
4884  if(block.FirstFreeIndex != UINT32_MAX)
4885  {
4886  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4887  block.FirstFreeIndex = pItem->NextFreeIndex;
4888  T* result = (T*)&pItem->Value;
4889  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
4890  return result;
4891  }
4892  }
4893 
4894  // No block has free item: Create new one and use it.
4895  ItemBlock& newBlock = CreateNewBlock();
4896  Item* const pItem = &newBlock.pItems[0];
4897  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4898  T* result = (T*)&pItem->Value;
4899  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
4900  return result;
4901 }
4902 
4903 template<typename T>
4904 void VmaPoolAllocator<T>::Free(T* ptr)
4905 {
4906  // Search all memory blocks to find ptr.
4907  for(size_t i = m_ItemBlocks.size(); i--; )
4908  {
4909  ItemBlock& block = m_ItemBlocks[i];
4910 
4911  // Casting to union.
4912  Item* pItemPtr;
4913  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4914 
4915  // Check if pItemPtr is in address range of this block.
4916  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4917  {
4918  ptr->~T(); // Explicit destructor call.
4919  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4920  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4921  block.FirstFreeIndex = index;
4922  return;
4923  }
4924  }
4925  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4926 }
4927 
4928 template<typename T>
4929 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4930 {
4931  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4932  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4933 
4934  const ItemBlock newBlock = {
4935  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4936  newBlockCapacity,
4937  0 };
4938 
4939  m_ItemBlocks.push_back(newBlock);
4940 
4941  // Set up a singly-linked list of all free items in this block.
4942  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4943  newBlock.pItems[i].NextFreeIndex = i + 1;
4944  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4945  return m_ItemBlocks.back();
4946 }
4947 
4949 // class VmaRawList, VmaList
4950 
4951 #if VMA_USE_STL_LIST
4952 
4953 #define VmaList std::list
4954 
4955 #else // #if VMA_USE_STL_LIST
4956 
4957 template<typename T>
4958 struct VmaListItem
4959 {
4960  VmaListItem* pPrev;
4961  VmaListItem* pNext;
4962  T Value;
4963 };
4964 
4965 // Doubly linked list.
4966 template<typename T>
4967 class VmaRawList
4968 {
4969  VMA_CLASS_NO_COPY(VmaRawList)
4970 public:
4971  typedef VmaListItem<T> ItemType;
4972 
4973  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4974  ~VmaRawList();
4975  void Clear();
4976 
4977  size_t GetCount() const { return m_Count; }
4978  bool IsEmpty() const { return m_Count == 0; }
4979 
4980  ItemType* Front() { return m_pFront; }
4981  const ItemType* Front() const { return m_pFront; }
4982  ItemType* Back() { return m_pBack; }
4983  const ItemType* Back() const { return m_pBack; }
4984 
4985  ItemType* PushBack();
4986  ItemType* PushFront();
4987  ItemType* PushBack(const T& value);
4988  ItemType* PushFront(const T& value);
4989  void PopBack();
4990  void PopFront();
4991 
4992  // Item can be null - it means PushBack.
4993  ItemType* InsertBefore(ItemType* pItem);
4994  // Item can be null - it means PushFront.
4995  ItemType* InsertAfter(ItemType* pItem);
4996 
4997  ItemType* InsertBefore(ItemType* pItem, const T& value);
4998  ItemType* InsertAfter(ItemType* pItem, const T& value);
4999 
5000  void Remove(ItemType* pItem);
5001 
5002 private:
5003  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5004  VmaPoolAllocator<ItemType> m_ItemAllocator;
5005  ItemType* m_pFront;
5006  ItemType* m_pBack;
5007  size_t m_Count;
5008 };
5009 
5010 template<typename T>
5011 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5012  m_pAllocationCallbacks(pAllocationCallbacks),
5013  m_ItemAllocator(pAllocationCallbacks, 128),
5014  m_pFront(VMA_NULL),
5015  m_pBack(VMA_NULL),
5016  m_Count(0)
5017 {
5018 }
5019 
5020 template<typename T>
5021 VmaRawList<T>::~VmaRawList()
5022 {
5023  // Intentionally not calling Clear, because that would waste time
5024  // returning all items to m_ItemAllocator as free.
5025 }
5026 
5027 template<typename T>
5028 void VmaRawList<T>::Clear()
5029 {
5030  if(IsEmpty() == false)
5031  {
5032  ItemType* pItem = m_pBack;
5033  while(pItem != VMA_NULL)
5034  {
5035  ItemType* const pPrevItem = pItem->pPrev;
5036  m_ItemAllocator.Free(pItem);
5037  pItem = pPrevItem;
5038  }
5039  m_pFront = VMA_NULL;
5040  m_pBack = VMA_NULL;
5041  m_Count = 0;
5042  }
5043 }
5044 
5045 template<typename T>
5046 VmaListItem<T>* VmaRawList<T>::PushBack()
5047 {
5048  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5049  pNewItem->pNext = VMA_NULL;
5050  if(IsEmpty())
5051  {
5052  pNewItem->pPrev = VMA_NULL;
5053  m_pFront = pNewItem;
5054  m_pBack = pNewItem;
5055  m_Count = 1;
5056  }
5057  else
5058  {
5059  pNewItem->pPrev = m_pBack;
5060  m_pBack->pNext = pNewItem;
5061  m_pBack = pNewItem;
5062  ++m_Count;
5063  }
5064  return pNewItem;
5065 }
5066 
5067 template<typename T>
5068 VmaListItem<T>* VmaRawList<T>::PushFront()
5069 {
5070  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5071  pNewItem->pPrev = VMA_NULL;
5072  if(IsEmpty())
5073  {
5074  pNewItem->pNext = VMA_NULL;
5075  m_pFront = pNewItem;
5076  m_pBack = pNewItem;
5077  m_Count = 1;
5078  }
5079  else
5080  {
5081  pNewItem->pNext = m_pFront;
5082  m_pFront->pPrev = pNewItem;
5083  m_pFront = pNewItem;
5084  ++m_Count;
5085  }
5086  return pNewItem;
5087 }
5088 
5089 template<typename T>
5090 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5091 {
5092  ItemType* const pNewItem = PushBack();
5093  pNewItem->Value = value;
5094  return pNewItem;
5095 }
5096 
5097 template<typename T>
5098 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5099 {
5100  ItemType* const pNewItem = PushFront();
5101  pNewItem->Value = value;
5102  return pNewItem;
5103 }
5104 
5105 template<typename T>
5106 void VmaRawList<T>::PopBack()
5107 {
5108  VMA_HEAVY_ASSERT(m_Count > 0);
5109  ItemType* const pBackItem = m_pBack;
5110  ItemType* const pPrevItem = pBackItem->pPrev;
5111  if(pPrevItem != VMA_NULL)
5112  {
5113  pPrevItem->pNext = VMA_NULL;
5114  }
5115  m_pBack = pPrevItem;
5116  m_ItemAllocator.Free(pBackItem);
5117  --m_Count;
5118 }
5119 
5120 template<typename T>
5121 void VmaRawList<T>::PopFront()
5122 {
5123  VMA_HEAVY_ASSERT(m_Count > 0);
5124  ItemType* const pFrontItem = m_pFront;
5125  ItemType* const pNextItem = pFrontItem->pNext;
5126  if(pNextItem != VMA_NULL)
5127  {
5128  pNextItem->pPrev = VMA_NULL;
5129  }
5130  m_pFront = pNextItem;
5131  m_ItemAllocator.Free(pFrontItem);
5132  --m_Count;
5133 }
5134 
5135 template<typename T>
5136 void VmaRawList<T>::Remove(ItemType* pItem)
5137 {
5138  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5139  VMA_HEAVY_ASSERT(m_Count > 0);
5140 
5141  if(pItem->pPrev != VMA_NULL)
5142  {
5143  pItem->pPrev->pNext = pItem->pNext;
5144  }
5145  else
5146  {
5147  VMA_HEAVY_ASSERT(m_pFront == pItem);
5148  m_pFront = pItem->pNext;
5149  }
5150 
5151  if(pItem->pNext != VMA_NULL)
5152  {
5153  pItem->pNext->pPrev = pItem->pPrev;
5154  }
5155  else
5156  {
5157  VMA_HEAVY_ASSERT(m_pBack == pItem);
5158  m_pBack = pItem->pPrev;
5159  }
5160 
5161  m_ItemAllocator.Free(pItem);
5162  --m_Count;
5163 }
5164 
5165 template<typename T>
5166 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5167 {
5168  if(pItem != VMA_NULL)
5169  {
5170  ItemType* const prevItem = pItem->pPrev;
5171  ItemType* const newItem = m_ItemAllocator.Alloc();
5172  newItem->pPrev = prevItem;
5173  newItem->pNext = pItem;
5174  pItem->pPrev = newItem;
5175  if(prevItem != VMA_NULL)
5176  {
5177  prevItem->pNext = newItem;
5178  }
5179  else
5180  {
5181  VMA_HEAVY_ASSERT(m_pFront == pItem);
5182  m_pFront = newItem;
5183  }
5184  ++m_Count;
5185  return newItem;
5186  }
5187  else
5188  return PushBack();
5189 }
5190 
5191 template<typename T>
5192 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5193 {
5194  if(pItem != VMA_NULL)
5195  {
5196  ItemType* const nextItem = pItem->pNext;
5197  ItemType* const newItem = m_ItemAllocator.Alloc();
5198  newItem->pNext = nextItem;
5199  newItem->pPrev = pItem;
5200  pItem->pNext = newItem;
5201  if(nextItem != VMA_NULL)
5202  {
5203  nextItem->pPrev = newItem;
5204  }
5205  else
5206  {
5207  VMA_HEAVY_ASSERT(m_pBack == pItem);
5208  m_pBack = newItem;
5209  }
5210  ++m_Count;
5211  return newItem;
5212  }
5213  else
5214  return PushFront();
5215 }
5216 
5217 template<typename T>
5218 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5219 {
5220  ItemType* const newItem = InsertBefore(pItem);
5221  newItem->Value = value;
5222  return newItem;
5223 }
5224 
5225 template<typename T>
5226 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5227 {
5228  ItemType* const newItem = InsertAfter(pItem);
5229  newItem->Value = value;
5230  return newItem;
5231 }
5232 
5233 template<typename T, typename AllocatorT>
5234 class VmaList
5235 {
5236  VMA_CLASS_NO_COPY(VmaList)
5237 public:
5238  class iterator
5239  {
5240  public:
5241  iterator() :
5242  m_pList(VMA_NULL),
5243  m_pItem(VMA_NULL)
5244  {
5245  }
5246 
5247  T& operator*() const
5248  {
5249  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5250  return m_pItem->Value;
5251  }
5252  T* operator->() const
5253  {
5254  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5255  return &m_pItem->Value;
5256  }
5257 
5258  iterator& operator++()
5259  {
5260  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5261  m_pItem = m_pItem->pNext;
5262  return *this;
5263  }
5264  iterator& operator--()
5265  {
5266  if(m_pItem != VMA_NULL)
5267  {
5268  m_pItem = m_pItem->pPrev;
5269  }
5270  else
5271  {
5272  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5273  m_pItem = m_pList->Back();
5274  }
5275  return *this;
5276  }
5277 
5278  iterator operator++(int)
5279  {
5280  iterator result = *this;
5281  ++*this;
5282  return result;
5283  }
5284  iterator operator--(int)
5285  {
5286  iterator result = *this;
5287  --*this;
5288  return result;
5289  }
5290 
5291  bool operator==(const iterator& rhs) const
5292  {
5293  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5294  return m_pItem == rhs.m_pItem;
5295  }
5296  bool operator!=(const iterator& rhs) const
5297  {
5298  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5299  return m_pItem != rhs.m_pItem;
5300  }
5301 
5302  private:
5303  VmaRawList<T>* m_pList;
5304  VmaListItem<T>* m_pItem;
5305 
5306  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5307  m_pList(pList),
5308  m_pItem(pItem)
5309  {
5310  }
5311 
5312  friend class VmaList<T, AllocatorT>;
5313  };
5314 
5315  class const_iterator
5316  {
5317  public:
5318  const_iterator() :
5319  m_pList(VMA_NULL),
5320  m_pItem(VMA_NULL)
5321  {
5322  }
5323 
5324  const_iterator(const iterator& src) :
5325  m_pList(src.m_pList),
5326  m_pItem(src.m_pItem)
5327  {
5328  }
5329 
5330  const T& operator*() const
5331  {
5332  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5333  return m_pItem->Value;
5334  }
5335  const T* operator->() const
5336  {
5337  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5338  return &m_pItem->Value;
5339  }
5340 
5341  const_iterator& operator++()
5342  {
5343  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5344  m_pItem = m_pItem->pNext;
5345  return *this;
5346  }
5347  const_iterator& operator--()
5348  {
5349  if(m_pItem != VMA_NULL)
5350  {
5351  m_pItem = m_pItem->pPrev;
5352  }
5353  else
5354  {
5355  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5356  m_pItem = m_pList->Back();
5357  }
5358  return *this;
5359  }
5360 
5361  const_iterator operator++(int)
5362  {
5363  const_iterator result = *this;
5364  ++*this;
5365  return result;
5366  }
5367  const_iterator operator--(int)
5368  {
5369  const_iterator result = *this;
5370  --*this;
5371  return result;
5372  }
5373 
5374  bool operator==(const const_iterator& rhs) const
5375  {
5376  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5377  return m_pItem == rhs.m_pItem;
5378  }
5379  bool operator!=(const const_iterator& rhs) const
5380  {
5381  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5382  return m_pItem != rhs.m_pItem;
5383  }
5384 
5385  private:
5386  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5387  m_pList(pList),
5388  m_pItem(pItem)
5389  {
5390  }
5391 
5392  const VmaRawList<T>* m_pList;
5393  const VmaListItem<T>* m_pItem;
5394 
5395  friend class VmaList<T, AllocatorT>;
5396  };
5397 
5398  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5399 
5400  bool empty() const { return m_RawList.IsEmpty(); }
5401  size_t size() const { return m_RawList.GetCount(); }
5402 
5403  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5404  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5405 
5406  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5407  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5408 
5409  void clear() { m_RawList.Clear(); }
5410  void push_back(const T& value) { m_RawList.PushBack(value); }
5411  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5412  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5413 
5414 private:
5415  VmaRawList<T> m_RawList;
5416 };
5417 
5418 #endif // #if VMA_USE_STL_LIST
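/*
Illustrative usage sketch (not part of the library), assuming VmaStlAllocator
is constructible from a VkAllocationCallbacks pointer, as the VmaList
constructor above relies on via allocator.m_pCallbacks. VmaList mirrors a
subset of std::list: nodes come from the embedded VmaPoolAllocator, and
insert()/erase() are O(1) pointer relinking.
*/
#if 0
static void ListSketch(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > ListType;
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    ListType list(alloc);
    list.push_back(10);
    list.push_back(20);
    ListType::iterator it = list.insert(list.end(), 30); // Inserts before end(), i.e. at the back.
    list.erase(it); // Unlinks the node and returns it to the pool allocator.
    uint32_t sum = 0;
    for(ListType::iterator i = list.begin(); i != list.end(); ++i)
        sum += *i; // 10 + 20
}
#endif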
5419 
5421 // class VmaMap
5422 
5423 // Unused in this version.
5424 #if 0
5425 
5426 #if VMA_USE_STL_UNORDERED_MAP
5427 
5428 #define VmaPair std::pair
5429 
5430 #define VMA_MAP_TYPE(KeyT, ValueT) \
5431  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5432 
5433 #else // #if VMA_USE_STL_UNORDERED_MAP
5434 
5435 template<typename T1, typename T2>
5436 struct VmaPair
5437 {
5438  T1 first;
5439  T2 second;
5440 
5441  VmaPair() : first(), second() { }
5442  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5443 };
5444 
5445 /* Class compatible with subset of interface of std::unordered_map.
5446 KeyT, ValueT must be POD because they will be stored in VmaVector.
5447 */
5448 template<typename KeyT, typename ValueT>
5449 class VmaMap
5450 {
5451 public:
5452  typedef VmaPair<KeyT, ValueT> PairType;
5453  typedef PairType* iterator;
5454 
5455  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5456 
5457  iterator begin() { return m_Vector.begin(); }
5458  iterator end() { return m_Vector.end(); }
5459 
5460  void insert(const PairType& pair);
5461  iterator find(const KeyT& key);
5462  void erase(iterator it);
5463 
5464 private:
5465  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5466 };
5467 
5468 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5469 
5470 template<typename FirstT, typename SecondT>
5471 struct VmaPairFirstLess
5472 {
5473  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5474  {
5475  return lhs.first < rhs.first;
5476  }
5477  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5478  {
5479  return lhs.first < rhsFirst;
5480  }
5481 };
5482 
5483 template<typename KeyT, typename ValueT>
5484 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5485 {
5486  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5487  m_Vector.data(),
5488  m_Vector.data() + m_Vector.size(),
5489  pair,
5490  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5491  VmaVectorInsert(m_Vector, indexToInsert, pair);
5492 }
5493 
5494 template<typename KeyT, typename ValueT>
5495 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5496 {
5497  PairType* it = VmaBinaryFindFirstNotLess(
5498  m_Vector.data(),
5499  m_Vector.data() + m_Vector.size(),
5500  key,
5501  VmaPairFirstLess<KeyT, ValueT>());
5502  if((it != m_Vector.end()) && (it->first == key))
5503  {
5504  return it;
5505  }
5506  else
5507  {
5508  return m_Vector.end();
5509  }
5510 }
5511 
5512 template<typename KeyT, typename ValueT>
5513 void VmaMap<KeyT, ValueT>::erase(iterator it)
5514 {
5515  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5516 }
5517 
5518 #endif // #if VMA_USE_STL_UNORDERED_MAP
5519 
5520 #endif // #if 0
5521 
5523 
5524 class VmaDeviceMemoryBlock;
5525 
5526 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5527 
5528 struct VmaAllocation_T
5529 {
5530 private:
5531  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5532 
5533  enum FLAGS
5534  {
5535  FLAG_USER_DATA_STRING = 0x01,
5536  };
5537 
5538 public:
5539  enum ALLOCATION_TYPE
5540  {
5541  ALLOCATION_TYPE_NONE,
5542  ALLOCATION_TYPE_BLOCK,
5543  ALLOCATION_TYPE_DEDICATED,
5544  };
5545 
5546  /*
5547  This struct is allocated using VmaPoolAllocator.
5548  */
5549 
5550  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
5551  m_Alignment{1},
5552  m_Size{0},
5553  m_pUserData{VMA_NULL},
5554  m_LastUseFrameIndex{currentFrameIndex},
5555  m_MemoryTypeIndex{0},
5556  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
5557  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
5558  m_MapCount{0},
5559  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
5560  {
5561 #if VMA_STATS_STRING_ENABLED
5562  m_CreationFrameIndex = currentFrameIndex;
5563  m_BufferImageUsage = 0;
5564 #endif
5565  }
5566 
5567  ~VmaAllocation_T()
5568  {
5569  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5570 
5571  // Check if owned string was freed.
5572  VMA_ASSERT(m_pUserData == VMA_NULL);
5573  }
5574 
5575  void InitBlockAllocation(
5576  VmaDeviceMemoryBlock* block,
5577  VkDeviceSize offset,
5578  VkDeviceSize alignment,
5579  VkDeviceSize size,
5580  uint32_t memoryTypeIndex,
5581  VmaSuballocationType suballocationType,
5582  bool mapped,
5583  bool canBecomeLost)
5584  {
5585  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5586  VMA_ASSERT(block != VMA_NULL);
5587  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5588  m_Alignment = alignment;
5589  m_Size = size;
5590  m_MemoryTypeIndex = memoryTypeIndex;
5591  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5592  m_SuballocationType = (uint8_t)suballocationType;
5593  m_BlockAllocation.m_Block = block;
5594  m_BlockAllocation.m_Offset = offset;
5595  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5596  }
5597 
5598  void InitLost()
5599  {
5600  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5601  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5602  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5603  m_MemoryTypeIndex = 0;
5604  m_BlockAllocation.m_Block = VMA_NULL;
5605  m_BlockAllocation.m_Offset = 0;
5606  m_BlockAllocation.m_CanBecomeLost = true;
5607  }
5608 
5609  void ChangeBlockAllocation(
5610  VmaAllocator hAllocator,
5611  VmaDeviceMemoryBlock* block,
5612  VkDeviceSize offset);
5613 
5614  void ChangeOffset(VkDeviceSize newOffset);
5615 
5616  // A non-null pMappedData means the allocation is created with the MAPPED flag.
5617  void InitDedicatedAllocation(
5618  uint32_t memoryTypeIndex,
5619  VkDeviceMemory hMemory,
5620  VmaSuballocationType suballocationType,
5621  void* pMappedData,
5622  VkDeviceSize size)
5623  {
5624  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5625  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5626  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5627  m_Alignment = 0;
5628  m_Size = size;
5629  m_MemoryTypeIndex = memoryTypeIndex;
5630  m_SuballocationType = (uint8_t)suballocationType;
5631  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5632  m_DedicatedAllocation.m_hMemory = hMemory;
5633  m_DedicatedAllocation.m_pMappedData = pMappedData;
5634  }
5635 
5636  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5637  VkDeviceSize GetAlignment() const { return m_Alignment; }
5638  VkDeviceSize GetSize() const { return m_Size; }
5639  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5640  void* GetUserData() const { return m_pUserData; }
5641  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5642  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5643 
5644  VmaDeviceMemoryBlock* GetBlock() const
5645  {
5646  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5647  return m_BlockAllocation.m_Block;
5648  }
5649  VkDeviceSize GetOffset() const;
5650  VkDeviceMemory GetMemory() const;
5651  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5652  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5653  void* GetMappedData() const;
5654  bool CanBecomeLost() const;
5655 
5656  uint32_t GetLastUseFrameIndex() const
5657  {
5658  return m_LastUseFrameIndex.load();
5659  }
5660  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5661  {
5662  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5663  }
5664  /*
5665  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5666  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5667  - Else, returns false.
5668 
5669  If hAllocation is already lost, assert - you should not call it then.
5670  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5671  */
5672  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
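 /*
 Worked example (hypothetical numbers) of the condition above: with
 frameInUseCount = 2 and allocator.CurrentFrameIndex = 10, an allocation with
 LastUseFrameIndex = 7 satisfies 7 + 2 < 10 and can be made lost, while one
 with LastUseFrameIndex = 8 cannot, because the GPU may still be using it.
 */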
5673 
5674  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5675  {
5676  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5677  outInfo.blockCount = 1;
5678  outInfo.allocationCount = 1;
5679  outInfo.unusedRangeCount = 0;
5680  outInfo.usedBytes = m_Size;
5681  outInfo.unusedBytes = 0;
5682  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5683  outInfo.unusedRangeSizeMin = UINT64_MAX;
5684  outInfo.unusedRangeSizeMax = 0;
5685  }
5686 
5687  void BlockAllocMap();
5688  void BlockAllocUnmap();
5689  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5690  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5691 
5692 #if VMA_STATS_STRING_ENABLED
5693  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5694  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5695 
5696  void InitBufferImageUsage(uint32_t bufferImageUsage)
5697  {
5698  VMA_ASSERT(m_BufferImageUsage == 0);
5699  m_BufferImageUsage = bufferImageUsage;
5700  }
5701 
5702  void PrintParameters(class VmaJsonWriter& json) const;
5703 #endif
5704 
5705 private:
5706  VkDeviceSize m_Alignment;
5707  VkDeviceSize m_Size;
5708  void* m_pUserData;
5709  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5710  uint32_t m_MemoryTypeIndex;
5711  uint8_t m_Type; // ALLOCATION_TYPE
5712  uint8_t m_SuballocationType; // VmaSuballocationType
5713  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5714  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
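 // Example: m_MapCount == 0x82 means the allocation was created with
 // VMA_ALLOCATION_CREATE_MAPPED_BIT and has additionally been mapped twice
 // via vmaMapMemory().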
5715  uint8_t m_MapCount;
5716  uint8_t m_Flags; // enum FLAGS
5717 
5718  // Allocation out of VmaDeviceMemoryBlock.
5719  struct BlockAllocation
5720  {
5721  VmaDeviceMemoryBlock* m_Block;
5722  VkDeviceSize m_Offset;
5723  bool m_CanBecomeLost;
5724  };
5725 
5726  // Allocation for an object that has its own private VkDeviceMemory.
5727  struct DedicatedAllocation
5728  {
5729  VkDeviceMemory m_hMemory;
5730  void* m_pMappedData; // Not null means memory is mapped.
5731  };
5732 
5733  union
5734  {
5735  // Allocation out of VmaDeviceMemoryBlock.
5736  BlockAllocation m_BlockAllocation;
5737  // Allocation for an object that has its own private VkDeviceMemory.
5738  DedicatedAllocation m_DedicatedAllocation;
5739  };
5740 
5741 #if VMA_STATS_STRING_ENABLED
5742  uint32_t m_CreationFrameIndex;
5743  uint32_t m_BufferImageUsage; // 0 if unknown.
5744 #endif
5745 
5746  void FreeUserDataString(VmaAllocator hAllocator);
5747 };
5748 
5749 /*
5750 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5751 an allocated memory block, or is free.
5752 */
5753 struct VmaSuballocation
5754 {
5755  VkDeviceSize offset;
5756  VkDeviceSize size;
5757  VmaAllocation hAllocation;
5758  VmaSuballocationType type;
5759 };
5760 
5761 // Comparator for offsets.
5762 struct VmaSuballocationOffsetLess
5763 {
5764  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5765  {
5766  return lhs.offset < rhs.offset;
5767  }
5768 };
5769 struct VmaSuballocationOffsetGreater
5770 {
5771  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5772  {
5773  return lhs.offset > rhs.offset;
5774  }
5775 };
5776 
5777 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5778 
5779 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5780 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5781 
5782 enum class VmaAllocationRequestType
5783 {
5784  Normal,
5785  // Used by "Linear" algorithm.
5786  UpperAddress,
5787  EndOf1st,
5788  EndOf2nd,
5789 };
5790 
5791 /*
5792 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5793 
5794 If canMakeOtherLost was false:
5795 - item points to a FREE suballocation.
5796 - itemsToMakeLostCount is 0.
5797 
5798 If canMakeOtherLost was true:
5799 - item points to first of sequence of suballocations, which are either FREE,
5800  or point to VmaAllocations that can become lost.
5801 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5802  the requested allocation to succeed.
5803 */
5804 struct VmaAllocationRequest
5805 {
5806  VkDeviceSize offset;
5807  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5808  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5809  VmaSuballocationList::iterator item;
5810  size_t itemsToMakeLostCount;
5811  void* customData;
5812  VmaAllocationRequestType type;
5813 
5814  VkDeviceSize CalcCost() const
5815  {
5816  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5817  }
5818 };
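/*
Worked example (hypothetical numbers) of CalcCost(): a request that overlaps
lost-able allocations totaling sumItemSize = 262144 bytes and requires
itemsToMakeLostCount = 2 costs 262144 + 2 * 1048576 = 2359296 "bytes", so a
competing request that makes fewer allocations lost wins even if it overlaps
somewhat more used bytes.
*/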
5819 
5820 /*
5821 Data structure used for bookkeeping of allocations and unused ranges of memory
5822 in a single VkDeviceMemory block.
5823 */
5824 class VmaBlockMetadata
5825 {
5826 public:
5827  VmaBlockMetadata(VmaAllocator hAllocator);
5828  virtual ~VmaBlockMetadata() { }
5829  virtual void Init(VkDeviceSize size) { m_Size = size; }
5830 
5831  // Validates all data structures inside this object. If not valid, returns false.
5832  virtual bool Validate() const = 0;
5833  VkDeviceSize GetSize() const { return m_Size; }
5834  virtual size_t GetAllocationCount() const = 0;
5835  virtual VkDeviceSize GetSumFreeSize() const = 0;
5836  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5837  // Returns true if this block is empty - contains only a single free suballocation.
5838  virtual bool IsEmpty() const = 0;
5839 
5840  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5841  // Shouldn't modify blockCount.
5842  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5843 
5844 #if VMA_STATS_STRING_ENABLED
5845  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5846 #endif
5847 
5848  // Tries to find a place for suballocation with given parameters inside this block.
5849  // If it succeeds, fills pAllocationRequest and returns true.
5850  // If it fails, returns false.
5851  virtual bool CreateAllocationRequest(
5852  uint32_t currentFrameIndex,
5853  uint32_t frameInUseCount,
5854  VkDeviceSize bufferImageGranularity,
5855  VkDeviceSize allocSize,
5856  VkDeviceSize allocAlignment,
5857  bool upperAddress,
5858  VmaSuballocationType allocType,
5859  bool canMakeOtherLost,
5860  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5861  uint32_t strategy,
5862  VmaAllocationRequest* pAllocationRequest) = 0;
5863 
5864  virtual bool MakeRequestedAllocationsLost(
5865  uint32_t currentFrameIndex,
5866  uint32_t frameInUseCount,
5867  VmaAllocationRequest* pAllocationRequest) = 0;
5868 
5869  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5870 
5871  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5872 
5873  // Makes actual allocation based on request. Request must already be checked and valid.
5874  virtual void Alloc(
5875  const VmaAllocationRequest& request,
5876  VmaSuballocationType type,
5877  VkDeviceSize allocSize,
5878  VmaAllocation hAllocation) = 0;
5879 
5880  // Frees suballocation assigned to given memory region.
5881  virtual void Free(const VmaAllocation allocation) = 0;
5882  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5883 
5884 protected:
5885  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5886 
5887 #if VMA_STATS_STRING_ENABLED
5888  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5889  VkDeviceSize unusedBytes,
5890  size_t allocationCount,
5891  size_t unusedRangeCount) const;
5892  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5893  VkDeviceSize offset,
5894  VmaAllocation hAllocation) const;
5895  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5896  VkDeviceSize offset,
5897  VkDeviceSize size) const;
5898  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5899 #endif
5900 
5901 private:
5902  VkDeviceSize m_Size;
5903  const VkAllocationCallbacks* m_pAllocationCallbacks;
5904 };
5905 
5906 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5907  VMA_ASSERT(0 && "Validation failed: " #cond); \
5908  return false; \
5909  } } while(false)
5910 
5911 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5912 {
5913  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5914 public:
5915  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5916  virtual ~VmaBlockMetadata_Generic();
5917  virtual void Init(VkDeviceSize size);
5918 
5919  virtual bool Validate() const;
5920  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5921  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5922  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5923  virtual bool IsEmpty() const;
5924 
5925  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5926  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5927 
5928 #if VMA_STATS_STRING_ENABLED
5929  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5930 #endif
5931 
5932  virtual bool CreateAllocationRequest(
5933  uint32_t currentFrameIndex,
5934  uint32_t frameInUseCount,
5935  VkDeviceSize bufferImageGranularity,
5936  VkDeviceSize allocSize,
5937  VkDeviceSize allocAlignment,
5938  bool upperAddress,
5939  VmaSuballocationType allocType,
5940  bool canMakeOtherLost,
5941  uint32_t strategy,
5942  VmaAllocationRequest* pAllocationRequest);
5943 
5944  virtual bool MakeRequestedAllocationsLost(
5945  uint32_t currentFrameIndex,
5946  uint32_t frameInUseCount,
5947  VmaAllocationRequest* pAllocationRequest);
5948 
5949  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5950 
5951  virtual VkResult CheckCorruption(const void* pBlockData);
5952 
5953  virtual void Alloc(
5954  const VmaAllocationRequest& request,
5955  VmaSuballocationType type,
5956  VkDeviceSize allocSize,
5957  VmaAllocation hAllocation);
5958 
5959  virtual void Free(const VmaAllocation allocation);
5960  virtual void FreeAtOffset(VkDeviceSize offset);
5961 
5963  // For defragmentation
5964 
5965  bool IsBufferImageGranularityConflictPossible(
5966  VkDeviceSize bufferImageGranularity,
5967  VmaSuballocationType& inOutPrevSuballocType) const;
5968 
5969 private:
5970  friend class VmaDefragmentationAlgorithm_Generic;
5971  friend class VmaDefragmentationAlgorithm_Fast;
5972 
5973  uint32_t m_FreeCount;
5974  VkDeviceSize m_SumFreeSize;
5975  VmaSuballocationList m_Suballocations;
5976  // Suballocations that are free and have a size greater than a certain threshold.
5977  // Sorted by size, ascending.
5978  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5979 
5980  bool ValidateFreeSuballocationList() const;
5981 
5982  // Checks if the requested suballocation with the given parameters can be placed at the given suballocItem.
5983  // If yes, fills pOffset and returns true. If not, returns false.
5984  bool CheckAllocation(
5985  uint32_t currentFrameIndex,
5986  uint32_t frameInUseCount,
5987  VkDeviceSize bufferImageGranularity,
5988  VkDeviceSize allocSize,
5989  VkDeviceSize allocAlignment,
5990  VmaSuballocationType allocType,
5991  VmaSuballocationList::const_iterator suballocItem,
5992  bool canMakeOtherLost,
5993  VkDeviceSize* pOffset,
5994  size_t* itemsToMakeLostCount,
5995  VkDeviceSize* pSumFreeSize,
5996  VkDeviceSize* pSumItemSize) const;
5997  // Given a free suballocation, merges it with the following one, which must also be free.
5998  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5999  // Releases given suballocation, making it free.
6000  // Merges it with adjacent free suballocations if applicable.
6001  // Returns iterator to new free suballocation at this place.
6002  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6003  // Given a free suballocation, inserts it into the sorted list
6004  // m_FreeSuballocationsBySize if its size is above the registration threshold.
6005  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6006  // Given a free suballocation, removes it from the sorted list
6007  // m_FreeSuballocationsBySize if its size is above the registration threshold.
6008  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6009 };
6010 
6011 /*
6012 Allocations and their references in the internal data structure look like this:
6013 
6014 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6015 
6016  0 +-------+
6017  | |
6018  | |
6019  | |
6020  +-------+
6021  | Alloc | 1st[m_1stNullItemsBeginCount]
6022  +-------+
6023  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6024  +-------+
6025  | ... |
6026  +-------+
6027  | Alloc | 1st[1st.size() - 1]
6028  +-------+
6029  | |
6030  | |
6031  | |
6032 GetSize() +-------+
6033 
6034 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6035 
6036  0 +-------+
6037  | Alloc | 2nd[0]
6038  +-------+
6039  | Alloc | 2nd[1]
6040  +-------+
6041  | ... |
6042  +-------+
6043  | Alloc | 2nd[2nd.size() - 1]
6044  +-------+
6045  | |
6046  | |
6047  | |
6048  +-------+
6049  | Alloc | 1st[m_1stNullItemsBeginCount]
6050  +-------+
6051  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6052  +-------+
6053  | ... |
6054  +-------+
6055  | Alloc | 1st[1st.size() - 1]
6056  +-------+
6057  | |
6058 GetSize() +-------+
6059 
6060 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6061 
6062  0 +-------+
6063  | |
6064  | |
6065  | |
6066  +-------+
6067  | Alloc | 1st[m_1stNullItemsBeginCount]
6068  +-------+
6069  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6070  +-------+
6071  | ... |
6072  +-------+
6073  | Alloc | 1st[1st.size() - 1]
6074  +-------+
6075  | |
6076  | |
6077  | |
6078  +-------+
6079  | Alloc | 2nd[2nd.size() - 1]
6080  +-------+
6081  | ... |
6082  +-------+
6083  | Alloc | 2nd[1]
6084  +-------+
6085  | Alloc | 2nd[0]
6086 GetSize() +-------+
6087 
6088 */
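/*
A minimal sketch (based on the public API declared earlier in this header) of
how this metadata class is reached: a custom pool created with
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT uses VmaBlockMetadata_Linear for its
block, e.g. as a ring buffer for streamed, per-frame data. The block size and
block count below are illustrative choices, not requirements of the library.
*/
#if 0
static VkResult CreateLinearPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // One fixed 64 MiB block backs the whole pool.
    poolCreateInfo.maxBlockCount = 1;               // A single block is typical for the linear algorithm.
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}
#endif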
6089 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6090 {
6091  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6092 public:
6093  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6094  virtual ~VmaBlockMetadata_Linear();
6095  virtual void Init(VkDeviceSize size);
6096 
6097  virtual bool Validate() const;
6098  virtual size_t GetAllocationCount() const;
6099  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6100  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6101  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6102 
6103  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6104  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6105 
6106 #if VMA_STATS_STRING_ENABLED
6107  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6108 #endif
6109 
6110  virtual bool CreateAllocationRequest(
6111  uint32_t currentFrameIndex,
6112  uint32_t frameInUseCount,
6113  VkDeviceSize bufferImageGranularity,
6114  VkDeviceSize allocSize,
6115  VkDeviceSize allocAlignment,
6116  bool upperAddress,
6117  VmaSuballocationType allocType,
6118  bool canMakeOtherLost,
6119  uint32_t strategy,
6120  VmaAllocationRequest* pAllocationRequest);
6121 
6122  virtual bool MakeRequestedAllocationsLost(
6123  uint32_t currentFrameIndex,
6124  uint32_t frameInUseCount,
6125  VmaAllocationRequest* pAllocationRequest);
6126 
6127  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6128 
6129  virtual VkResult CheckCorruption(const void* pBlockData);
6130 
6131  virtual void Alloc(
6132  const VmaAllocationRequest& request,
6133  VmaSuballocationType type,
6134  VkDeviceSize allocSize,
6135  VmaAllocation hAllocation);
6136 
6137  virtual void Free(const VmaAllocation allocation);
6138  virtual void FreeAtOffset(VkDeviceSize offset);
6139 
6140 private:
6141  /*
6142  There are two suballocation vectors, used in a ping-pong fashion.
6143  The one with index m_1stVectorIndex is called 1st.
6144  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6145  2nd can be non-empty only when 1st is not empty.
6146  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6147  */
6148  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6149 
6150  enum SECOND_VECTOR_MODE
6151  {
6152  SECOND_VECTOR_EMPTY,
6153  /*
6154  Suballocations in 2nd vector are created later than the ones in 1st, but they
6155  all have smaller offsets.
6156  */
6157  SECOND_VECTOR_RING_BUFFER,
6158  /*
6159  Suballocations in 2nd vector are upper side of double stack.
6160  They all have offsets higher than those in 1st vector.
6161  Top of this stack means smaller offsets, but higher indices in this vector.
6162  */
6163  SECOND_VECTOR_DOUBLE_STACK,
6164  };
6165 
6166  VkDeviceSize m_SumFreeSize;
6167  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6168  uint32_t m_1stVectorIndex;
6169  SECOND_VECTOR_MODE m_2ndVectorMode;
6170 
6171  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6172  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6173  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6174  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6175 
6176  // Number of items in 1st vector with hAllocation = null at the beginning.
6177  size_t m_1stNullItemsBeginCount;
6178  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6179  size_t m_1stNullItemsMiddleCount;
6180  // Number of items in 2nd vector with hAllocation = null.
6181  size_t m_2ndNullItemsCount;
6182 
6183  bool ShouldCompact1st() const;
6184  void CleanupAfterFree();
6185 
6186  bool CreateAllocationRequest_LowerAddress(
6187  uint32_t currentFrameIndex,
6188  uint32_t frameInUseCount,
6189  VkDeviceSize bufferImageGranularity,
6190  VkDeviceSize allocSize,
6191  VkDeviceSize allocAlignment,
6192  VmaSuballocationType allocType,
6193  bool canMakeOtherLost,
6194  uint32_t strategy,
6195  VmaAllocationRequest* pAllocationRequest);
6196  bool CreateAllocationRequest_UpperAddress(
6197  uint32_t currentFrameIndex,
6198  uint32_t frameInUseCount,
6199  VkDeviceSize bufferImageGranularity,
6200  VkDeviceSize allocSize,
6201  VkDeviceSize allocAlignment,
6202  VmaSuballocationType allocType,
6203  bool canMakeOtherLost,
6204  uint32_t strategy,
6205  VmaAllocationRequest* pAllocationRequest);
6206 };
6207 
6208 /*
6209 - GetSize() is the original size of allocated memory block.
6210 - m_UsableSize is this size aligned down to a power of two.
6211  All allocations and calculations happen relative to m_UsableSize.
6212 - GetUnusableSize() is the difference between them.
6213  It is reported as a separate, unused range, not available for allocations.
6214 
6215 Node at level 0 has size = m_UsableSize.
6216 Each next level contains nodes with size 2 times smaller than current level.
6217 m_LevelCount is the maximum number of levels to use in the current object.
6218 */
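/*
Worked example (hypothetical numbers): for a block with GetSize() = 1000,
m_UsableSize = 512 and GetUnusableSize() = 488. Node sizes per level are then
512, 256, 128, 64, 32 (stopping at MIN_NODE_SIZE), so LevelToNodeSize(3) =
512 >> 3 = 64, and a 100-byte allocation is placed in a 128-byte node at
level 2, wasting 28 bytes to internal fragmentation.
*/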
6219 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6220 {
6221  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6222 public:
6223  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6224  virtual ~VmaBlockMetadata_Buddy();
6225  virtual void Init(VkDeviceSize size);
6226 
6227  virtual bool Validate() const;
6228  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6229  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6230  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6231  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6232 
6233  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6234  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6235 
6236 #if VMA_STATS_STRING_ENABLED
6237  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6238 #endif
6239 
6240  virtual bool CreateAllocationRequest(
6241  uint32_t currentFrameIndex,
6242  uint32_t frameInUseCount,
6243  VkDeviceSize bufferImageGranularity,
6244  VkDeviceSize allocSize,
6245  VkDeviceSize allocAlignment,
6246  bool upperAddress,
6247  VmaSuballocationType allocType,
6248  bool canMakeOtherLost,
6249  uint32_t strategy,
6250  VmaAllocationRequest* pAllocationRequest);
6251 
6252  virtual bool MakeRequestedAllocationsLost(
6253  uint32_t currentFrameIndex,
6254  uint32_t frameInUseCount,
6255  VmaAllocationRequest* pAllocationRequest);
6256 
6257  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6258 
6259  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6260 
6261  virtual void Alloc(
6262  const VmaAllocationRequest& request,
6263  VmaSuballocationType type,
6264  VkDeviceSize allocSize,
6265  VmaAllocation hAllocation);
6266 
6267  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6268  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6269 
6270 private:
6271  static const VkDeviceSize MIN_NODE_SIZE = 32;
6272  static const size_t MAX_LEVELS = 30;
6273 
6274  struct ValidationContext
6275  {
6276  size_t calculatedAllocationCount;
6277  size_t calculatedFreeCount;
6278  VkDeviceSize calculatedSumFreeSize;
6279 
6280  ValidationContext() :
6281  calculatedAllocationCount(0),
6282  calculatedFreeCount(0),
6283  calculatedSumFreeSize(0) { }
6284  };
6285 
6286  struct Node
6287  {
6288  VkDeviceSize offset;
6289  enum TYPE
6290  {
6291  TYPE_FREE,
6292  TYPE_ALLOCATION,
6293  TYPE_SPLIT,
6294  TYPE_COUNT
6295  } type;
6296  Node* parent;
6297  Node* buddy;
6298 
6299  union
6300  {
6301  struct
6302  {
6303  Node* prev;
6304  Node* next;
6305  } free;
6306  struct
6307  {
6308  VmaAllocation alloc;
6309  } allocation;
6310  struct
6311  {
6312  Node* leftChild;
6313  } split;
6314  };
6315  };
6316 
6317  // Size of the memory block aligned down to a power of two.
6318  VkDeviceSize m_UsableSize;
6319  uint32_t m_LevelCount;
6320 
6321  Node* m_Root;
6322  struct {
6323  Node* front;
6324  Node* back;
6325  } m_FreeList[MAX_LEVELS];
6326  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6327  size_t m_AllocationCount;
6328  // Number of nodes in the tree with type == TYPE_FREE.
6329  size_t m_FreeCount;
6330  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6331  VkDeviceSize m_SumFreeSize;
6332 
6333  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6334  void DeleteNode(Node* node);
6335  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6336  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6337  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6338  // Alloc passed just for validation. Can be null.
6339  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6340  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6341  // Adds node to the front of FreeList at given level.
6342  // node->type must be FREE.
6343  // node->free.prev, next can be undefined.
6344  void AddToFreeListFront(uint32_t level, Node* node);
6345  // Removes node from FreeList at given level.
6346  // node->type must be FREE.
6347  // node->free.prev, next stay untouched.
6348  void RemoveFromFreeList(uint32_t level, Node* node);
6349 
6350 #if VMA_STATS_STRING_ENABLED
6351  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6352 #endif
6353 };
6354 
6355 /*
6356 Represents a single block of device memory (`VkDeviceMemory`) with all the
6357 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6358 
6359 Thread-safety: This class must be externally synchronized.
6360 */
6361 class VmaDeviceMemoryBlock
6362 {
6363  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6364 public:
6365  VmaBlockMetadata* m_pMetadata;
6366 
6367  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6368 
6369  ~VmaDeviceMemoryBlock()
6370  {
6371  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6372  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6373  }
6374 
6375  // Always call after construction.
6376  void Init(
6377  VmaAllocator hAllocator,
6378  VmaPool hParentPool,
6379  uint32_t newMemoryTypeIndex,
6380  VkDeviceMemory newMemory,
6381  VkDeviceSize newSize,
6382  uint32_t id,
6383  uint32_t algorithm);
6384  // Always call before destruction.
6385  void Destroy(VmaAllocator allocator);
6386 
6387  VmaPool GetParentPool() const { return m_hParentPool; }
6388  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6389  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6390  uint32_t GetId() const { return m_Id; }
6391  void* GetMappedData() const { return m_pMappedData; }
6392 
6393  // Validates all data structures inside this object. If not valid, returns false.
6394  bool Validate() const;
6395 
6396  VkResult CheckCorruption(VmaAllocator hAllocator);
6397 
6398  // ppData can be null.
6399  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6400  void Unmap(VmaAllocator hAllocator, uint32_t count);
6401 
6402  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6403  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6404 
6405  VkResult BindBufferMemory(
6406  const VmaAllocator hAllocator,
6407  const VmaAllocation hAllocation,
6408  VkDeviceSize allocationLocalOffset,
6409  VkBuffer hBuffer,
6410  const void* pNext);
6411  VkResult BindImageMemory(
6412  const VmaAllocator hAllocator,
6413  const VmaAllocation hAllocation,
6414  VkDeviceSize allocationLocalOffset,
6415  VkImage hImage,
6416  const void* pNext);
6417 
6418 private:
6419  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
6420  uint32_t m_MemoryTypeIndex;
6421  uint32_t m_Id;
6422  VkDeviceMemory m_hMemory;
6423 
6424  /*
6425  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6426  Also protects m_MapCount, m_pMappedData.
6427  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6428  */
6429  VMA_MUTEX m_Mutex;
6430  uint32_t m_MapCount;
6431  void* m_pMappedData;
6432 };
6433 
6434 struct VmaPointerLess
6435 {
6436  bool operator()(const void* lhs, const void* rhs) const
6437  {
6438  return lhs < rhs;
6439  }
6440 };
6441 
6442 struct VmaDefragmentationMove
6443 {
6444  size_t srcBlockIndex;
6445  size_t dstBlockIndex;
6446  VkDeviceSize srcOffset;
6447  VkDeviceSize dstOffset;
6448  VkDeviceSize size;
6449  VmaAllocation hAllocation;
6450  VmaDeviceMemoryBlock* pSrcBlock;
6451  VmaDeviceMemoryBlock* pDstBlock;
6452 };
6453 
6454 class VmaDefragmentationAlgorithm;
6455 
6456 /*
6457 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6458 Vulkan memory type.
6459 
6460 Synchronized internally with a mutex.
6461 */
6462 struct VmaBlockVector
6463 {
6464  VMA_CLASS_NO_COPY(VmaBlockVector)
6465 public:
6466  VmaBlockVector(
6467  VmaAllocator hAllocator,
6468  VmaPool hParentPool,
6469  uint32_t memoryTypeIndex,
6470  VkDeviceSize preferredBlockSize,
6471  size_t minBlockCount,
6472  size_t maxBlockCount,
6473  VkDeviceSize bufferImageGranularity,
6474  uint32_t frameInUseCount,
6475  bool explicitBlockSize,
6476  uint32_t algorithm);
6477  ~VmaBlockVector();
6478 
6479  VkResult CreateMinBlocks();
6480 
6481  VmaAllocator GetAllocator() const { return m_hAllocator; }
6482  VmaPool GetParentPool() const { return m_hParentPool; }
6483  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6484  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6485  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6486  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6487  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6488  uint32_t GetAlgorithm() const { return m_Algorithm; }
6489 
6490  void GetPoolStats(VmaPoolStats* pStats);
6491 
6492  bool IsEmpty();
6493  bool IsCorruptionDetectionEnabled() const;
6494 
6495  VkResult Allocate(
6496  uint32_t currentFrameIndex,
6497  VkDeviceSize size,
6498  VkDeviceSize alignment,
6499  const VmaAllocationCreateInfo& createInfo,
6500  VmaSuballocationType suballocType,
6501  size_t allocationCount,
6502  VmaAllocation* pAllocations);
6503 
6504  void Free(const VmaAllocation hAllocation);
6505 
6506  // Adds statistics of this BlockVector to pStats.
6507  void AddStats(VmaStats* pStats);
6508 
6509 #if VMA_STATS_STRING_ENABLED
6510  void PrintDetailedMap(class VmaJsonWriter& json);
6511 #endif
6512 
6513  void MakePoolAllocationsLost(
6514  uint32_t currentFrameIndex,
6515  size_t* pLostAllocationCount);
6516  VkResult CheckCorruption();
6517 
6518  // Saves results in pCtx->res.
6519  void Defragment(
6520  class VmaBlockVectorDefragmentationContext* pCtx,
6521  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
6522  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6523  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6524  VkCommandBuffer commandBuffer);
6525  void DefragmentationEnd(
6526  class VmaBlockVectorDefragmentationContext* pCtx,
6527  VmaDefragmentationStats* pStats);
6528 
6529  uint32_t ProcessDefragmentations(
6530  class VmaBlockVectorDefragmentationContext *pCtx,
6531  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
6532 
6533  void CommitDefragmentations(
6534  class VmaBlockVectorDefragmentationContext *pCtx,
6535  VmaDefragmentationStats* pStats);
6536 
6538  // To be used only while the m_Mutex is locked. Used during defragmentation.
6539 
6540  size_t GetBlockCount() const { return m_Blocks.size(); }
6541  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6542  size_t CalcAllocationCount() const;
6543  bool IsBufferImageGranularityConflictPossible() const;
6544 
6545 private:
6546  friend class VmaDefragmentationAlgorithm_Generic;
6547 
6548  const VmaAllocator m_hAllocator;
6549  const VmaPool m_hParentPool;
6550  const uint32_t m_MemoryTypeIndex;
6551  const VkDeviceSize m_PreferredBlockSize;
6552  const size_t m_MinBlockCount;
6553  const size_t m_MaxBlockCount;
6554  const VkDeviceSize m_BufferImageGranularity;
6555  const uint32_t m_FrameInUseCount;
6556  const bool m_ExplicitBlockSize;
6557  const uint32_t m_Algorithm;
6558  VMA_RW_MUTEX m_Mutex;
6559 
6560  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6561  a hysteresis that avoids the pessimistic case of alternately creating and destroying a VkDeviceMemory. */
6562  bool m_HasEmptyBlock;
6563  // Incrementally sorted by sumFreeSize, ascending.
6564  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6565  uint32_t m_NextBlockId;
6566 
6567  VkDeviceSize CalcMaxBlockSize() const;
6568 
6569  // Finds and removes given block from vector.
6570  void Remove(VmaDeviceMemoryBlock* pBlock);
6571 
6572  // Performs a single step in sorting m_Blocks. They may not be fully sorted
6573  // after this call.
6574  void IncrementallySortBlocks();
6575 
6576  VkResult AllocatePage(
6577  uint32_t currentFrameIndex,
6578  VkDeviceSize size,
6579  VkDeviceSize alignment,
6580  const VmaAllocationCreateInfo& createInfo,
6581  VmaSuballocationType suballocType,
6582  VmaAllocation* pAllocation);
6583 
6584  // To be used only without CAN_MAKE_OTHER_LOST flag.
6585  VkResult AllocateFromBlock(
6586  VmaDeviceMemoryBlock* pBlock,
6587  uint32_t currentFrameIndex,
6588  VkDeviceSize size,
6589  VkDeviceSize alignment,
6590  VmaAllocationCreateFlags allocFlags,
6591  void* pUserData,
6592  VmaSuballocationType suballocType,
6593  uint32_t strategy,
6594  VmaAllocation* pAllocation);
6595 
6596  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6597 
6598  // Saves result to pCtx->res.
6599  void ApplyDefragmentationMovesCpu(
6600  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6601  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6602  // Saves result to pCtx->res.
6603  void ApplyDefragmentationMovesGpu(
6604  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6605  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6606  VkCommandBuffer commandBuffer);
6607 
6608  /*
6609  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6610  - updated with new data.
6611  */
6612  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6613 
6614  void UpdateHasEmptyBlock();
6615 };
6616 
6617 struct VmaPool_T
6618 {
6619  VMA_CLASS_NO_COPY(VmaPool_T)
6620 public:
6621  VmaBlockVector m_BlockVector;
6622 
6623  VmaPool_T(
6624  VmaAllocator hAllocator,
6625  const VmaPoolCreateInfo& createInfo,
6626  VkDeviceSize preferredBlockSize);
6627  ~VmaPool_T();
6628 
6629  uint32_t GetId() const { return m_Id; }
6630  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6631 
6632  const char* GetName() const { return m_Name; }
6633  void SetName(const char* pName);
6634 
6635 #if VMA_STATS_STRING_ENABLED
6636  //void PrintDetailedMap(class VmaStringBuilder& sb);
6637 #endif
6638 
6639 private:
6640  uint32_t m_Id;
6641  char* m_Name;
6642 };
6643 
6644 /*
6645 Performs defragmentation:
6646 
6647 - Updates `pBlockVector->m_pMetadata`.
6648 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6649 - Does not move actual data, only returns requested moves as `moves`.
6650 */
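/*
Illustrative sketch (a hypothetical, simplified helper; the real consumers of
the returned moves are VmaBlockVector::ApplyDefragmentationMovesCpu/Gpu
declared above). Conceptually, each move copies `size` bytes from
(srcBlockIndex, srcOffset) to (dstBlockIndex, dstOffset), with memcpy on
mapped host-visible memory or vkCmdCopyBuffer on the GPU path.
*/
#if 0
static void ApplyMovesCpuSketch(
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    char* const* ppBlockMappedData) // Hypothetical: one mapped pointer per block index.
{
    for(size_t i = 0; i < moves.size(); ++i)
    {
        const VmaDefragmentationMove& move = moves[i];
        // Assumes non-overlapping regions; overlapping moves are handled separately by the library.
        memcpy(
            ppBlockMappedData[move.dstBlockIndex] + move.dstOffset,
            ppBlockMappedData[move.srcBlockIndex] + move.srcOffset,
            (size_t)move.size);
    }
}
#endif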
6651 class VmaDefragmentationAlgorithm
6652 {
6653  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6654 public:
6655  VmaDefragmentationAlgorithm(
6656  VmaAllocator hAllocator,
6657  VmaBlockVector* pBlockVector,
6658  uint32_t currentFrameIndex) :
6659  m_hAllocator(hAllocator),
6660  m_pBlockVector(pBlockVector),
6661  m_CurrentFrameIndex(currentFrameIndex)
6662  {
6663  }
6664  virtual ~VmaDefragmentationAlgorithm()
6665  {
6666  }
6667 
6668  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6669  virtual void AddAll() = 0;
6670 
6671  virtual VkResult Defragment(
6672  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6673  VkDeviceSize maxBytesToMove,
6674  uint32_t maxAllocationsToMove,
6675  VmaDefragmentationFlags flags) = 0;
6676 
6677  virtual VkDeviceSize GetBytesMoved() const = 0;
6678  virtual uint32_t GetAllocationsMoved() const = 0;
6679 
6680 protected:
6681  VmaAllocator const m_hAllocator;
6682  VmaBlockVector* const m_pBlockVector;
6683  const uint32_t m_CurrentFrameIndex;
6684 
6685  struct AllocationInfo
6686  {
6687  VmaAllocation m_hAllocation;
6688  VkBool32* m_pChanged;
6689 
6690  AllocationInfo() :
6691  m_hAllocation(VK_NULL_HANDLE),
6692  m_pChanged(VMA_NULL)
6693  {
6694  }
6695  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6696  m_hAllocation(hAlloc),
6697  m_pChanged(pChanged)
6698  {
6699  }
6700  };
6701 };
6702 
6703 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6704 {
6705  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6706 public:
6707  VmaDefragmentationAlgorithm_Generic(
6708  VmaAllocator hAllocator,
6709  VmaBlockVector* pBlockVector,
6710  uint32_t currentFrameIndex,
6711  bool overlappingMoveSupported);
6712  virtual ~VmaDefragmentationAlgorithm_Generic();
6713 
6714  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6715  virtual void AddAll() { m_AllAllocations = true; }
6716 
6717  virtual VkResult Defragment(
6718  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6719  VkDeviceSize maxBytesToMove,
6720  uint32_t maxAllocationsToMove,
6721  VmaDefragmentationFlags flags);
6722 
6723  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6724  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6725 
6726 private:
6727  uint32_t m_AllocationCount;
6728  bool m_AllAllocations;
6729 
6730  VkDeviceSize m_BytesMoved;
6731  uint32_t m_AllocationsMoved;
6732 
6733  struct AllocationInfoSizeGreater
6734  {
6735  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6736  {
6737  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6738  }
6739  };
6740 
6741  struct AllocationInfoOffsetGreater
6742  {
6743  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6744  {
6745  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6746  }
6747  };
6748 
6749  struct BlockInfo
6750  {
6751  size_t m_OriginalBlockIndex;
6752  VmaDeviceMemoryBlock* m_pBlock;
6753  bool m_HasNonMovableAllocations;
6754  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6755 
6756  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6757  m_OriginalBlockIndex(SIZE_MAX),
6758  m_pBlock(VMA_NULL),
6759  m_HasNonMovableAllocations(true),
6760  m_Allocations(pAllocationCallbacks)
6761  {
6762  }
6763 
6764  void CalcHasNonMovableAllocations()
6765  {
6766  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6767  const size_t defragmentAllocCount = m_Allocations.size();
6768  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6769  }
6770 
6771  void SortAllocationsBySizeDescending()
6772  {
6773  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6774  }
6775 
6776  void SortAllocationsByOffsetDescending()
6777  {
6778  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6779  }
6780  };
6781 
6782  struct BlockPointerLess
6783  {
6784  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6785  {
6786  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6787  }
6788  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6789  {
6790  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6791  }
6792  };
6793 
6794  // 1. Blocks with some non-movable allocations go first.
6795  // 2. Blocks with smaller sumFreeSize go first.
6796  struct BlockInfoCompareMoveDestination
6797  {
6798  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6799  {
6800  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6801  {
6802  return true;
6803  }
6804  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6805  {
6806  return false;
6807  }
6808  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6809  {
6810  return true;
6811  }
6812  return false;
6813  }
6814  };
6815 
6816  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6817  BlockInfoVector m_Blocks;
6818 
6819  VkResult DefragmentRound(
6820  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6821  VkDeviceSize maxBytesToMove,
6822  uint32_t maxAllocationsToMove,
6823  bool freeOldAllocations);
6824 
6825  size_t CalcBlocksWithNonMovableCount() const;
6826 
6827  static bool MoveMakesSense(
6828  size_t dstBlockIndex, VkDeviceSize dstOffset,
6829  size_t srcBlockIndex, VkDeviceSize srcOffset);
6830 };
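/*
Illustration (not part of the library): BlockInfoCompareMoveDestination above
orders candidate destination blocks so that blocks already pinned by
non-movable allocations are filled up first, and among equally pinned blocks
the one with less free space wins. A minimal sketch of the same two-key
ordering, using hypothetical plain structs:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Candidate { bool pinned; uint64_t freeBytes; };

    void SortCandidates(std::vector<Candidate>& v)
    {
        std::sort(v.begin(), v.end(), [](const Candidate& l, const Candidate& r) {
            if(l.pinned != r.pinned)
                return l.pinned;              // 1. Pinned blocks go first.
            return l.freeBytes < r.freeBytes; // 2. Then smaller free space first.
        });
    }
*/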
6831 
6832 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6833 {
6834  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6835 public:
6836  VmaDefragmentationAlgorithm_Fast(
6837  VmaAllocator hAllocator,
6838  VmaBlockVector* pBlockVector,
6839  uint32_t currentFrameIndex,
6840  bool overlappingMoveSupported);
6841  virtual ~VmaDefragmentationAlgorithm_Fast();
6842 
6843  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6844  virtual void AddAll() { m_AllAllocations = true; }
6845 
6846  virtual VkResult Defragment(
6847  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6848  VkDeviceSize maxBytesToMove,
6849  uint32_t maxAllocationsToMove,
6850  VmaDefragmentationFlags flags);
6851 
6852  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6853  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6854 
6855 private:
6856  struct BlockInfo
6857  {
6858  size_t origBlockIndex;
6859  };
6860 
6861  class FreeSpaceDatabase
6862  {
6863  public:
6864  FreeSpaceDatabase()
6865  {
6866  FreeSpace s = {};
6867  s.blockInfoIndex = SIZE_MAX;
6868  for(size_t i = 0; i < MAX_COUNT; ++i)
6869  {
6870  m_FreeSpaces[i] = s;
6871  }
6872  }
6873 
6874  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6875  {
6876  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6877  {
6878  return;
6879  }
6880 
6881  // Find the first invalid entry or, failing that, the smallest entry smaller than the new range.
6882  size_t bestIndex = SIZE_MAX;
6883  for(size_t i = 0; i < MAX_COUNT; ++i)
6884  {
6885  // Empty structure.
6886  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6887  {
6888  bestIndex = i;
6889  break;
6890  }
6891  if(m_FreeSpaces[i].size < size &&
6892  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6893  {
6894  bestIndex = i;
6895  }
6896  }
6897 
6898  if(bestIndex != SIZE_MAX)
6899  {
6900  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6901  m_FreeSpaces[bestIndex].offset = offset;
6902  m_FreeSpaces[bestIndex].size = size;
6903  }
6904  }
6905 
6906  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6907  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6908  {
6909  size_t bestIndex = SIZE_MAX;
6910  VkDeviceSize bestFreeSpaceAfter = 0;
6911  for(size_t i = 0; i < MAX_COUNT; ++i)
6912  {
6913  // Structure is valid.
6914  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6915  {
6916  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6917  // Allocation fits into this structure.
6918  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6919  {
6920  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6921  (dstOffset + size);
6922  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6923  {
6924  bestIndex = i;
6925  bestFreeSpaceAfter = freeSpaceAfter;
6926  }
6927  }
6928  }
6929  }
6930 
6931  if(bestIndex != SIZE_MAX)
6932  {
6933  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6934  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6935 
6936  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6937  {
6938  // Leave this structure for remaining empty space.
6939  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6940  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6941  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6942  }
6943  else
6944  {
6945  // This structure becomes invalid.
6946  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6947  }
6948 
6949  return true;
6950  }
6951 
6952  return false;
6953  }
6954 
6955  private:
6956  static const size_t MAX_COUNT = 4;
6957 
6958  struct FreeSpace
6959  {
6960  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6961  VkDeviceSize offset;
6962  VkDeviceSize size;
6963  } m_FreeSpaces[MAX_COUNT];
6964  };
6965 
6966  const bool m_OverlappingMoveSupported;
6967 
6968  uint32_t m_AllocationCount;
6969  bool m_AllAllocations;
6970 
6971  VkDeviceSize m_BytesMoved;
6972  uint32_t m_AllocationsMoved;
6973 
6974  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6975 
6976  void PreprocessMetadata();
6977  void PostprocessMetadata();
6978  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6979 };
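/*
Note (not part of the library): FreeSpaceDatabase above is a tiny fixed-size
cache (MAX_COUNT = 4) of the largest free ranges seen so far. Register()
evicts the smallest cached range when the cache is full, and Fetch() picks the
entry that leaves the most space after an aligned allocation, then either
shrinks that entry or invalidates it. Worked example: with a cached range
{offset = 100, size = 200} and a call Fetch(alignment = 64, size = 100),
dstOffset = VmaAlignUp(100, 64) = 128; the request fits because
128 + 100 <= 300, and the entry is shrunk to the remaining 72 bytes (assuming
that exceeds VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER).
*/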
6980 
6981 struct VmaBlockDefragmentationContext
6982 {
6983  enum BLOCK_FLAG
6984  {
6985  BLOCK_FLAG_USED = 0x00000001,
6986  };
6987  uint32_t flags;
6988  VkBuffer hBuffer;
6989 };
6990 
6991 class VmaBlockVectorDefragmentationContext
6992 {
6993  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6994 public:
6995  VkResult res;
6996  bool mutexLocked;
6997  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6998  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
6999  uint32_t defragmentationMovesProcessed;
7000  uint32_t defragmentationMovesCommitted;
7001  bool hasDefragmentationPlan;
7002 
7003  VmaBlockVectorDefragmentationContext(
7004  VmaAllocator hAllocator,
7005  VmaPool hCustomPool, // Optional.
7006  VmaBlockVector* pBlockVector,
7007  uint32_t currFrameIndex);
7008  ~VmaBlockVectorDefragmentationContext();
7009 
7010  VmaPool GetCustomPool() const { return m_hCustomPool; }
7011  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7012  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7013 
7014  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7015  void AddAll() { m_AllAllocations = true; }
7016 
7017  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7018 
7019 private:
7020  const VmaAllocator m_hAllocator;
7021  // Null if not from custom pool.
7022  const VmaPool m_hCustomPool;
7023  // Redundant, stored for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7024  VmaBlockVector* const m_pBlockVector;
7025  const uint32_t m_CurrFrameIndex;
7026  // Owner of this object.
7027  VmaDefragmentationAlgorithm* m_pAlgorithm;
7028 
7029  struct AllocInfo
7030  {
7031  VmaAllocation hAlloc;
7032  VkBool32* pChanged;
7033  };
7034  // Used between constructor and Begin.
7035  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7036  bool m_AllAllocations;
7037 };
7038 
7039 struct VmaDefragmentationContext_T
7040 {
7041 private:
7042  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7043 public:
7044  VmaDefragmentationContext_T(
7045  VmaAllocator hAllocator,
7046  uint32_t currFrameIndex,
7047  uint32_t flags,
7048  VmaDefragmentationStats* pStats);
7049  ~VmaDefragmentationContext_T();
7050 
7051  void AddPools(uint32_t poolCount, VmaPool* pPools);
7052  void AddAllocations(
7053  uint32_t allocationCount,
7054  VmaAllocation* pAllocations,
7055  VkBool32* pAllocationsChanged);
7056 
7057  /*
7058  Returns:
7059  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
7060  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7061  - Negative value if an error occurred and the object can be destroyed immediately.
7062  */
7063  VkResult Defragment(
7064  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7065  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7066  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7067 
7068  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7069  VkResult DefragmentPassEnd();
7070 
7071 private:
7072  const VmaAllocator m_hAllocator;
7073  const uint32_t m_CurrFrameIndex;
7074  const uint32_t m_Flags;
7075  VmaDefragmentationStats* const m_pStats;
7076 
7077  VkDeviceSize m_MaxCpuBytesToMove;
7078  uint32_t m_MaxCpuAllocationsToMove;
7079  VkDeviceSize m_MaxGpuBytesToMove;
7080  uint32_t m_MaxGpuAllocationsToMove;
7081 
7082  // Owner of these objects.
7083  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7084  // Owner of these objects.
7085  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7086 };
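/*
Illustration (not part of the library): the contexts above implement the
public defragmentation API. A minimal CPU-side sketch, assuming `allocator`
and a std::vector<VmaAllocation> `allocs` exist in the caller's code:

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocs.size();
    info.pAllocations = allocs.data();
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    if(res >= 0) // VK_SUCCESS or VK_NOT_READY - see Defragment() above.
    {
        vmaDefragmentationEnd(allocator, ctx); // Destroys the context in either case.
    }
*/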
7087 
7088 #if VMA_RECORDING_ENABLED
7089 
7090 class VmaRecorder
7091 {
7092 public:
7093  VmaRecorder();
7094  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7095  void WriteConfiguration(
7096  const VkPhysicalDeviceProperties& devProps,
7097  const VkPhysicalDeviceMemoryProperties& memProps,
7098  uint32_t vulkanApiVersion,
7099  bool dedicatedAllocationExtensionEnabled,
7100  bool bindMemory2ExtensionEnabled,
7101  bool memoryBudgetExtensionEnabled,
7102  bool deviceCoherentMemoryExtensionEnabled);
7103  ~VmaRecorder();
7104 
7105  void RecordCreateAllocator(uint32_t frameIndex);
7106  void RecordDestroyAllocator(uint32_t frameIndex);
7107  void RecordCreatePool(uint32_t frameIndex,
7108  const VmaPoolCreateInfo& createInfo,
7109  VmaPool pool);
7110  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7111  void RecordAllocateMemory(uint32_t frameIndex,
7112  const VkMemoryRequirements& vkMemReq,
7113  const VmaAllocationCreateInfo& createInfo,
7114  VmaAllocation allocation);
7115  void RecordAllocateMemoryPages(uint32_t frameIndex,
7116  const VkMemoryRequirements& vkMemReq,
7117  const VmaAllocationCreateInfo& createInfo,
7118  uint64_t allocationCount,
7119  const VmaAllocation* pAllocations);
7120  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7121  const VkMemoryRequirements& vkMemReq,
7122  bool requiresDedicatedAllocation,
7123  bool prefersDedicatedAllocation,
7124  const VmaAllocationCreateInfo& createInfo,
7125  VmaAllocation allocation);
7126  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7127  const VkMemoryRequirements& vkMemReq,
7128  bool requiresDedicatedAllocation,
7129  bool prefersDedicatedAllocation,
7130  const VmaAllocationCreateInfo& createInfo,
7131  VmaAllocation allocation);
7132  void RecordFreeMemory(uint32_t frameIndex,
7133  VmaAllocation allocation);
7134  void RecordFreeMemoryPages(uint32_t frameIndex,
7135  uint64_t allocationCount,
7136  const VmaAllocation* pAllocations);
7137  void RecordSetAllocationUserData(uint32_t frameIndex,
7138  VmaAllocation allocation,
7139  const void* pUserData);
7140  void RecordCreateLostAllocation(uint32_t frameIndex,
7141  VmaAllocation allocation);
7142  void RecordMapMemory(uint32_t frameIndex,
7143  VmaAllocation allocation);
7144  void RecordUnmapMemory(uint32_t frameIndex,
7145  VmaAllocation allocation);
7146  void RecordFlushAllocation(uint32_t frameIndex,
7147  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7148  void RecordInvalidateAllocation(uint32_t frameIndex,
7149  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7150  void RecordCreateBuffer(uint32_t frameIndex,
7151  const VkBufferCreateInfo& bufCreateInfo,
7152  const VmaAllocationCreateInfo& allocCreateInfo,
7153  VmaAllocation allocation);
7154  void RecordCreateImage(uint32_t frameIndex,
7155  const VkImageCreateInfo& imageCreateInfo,
7156  const VmaAllocationCreateInfo& allocCreateInfo,
7157  VmaAllocation allocation);
7158  void RecordDestroyBuffer(uint32_t frameIndex,
7159  VmaAllocation allocation);
7160  void RecordDestroyImage(uint32_t frameIndex,
7161  VmaAllocation allocation);
7162  void RecordTouchAllocation(uint32_t frameIndex,
7163  VmaAllocation allocation);
7164  void RecordGetAllocationInfo(uint32_t frameIndex,
7165  VmaAllocation allocation);
7166  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7167  VmaPool pool);
7168  void RecordDefragmentationBegin(uint32_t frameIndex,
7169  const VmaDefragmentationInfo2& info,
7170  VmaDefragmentationContext ctx);
7171  void RecordDefragmentationEnd(uint32_t frameIndex,
7172  VmaDefragmentationContext ctx);
7173  void RecordSetPoolName(uint32_t frameIndex,
7174  VmaPool pool,
7175  const char* name);
7176 
7177 private:
7178  struct CallParams
7179  {
7180  uint32_t threadId;
7181  double time;
7182  };
7183 
7184  class UserDataString
7185  {
7186  public:
7187  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7188  const char* GetString() const { return m_Str; }
7189 
7190  private:
7191  char m_PtrStr[17];
7192  const char* m_Str;
7193  };
7194 
7195  bool m_UseMutex;
7196  VmaRecordFlags m_Flags;
7197  FILE* m_File;
7198  VMA_MUTEX m_FileMutex;
7199  int64_t m_Freq;
7200  int64_t m_StartCounter;
7201 
7202  void GetBasicParams(CallParams& outParams);
7203 
7204  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7205  template<typename T>
7206  void PrintPointerList(uint64_t count, const T* pItems)
7207  {
7208  if(count)
7209  {
7210  fprintf(m_File, "%p", pItems[0]);
7211  for(uint64_t i = 1; i < count; ++i)
7212  {
7213  fprintf(m_File, " %p", pItems[i]);
7214  }
7215  }
7216  }
7217 
7218  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7219  void Flush();
7220 };
7221 
7222 #endif // #if VMA_RECORDING_ENABLED
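/*
Illustration (not part of the library): VmaRecorder is internal; recording is
enabled through the public VmaAllocatorCreateInfo::pRecordSettings member and
requires compiling with VMA_RECORDING_ENABLED == 1. A minimal sketch, assuming
the rest of allocatorInfo is filled in elsewhere:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Flush file after each call.
    recordSettings.pFilePath = "vma_replay.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pRecordSettings = &recordSettings;
*/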
7223 
7224 /*
7225 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7226 */
7227 class VmaAllocationObjectAllocator
7228 {
7229  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7230 public:
7231  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7232 
7233  template<typename... Types> VmaAllocation Allocate(Types... args);
7234  void Free(VmaAllocation hAlloc);
7235 
7236 private:
7237  VMA_MUTEX m_Mutex;
7238  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7239 };
7240 
7241 struct VmaCurrentBudgetData
7242 {
7243  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7244  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7245 
7246 #if VMA_MEMORY_BUDGET
7247  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7248  VMA_RW_MUTEX m_BudgetMutex;
7249  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7250  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7251  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7252 #endif // #if VMA_MEMORY_BUDGET
7253 
7254  VmaCurrentBudgetData()
7255  {
7256  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7257  {
7258  m_BlockBytes[heapIndex] = 0;
7259  m_AllocationBytes[heapIndex] = 0;
7260 #if VMA_MEMORY_BUDGET
7261  m_VulkanUsage[heapIndex] = 0;
7262  m_VulkanBudget[heapIndex] = 0;
7263  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7264 #endif
7265  }
7266 
7267 #if VMA_MEMORY_BUDGET
7268  m_OperationsSinceBudgetFetch = 0;
7269 #endif
7270  }
7271 
7272  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7273  {
7274  m_AllocationBytes[heapIndex] += allocationSize;
7275 #if VMA_MEMORY_BUDGET
7276  ++m_OperationsSinceBudgetFetch;
7277 #endif
7278  }
7279 
7280  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7281  {
7282  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
7283  m_AllocationBytes[heapIndex] -= allocationSize;
7284 #if VMA_MEMORY_BUDGET
7285  ++m_OperationsSinceBudgetFetch;
7286 #endif
7287  }
7288 };
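/*
Illustration (not part of the library): VmaCurrentBudgetData above backs the
public vmaGetBudget() query. A minimal sketch, assuming `allocator` and a
VkPhysicalDeviceMemoryProperties `memProps` exist in the caller's code:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets); // Fills one entry per memory heap.
    for(uint32_t heap = 0; heap < memProps.memoryHeapCount; ++heap)
    {
        printf("Heap %u: %llu B used of %llu B budget.\n", heap,
            (unsigned long long)budgets[heap].usage,
            (unsigned long long)budgets[heap].budget);
    }
*/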
7289 
7290 // Main allocator object.
7291 struct VmaAllocator_T
7292 {
7293  VMA_CLASS_NO_COPY(VmaAllocator_T)
7294 public:
7295  bool m_UseMutex;
7296  uint32_t m_VulkanApiVersion;
7297  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7298  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7299  bool m_UseExtMemoryBudget;
7300  bool m_UseAmdDeviceCoherentMemory;
7301  bool m_UseKhrBufferDeviceAddress;
7302  VkDevice m_hDevice;
7303  VkInstance m_hInstance;
7304  bool m_AllocationCallbacksSpecified;
7305  VkAllocationCallbacks m_AllocationCallbacks;
7306  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7307  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7308 
7309  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so the allocator cannot allocate more than the heap size there.
7310  uint32_t m_HeapSizeLimitMask;
7311 
7312  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7313  VkPhysicalDeviceMemoryProperties m_MemProps;
7314 
7315  // Default pools.
7316  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7317 
7318  // Each vector is sorted by memory (handle value).
7319  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7320  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7321  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7322 
7323  VmaCurrentBudgetData m_Budget;
7324 
7325  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7326  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7327  ~VmaAllocator_T();
7328 
7329  const VkAllocationCallbacks* GetAllocationCallbacks() const
7330  {
7331  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7332  }
7333  const VmaVulkanFunctions& GetVulkanFunctions() const
7334  {
7335  return m_VulkanFunctions;
7336  }
7337 
7338  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7339 
7340  VkDeviceSize GetBufferImageGranularity() const
7341  {
7342  return VMA_MAX(
7343  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7344  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7345  }
7346 
7347  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7348  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7349 
7350  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7351  {
7352  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7353  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7354  }
7355  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7356  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7357  {
7358  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7359  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7360  }
7361  // Minimum alignment for all allocations in specific memory type.
7362  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7363  {
7364  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7365  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7366  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7367  }
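/*
Note (not part of the library): the minimum alignment above matters for memory
that is HOST_VISIBLE but not HOST_COHERENT, where flushed ranges must be
aligned to VkPhysicalDeviceLimits::nonCoherentAtomSize. With
nonCoherentAtomSize = 64, for example, an allocation at offset 100 could not
be flushed precisely, so such allocations are placed at multiples of 64.
The equivalent raw Vulkan flush, with `memory` and `device` assumed:

    VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    range.memory = memory;
    range.offset = alignedOffset; // Must be a multiple of nonCoherentAtomSize.
    range.size   = alignedSize;   // Multiple of it as well, or VK_WHOLE_SIZE.
    vkFlushMappedMemoryRanges(device, 1, &range);
*/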
7368 
7369  bool IsIntegratedGpu() const
7370  {
7371  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7372  }
7373 
7374  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7375 
7376 #if VMA_RECORDING_ENABLED
7377  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7378 #endif
7379 
7380  void GetBufferMemoryRequirements(
7381  VkBuffer hBuffer,
7382  VkMemoryRequirements& memReq,
7383  bool& requiresDedicatedAllocation,
7384  bool& prefersDedicatedAllocation) const;
7385  void GetImageMemoryRequirements(
7386  VkImage hImage,
7387  VkMemoryRequirements& memReq,
7388  bool& requiresDedicatedAllocation,
7389  bool& prefersDedicatedAllocation) const;
7390 
7391  // Main allocation function.
7392  VkResult AllocateMemory(
7393  const VkMemoryRequirements& vkMemReq,
7394  bool requiresDedicatedAllocation,
7395  bool prefersDedicatedAllocation,
7396  VkBuffer dedicatedBuffer,
7397  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7398  VkImage dedicatedImage,
7399  const VmaAllocationCreateInfo& createInfo,
7400  VmaSuballocationType suballocType,
7401  size_t allocationCount,
7402  VmaAllocation* pAllocations);
7403 
7404  // Main deallocation function.
7405  void FreeMemory(
7406  size_t allocationCount,
7407  const VmaAllocation* pAllocations);
7408 
7409  VkResult ResizeAllocation(
7410  const VmaAllocation alloc,
7411  VkDeviceSize newSize);
7412 
7413  void CalculateStats(VmaStats* pStats);
7414 
7415  void GetBudget(
7416  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7417 
7418 #if VMA_STATS_STRING_ENABLED
7419  void PrintDetailedMap(class VmaJsonWriter& json);
7420 #endif
7421 
7422  VkResult DefragmentationBegin(
7423  const VmaDefragmentationInfo2& info,
7424  VmaDefragmentationStats* pStats,
7425  VmaDefragmentationContext* pContext);
7426  VkResult DefragmentationEnd(
7427  VmaDefragmentationContext context);
7428 
7429  VkResult DefragmentationPassBegin(
7430  VmaDefragmentationPassInfo* pInfo,
7431  VmaDefragmentationContext context);
7432  VkResult DefragmentationPassEnd(
7433  VmaDefragmentationContext context);
7434 
7435  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7436  bool TouchAllocation(VmaAllocation hAllocation);
7437 
7438  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7439  void DestroyPool(VmaPool pool);
7440  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7441 
7442  void SetCurrentFrameIndex(uint32_t frameIndex);
7443  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7444 
7445  void MakePoolAllocationsLost(
7446  VmaPool hPool,
7447  size_t* pLostAllocationCount);
7448  VkResult CheckPoolCorruption(VmaPool hPool);
7449  VkResult CheckCorruption(uint32_t memoryTypeBits);
7450 
7451  void CreateLostAllocation(VmaAllocation* pAllocation);
7452 
7453  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7454  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7455  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7456  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7457  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7458  VkResult BindVulkanBuffer(
7459  VkDeviceMemory memory,
7460  VkDeviceSize memoryOffset,
7461  VkBuffer buffer,
7462  const void* pNext);
7463  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7464  VkResult BindVulkanImage(
7465  VkDeviceMemory memory,
7466  VkDeviceSize memoryOffset,
7467  VkImage image,
7468  const void* pNext);
7469 
7470  VkResult Map(VmaAllocation hAllocation, void** ppData);
7471  void Unmap(VmaAllocation hAllocation);
7472 
7473  VkResult BindBufferMemory(
7474  VmaAllocation hAllocation,
7475  VkDeviceSize allocationLocalOffset,
7476  VkBuffer hBuffer,
7477  const void* pNext);
7478  VkResult BindImageMemory(
7479  VmaAllocation hAllocation,
7480  VkDeviceSize allocationLocalOffset,
7481  VkImage hImage,
7482  const void* pNext);
7483 
7484  void FlushOrInvalidateAllocation(
7485  VmaAllocation hAllocation,
7486  VkDeviceSize offset, VkDeviceSize size,
7487  VMA_CACHE_OPERATION op);
7488 
7489  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7490 
7491  /*
7492  Returns bit mask of memory types that can support defragmentation on GPU as
7493  they support creation of required buffer for copy operations.
7494  */
7495  uint32_t GetGpuDefragmentationMemoryTypeBits();
7496 
7497 private:
7498  VkDeviceSize m_PreferredLargeHeapBlockSize;
7499 
7500  VkPhysicalDevice m_PhysicalDevice;
7501  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7502  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7503 
7504  VMA_RW_MUTEX m_PoolsMutex;
7505  // Protected by m_PoolsMutex. Sorted by pointer value.
7506  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7507  uint32_t m_NextPoolId;
7508 
7509  VmaVulkanFunctions m_VulkanFunctions;
7510 
7511  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7512  uint32_t m_GlobalMemoryTypeBits;
7513 
7514 #if VMA_RECORDING_ENABLED
7515  VmaRecorder* m_pRecorder;
7516 #endif
7517 
7518  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7519 
7520  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7521 
7522  VkResult AllocateMemoryOfType(
7523  VkDeviceSize size,
7524  VkDeviceSize alignment,
7525  bool dedicatedAllocation,
7526  VkBuffer dedicatedBuffer,
7527  VkBufferUsageFlags dedicatedBufferUsage,
7528  VkImage dedicatedImage,
7529  const VmaAllocationCreateInfo& createInfo,
7530  uint32_t memTypeIndex,
7531  VmaSuballocationType suballocType,
7532  size_t allocationCount,
7533  VmaAllocation* pAllocations);
7534 
7535  // Helper function only to be used inside AllocateDedicatedMemory.
7536  VkResult AllocateDedicatedMemoryPage(
7537  VkDeviceSize size,
7538  VmaSuballocationType suballocType,
7539  uint32_t memTypeIndex,
7540  const VkMemoryAllocateInfo& allocInfo,
7541  bool map,
7542  bool isUserDataString,
7543  void* pUserData,
7544  VmaAllocation* pAllocation);
7545 
7546  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7547  VkResult AllocateDedicatedMemory(
7548  VkDeviceSize size,
7549  VmaSuballocationType suballocType,
7550  uint32_t memTypeIndex,
7551  bool withinBudget,
7552  bool map,
7553  bool isUserDataString,
7554  void* pUserData,
7555  VkBuffer dedicatedBuffer,
7556  VkBufferUsageFlags dedicatedBufferUsage,
7557  VkImage dedicatedImage,
7558  size_t allocationCount,
7559  VmaAllocation* pAllocations);
7560 
7561  void FreeDedicatedMemory(const VmaAllocation allocation);
7562 
7563  /*
7564  Calculates and returns bit mask of memory types that can support defragmentation
7565  on GPU as they support creation of required buffer for copy operations.
7566  */
7567  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7568 
7569  uint32_t CalculateGlobalMemoryTypeBits() const;
7570 
7571 #if VMA_MEMORY_BUDGET
7572  void UpdateVulkanBudget();
7573 #endif // #if VMA_MEMORY_BUDGET
7574 };
7575 
7577 // Memory allocation #2 after VmaAllocator_T definition
7578 
7579 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7580 {
7581  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7582 }
7583 
7584 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7585 {
7586  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7587 }
7588 
7589 template<typename T>
7590 static T* VmaAllocate(VmaAllocator hAllocator)
7591 {
7592  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7593 }
7594 
7595 template<typename T>
7596 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7597 {
7598  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7599 }
7600 
7601 template<typename T>
7602 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7603 {
7604  if(ptr != VMA_NULL)
7605  {
7606  ptr->~T();
7607  VmaFree(hAllocator, ptr);
7608  }
7609 }
7610 
7611 template<typename T>
7612 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7613 {
7614  if(ptr != VMA_NULL)
7615  {
7616  for(size_t i = count; i--; )
7617  ptr[i].~T();
7618  VmaFree(hAllocator, ptr);
7619  }
7620 }
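/*
Illustration (not part of the library): these helpers route internal object
construction and destruction through the allocator's VkAllocationCallbacks.
A typical pattern, with a hypothetical type Foo:

    Foo* p = new(VmaAllocate<Foo>(hAllocator)) Foo(); // Placement-new into raw storage.
    // ... use p ...
    vma_delete(hAllocator, p); // Calls ~Foo() and frees the storage.
*/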
7621 
7623 // VmaStringBuilder
7624 
7625 #if VMA_STATS_STRING_ENABLED
7626 
7627 class VmaStringBuilder
7628 {
7629 public:
7630  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7631  size_t GetLength() const { return m_Data.size(); }
7632  const char* GetData() const { return m_Data.data(); }
7633 
7634  void Add(char ch) { m_Data.push_back(ch); }
7635  void Add(const char* pStr);
7636  void AddNewLine() { Add('\n'); }
7637  void AddNumber(uint32_t num);
7638  void AddNumber(uint64_t num);
7639  void AddPointer(const void* ptr);
7640 
7641 private:
7642  VmaVector< char, VmaStlAllocator<char> > m_Data;
7643 };
7644 
7645 void VmaStringBuilder::Add(const char* pStr)
7646 {
7647  const size_t strLen = strlen(pStr);
7648  if(strLen > 0)
7649  {
7650  const size_t oldCount = m_Data.size();
7651  m_Data.resize(oldCount + strLen);
7652  memcpy(m_Data.data() + oldCount, pStr, strLen);
7653  }
7654 }
7655 
7656 void VmaStringBuilder::AddNumber(uint32_t num)
7657 {
7658  char buf[11];
7659  buf[10] = '\0';
7660  char *p = &buf[10];
7661  do
7662  {
7663  *--p = '0' + (num % 10);
7664  num /= 10;
7665  }
7666  while(num);
7667  Add(p);
7668 }
7669 
7670 void VmaStringBuilder::AddNumber(uint64_t num)
7671 {
7672  char buf[21];
7673  buf[20] = '\0';
7674  char *p = &buf[20];
7675  do
7676  {
7677  *--p = '0' + (num % 10);
7678  num /= 10;
7679  }
7680  while(num);
7681  Add(p);
7682 }
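/*
Worked example (not part of the library): both AddNumber() overloads build the
decimal string backwards from the end of a stack buffer. For num = 407, the
loop writes '7', '0', '4' while moving p toward the front, so Add(p) appends
"407". The buffer sizes 11 and 21 fit the largest values plus the terminator:
4294967295 has 10 digits and 18446744073709551615 has 20.
*/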
7683 
7684 void VmaStringBuilder::AddPointer(const void* ptr)
7685 {
7686  char buf[21];
7687  VmaPtrToStr(buf, sizeof(buf), ptr);
7688  Add(buf);
7689 }
7690 
7691 #endif // #if VMA_STATS_STRING_ENABLED
7692 
7694 // VmaJsonWriter
7695 
7696 #if VMA_STATS_STRING_ENABLED
7697 
7698 class VmaJsonWriter
7699 {
7700  VMA_CLASS_NO_COPY(VmaJsonWriter)
7701 public:
7702  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7703  ~VmaJsonWriter();
7704 
7705  void BeginObject(bool singleLine = false);
7706  void EndObject();
7707 
7708  void BeginArray(bool singleLine = false);
7709  void EndArray();
7710 
7711  void WriteString(const char* pStr);
7712  void BeginString(const char* pStr = VMA_NULL);
7713  void ContinueString(const char* pStr);
7714  void ContinueString(uint32_t n);
7715  void ContinueString(uint64_t n);
7716  void ContinueString_Pointer(const void* ptr);
7717  void EndString(const char* pStr = VMA_NULL);
7718 
7719  void WriteNumber(uint32_t n);
7720  void WriteNumber(uint64_t n);
7721  void WriteBool(bool b);
7722  void WriteNull();
7723 
7724 private:
7725  static const char* const INDENT;
7726 
7727  enum COLLECTION_TYPE
7728  {
7729  COLLECTION_TYPE_OBJECT,
7730  COLLECTION_TYPE_ARRAY,
7731  };
7732  struct StackItem
7733  {
7734  COLLECTION_TYPE type;
7735  uint32_t valueCount;
7736  bool singleLineMode;
7737  };
7738 
7739  VmaStringBuilder& m_SB;
7740  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7741  bool m_InsideString;
7742 
7743  void BeginValue(bool isString);
7744  void WriteIndent(bool oneLess = false);
7745 };
7746 
7747 const char* const VmaJsonWriter::INDENT = " ";
7748 
7749 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7750  m_SB(sb),
7751  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7752  m_InsideString(false)
7753 {
7754 }
7755 
7756 VmaJsonWriter::~VmaJsonWriter()
7757 {
7758  VMA_ASSERT(!m_InsideString);
7759  VMA_ASSERT(m_Stack.empty());
7760 }
7761 
7762 void VmaJsonWriter::BeginObject(bool singleLine)
7763 {
7764  VMA_ASSERT(!m_InsideString);
7765 
7766  BeginValue(false);
7767  m_SB.Add('{');
7768 
7769  StackItem item;
7770  item.type = COLLECTION_TYPE_OBJECT;
7771  item.valueCount = 0;
7772  item.singleLineMode = singleLine;
7773  m_Stack.push_back(item);
7774 }
7775 
7776 void VmaJsonWriter::EndObject()
7777 {
7778  VMA_ASSERT(!m_InsideString);
7779 
7780  WriteIndent(true);
7781  m_SB.Add('}');
7782 
7783  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7784  m_Stack.pop_back();
7785 }
7786 
7787 void VmaJsonWriter::BeginArray(bool singleLine)
7788 {
7789  VMA_ASSERT(!m_InsideString);
7790 
7791  BeginValue(false);
7792  m_SB.Add('[');
7793 
7794  StackItem item;
7795  item.type = COLLECTION_TYPE_ARRAY;
7796  item.valueCount = 0;
7797  item.singleLineMode = singleLine;
7798  m_Stack.push_back(item);
7799 }
7800 
7801 void VmaJsonWriter::EndArray()
7802 {
7803  VMA_ASSERT(!m_InsideString);
7804 
7805  WriteIndent(true);
7806  m_SB.Add(']');
7807 
7808  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7809  m_Stack.pop_back();
7810 }
7811 
7812 void VmaJsonWriter::WriteString(const char* pStr)
7813 {
7814  BeginString(pStr);
7815  EndString();
7816 }
7817 
7818 void VmaJsonWriter::BeginString(const char* pStr)
7819 {
7820  VMA_ASSERT(!m_InsideString);
7821 
7822  BeginValue(true);
7823  m_SB.Add('"');
7824  m_InsideString = true;
7825  if(pStr != VMA_NULL && pStr[0] != '\0')
7826  {
7827  ContinueString(pStr);
7828  }
7829 }
7830 
7831 void VmaJsonWriter::ContinueString(const char* pStr)
7832 {
7833  VMA_ASSERT(m_InsideString);
7834 
7835  const size_t strLen = strlen(pStr);
7836  for(size_t i = 0; i < strLen; ++i)
7837  {
7838  char ch = pStr[i];
7839  if(ch == '\\')
7840  {
7841  m_SB.Add("\\\\");
7842  }
7843  else if(ch == '"')
7844  {
7845  m_SB.Add("\\\"");
7846  }
7847  else if(ch >= 32)
7848  {
7849  m_SB.Add(ch);
7850  }
7851  else switch(ch)
7852  {
7853  case '\b':
7854  m_SB.Add("\\b");
7855  break;
7856  case '\f':
7857  m_SB.Add("\\f");
7858  break;
7859  case '\n':
7860  m_SB.Add("\\n");
7861  break;
7862  case '\r':
7863  m_SB.Add("\\r");
7864  break;
7865  case '\t':
7866  m_SB.Add("\\t");
7867  break;
7868  default:
7869  VMA_ASSERT(0 && "Character not currently supported.");
7870  break;
7871  }
7872  }
7873 }
7874 
7875 void VmaJsonWriter::ContinueString(uint32_t n)
7876 {
7877  VMA_ASSERT(m_InsideString);
7878  m_SB.AddNumber(n);
7879 }
7880 
7881 void VmaJsonWriter::ContinueString(uint64_t n)
7882 {
7883  VMA_ASSERT(m_InsideString);
7884  m_SB.AddNumber(n);
7885 }
7886 
7887 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7888 {
7889  VMA_ASSERT(m_InsideString);
7890  m_SB.AddPointer(ptr);
7891 }
7892 
7893 void VmaJsonWriter::EndString(const char* pStr)
7894 {
7895  VMA_ASSERT(m_InsideString);
7896  if(pStr != VMA_NULL && pStr[0] != '\0')
7897  {
7898  ContinueString(pStr);
7899  }
7900  m_SB.Add('"');
7901  m_InsideString = false;
7902 }
7903 
7904 void VmaJsonWriter::WriteNumber(uint32_t n)
7905 {
7906  VMA_ASSERT(!m_InsideString);
7907  BeginValue(false);
7908  m_SB.AddNumber(n);
7909 }
7910 
7911 void VmaJsonWriter::WriteNumber(uint64_t n)
7912 {
7913  VMA_ASSERT(!m_InsideString);
7914  BeginValue(false);
7915  m_SB.AddNumber(n);
7916 }
7917 
7918 void VmaJsonWriter::WriteBool(bool b)
7919 {
7920  VMA_ASSERT(!m_InsideString);
7921  BeginValue(false);
7922  m_SB.Add(b ? "true" : "false");
7923 }
7924 
7925 void VmaJsonWriter::WriteNull()
7926 {
7927  VMA_ASSERT(!m_InsideString);
7928  BeginValue(false);
7929  m_SB.Add("null");
7930 }
7931 
7932 void VmaJsonWriter::BeginValue(bool isString)
7933 {
7934  if(!m_Stack.empty())
7935  {
7936  StackItem& currItem = m_Stack.back();
7937  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7938  currItem.valueCount % 2 == 0)
7939  {
7940  VMA_ASSERT(isString);
7941  }
7942 
7943  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7944  currItem.valueCount % 2 != 0)
7945  {
7946  m_SB.Add(": ");
7947  }
7948  else if(currItem.valueCount > 0)
7949  {
7950  m_SB.Add(", ");
7951  WriteIndent();
7952  }
7953  else
7954  {
7955  WriteIndent();
7956  }
7957  ++currItem.valueCount;
7958  }
7959 }
7960 
7961 void VmaJsonWriter::WriteIndent(bool oneLess)
7962 {
7963  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7964  {
7965  m_SB.AddNewLine();
7966 
7967  size_t count = m_Stack.size();
7968  if(count > 0 && oneLess)
7969  {
7970  --count;
7971  }
7972  for(size_t i = 0; i < count; ++i)
7973  {
7974  m_SB.Add(INDENT);
7975  }
7976  }
7977 }
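/*
Illustration (not part of the library): inside an object, BeginValue() uses
the parity of valueCount to tell keys (even positions, must be strings) from
values (odd positions, prefixed with ": "). A minimal sketch, assuming
`callbacks` and a VmaStringBuilder `sb` exist:

    VmaJsonWriter json(callbacks, sb);
    json.BeginObject();
    json.WriteString("Count"); // Key - a string is required here.
    json.WriteNumber(42u);     // Value - ": " was emitted before it.
    json.EndObject();
    // sb now contains: {\n "Count": 42\n}
*/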
7978 
7979 #endif // #if VMA_STATS_STRING_ENABLED
7980 
7982 
7983 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7984 {
7985  if(IsUserDataString())
7986  {
7987  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7988 
7989  FreeUserDataString(hAllocator);
7990 
7991  if(pUserData != VMA_NULL)
7992  {
7993  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
7994  }
7995  }
7996  else
7997  {
7998  m_pUserData = pUserData;
7999  }
8000 }
8001 
8002 void VmaAllocation_T::ChangeBlockAllocation(
8003  VmaAllocator hAllocator,
8004  VmaDeviceMemoryBlock* block,
8005  VkDeviceSize offset)
8006 {
8007  VMA_ASSERT(block != VMA_NULL);
8008  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8009 
8010  // Move mapping reference counter from old block to new block.
8011  if(block != m_BlockAllocation.m_Block)
8012  {
8013  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8014  if(IsPersistentMap())
8015  ++mapRefCount;
8016  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8017  block->Map(hAllocator, mapRefCount, VMA_NULL);
8018  }
8019 
8020  m_BlockAllocation.m_Block = block;
8021  m_BlockAllocation.m_Offset = offset;
8022 }
8023 
8024 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8025 {
8026  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8027  m_BlockAllocation.m_Offset = newOffset;
8028 }
8029 
8030 VkDeviceSize VmaAllocation_T::GetOffset() const
8031 {
8032  switch(m_Type)
8033  {
8034  case ALLOCATION_TYPE_BLOCK:
8035  return m_BlockAllocation.m_Offset;
8036  case ALLOCATION_TYPE_DEDICATED:
8037  return 0;
8038  default:
8039  VMA_ASSERT(0);
8040  return 0;
8041  }
8042 }
8043 
8044 VkDeviceMemory VmaAllocation_T::GetMemory() const
8045 {
8046  switch(m_Type)
8047  {
8048  case ALLOCATION_TYPE_BLOCK:
8049  return m_BlockAllocation.m_Block->GetDeviceMemory();
8050  case ALLOCATION_TYPE_DEDICATED:
8051  return m_DedicatedAllocation.m_hMemory;
8052  default:
8053  VMA_ASSERT(0);
8054  return VK_NULL_HANDLE;
8055  }
8056 }
8057 
8058 void* VmaAllocation_T::GetMappedData() const
8059 {
8060  switch(m_Type)
8061  {
8062  case ALLOCATION_TYPE_BLOCK:
8063  if(m_MapCount != 0)
8064  {
8065  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8066  VMA_ASSERT(pBlockData != VMA_NULL);
8067  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8068  }
8069  else
8070  {
8071  return VMA_NULL;
8072  }
8073  break;
8074  case ALLOCATION_TYPE_DEDICATED:
8075  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8076  return m_DedicatedAllocation.m_pMappedData;
8077  default:
8078  VMA_ASSERT(0);
8079  return VMA_NULL;
8080  }
8081 }
8082 
8083 bool VmaAllocation_T::CanBecomeLost() const
8084 {
8085  switch(m_Type)
8086  {
8087  case ALLOCATION_TYPE_BLOCK:
8088  return m_BlockAllocation.m_CanBecomeLost;
8089  case ALLOCATION_TYPE_DEDICATED:
8090  return false;
8091  default:
8092  VMA_ASSERT(0);
8093  return false;
8094  }
8095 }
8096 
8097 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8098 {
8099  VMA_ASSERT(CanBecomeLost());
8100 
8101  /*
8102  Warning: This is a carefully designed algorithm.
8103  Do not modify unless you really know what you're doing :)
8104  */
8105  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8106  for(;;)
8107  {
8108  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8109  {
8110  VMA_ASSERT(0);
8111  return false;
8112  }
8113  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8114  {
8115  return false;
8116  }
8117  else // Last use time earlier than current time.
8118  {
8119  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8120  {
8121  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8122  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8123  return true;
8124  }
8125  }
8126  }
8127 }
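/*
Note (not part of the library): MakeLost() above is a standard lock-free
compare-and-swap retry loop. The generic shape of the pattern:

    uint32_t seen = value.load();
    for(;;)
    {
        const uint32_t desired = ComputeNewValue(seen); // Hypothetical helper.
        if(value.compare_exchange_weak(seen, desired))
            break; // CAS succeeded: seen was still the current value.
        // CAS failed: seen now holds the freshly loaded value - retry.
    }

Here the desired value is VMA_FRAME_INDEX_LOST, and instead of always
retrying, the loop returns false when the allocation is already lost or was
used too recently.
*/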
8128 
8129 #if VMA_STATS_STRING_ENABLED
8130 
8131 // Correspond to values of enum VmaSuballocationType.
8132 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8133  "FREE",
8134  "UNKNOWN",
8135  "BUFFER",
8136  "IMAGE_UNKNOWN",
8137  "IMAGE_LINEAR",
8138  "IMAGE_OPTIMAL",
8139 };
8140 
8141 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8142 {
8143  json.WriteString("Type");
8144  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8145 
8146  json.WriteString("Size");
8147  json.WriteNumber(m_Size);
8148 
8149  if(m_pUserData != VMA_NULL)
8150  {
8151  json.WriteString("UserData");
8152  if(IsUserDataString())
8153  {
8154  json.WriteString((const char*)m_pUserData);
8155  }
8156  else
8157  {
8158  json.BeginString();
8159  json.ContinueString_Pointer(m_pUserData);
8160  json.EndString();
8161  }
8162  }
8163 
8164  json.WriteString("CreationFrameIndex");
8165  json.WriteNumber(m_CreationFrameIndex);
8166 
8167  json.WriteString("LastUseFrameIndex");
8168  json.WriteNumber(GetLastUseFrameIndex());
8169 
8170  if(m_BufferImageUsage != 0)
8171  {
8172  json.WriteString("Usage");
8173  json.WriteNumber(m_BufferImageUsage);
8174  }
8175 }
8176 
8177 #endif
8178 
8179 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8180 {
8181  VMA_ASSERT(IsUserDataString());
8182  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8183  m_pUserData = VMA_NULL;
8184 }
8185 
8186 void VmaAllocation_T::BlockAllocMap()
8187 {
8188  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8189 
8190  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8191  {
8192  ++m_MapCount;
8193  }
8194  else
8195  {
8196  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8197  }
8198 }
8199 
8200 void VmaAllocation_T::BlockAllocUnmap()
8201 {
8202  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8203 
8204  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8205  {
8206  --m_MapCount;
8207  }
8208  else
8209  {
8210  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8211  }
8212 }
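/*
Note (not part of the library): m_MapCount packs two things into one small
counter: the low 7 bits are the map reference count (hence the 0x7F cap in
BlockAllocMap() above) and the MAP_COUNT_FLAG_PERSISTENT_MAP bit marks
allocations created persistently mapped. For example, a persistently mapped
allocation that was mapped twice more has
m_MapCount == (MAP_COUNT_FLAG_PERSISTENT_MAP | 2), so
(m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 2.
*/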
8213 
8214 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8215 {
8216  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8217 
8218  if(m_MapCount != 0)
8219  {
8220  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8221  {
8222  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8223  *ppData = m_DedicatedAllocation.m_pMappedData;
8224  ++m_MapCount;
8225  return VK_SUCCESS;
8226  }
8227  else
8228  {
8229  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8230  return VK_ERROR_MEMORY_MAP_FAILED;
8231  }
8232  }
8233  else
8234  {
8235  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8236  hAllocator->m_hDevice,
8237  m_DedicatedAllocation.m_hMemory,
8238  0, // offset
8239  VK_WHOLE_SIZE,
8240  0, // flags
8241  ppData);
8242  if(result == VK_SUCCESS)
8243  {
8244  m_DedicatedAllocation.m_pMappedData = *ppData;
8245  m_MapCount = 1;
8246  }
8247  return result;
8248  }
8249 }
8250 
8251 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8252 {
8253  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8254 
8255  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8256  {
8257  --m_MapCount;
8258  if(m_MapCount == 0)
8259  {
8260  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8261  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8262  hAllocator->m_hDevice,
8263  m_DedicatedAllocation.m_hMemory);
8264  }
8265  }
8266  else
8267  {
8268  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8269  }
8270 }
8271 
8272 #if VMA_STATS_STRING_ENABLED
8273 
8274 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8275 {
8276  json.BeginObject();
8277 
8278  json.WriteString("Blocks");
8279  json.WriteNumber(stat.blockCount);
8280 
8281  json.WriteString("Allocations");
8282  json.WriteNumber(stat.allocationCount);
8283 
8284  json.WriteString("UnusedRanges");
8285  json.WriteNumber(stat.unusedRangeCount);
8286 
8287  json.WriteString("UsedBytes");
8288  json.WriteNumber(stat.usedBytes);
8289 
8290  json.WriteString("UnusedBytes");
8291  json.WriteNumber(stat.unusedBytes);
8292 
8293  if(stat.allocationCount > 1)
8294  {
8295  json.WriteString("AllocationSize");
8296  json.BeginObject(true);
8297  json.WriteString("Min");
8298  json.WriteNumber(stat.allocationSizeMin);
8299  json.WriteString("Avg");
8300  json.WriteNumber(stat.allocationSizeAvg);
8301  json.WriteString("Max");
8302  json.WriteNumber(stat.allocationSizeMax);
8303  json.EndObject();
8304  }
8305 
8306  if(stat.unusedRangeCount > 1)
8307  {
8308  json.WriteString("UnusedRangeSize");
8309  json.BeginObject(true);
8310  json.WriteString("Min");
8311  json.WriteNumber(stat.unusedRangeSizeMin);
8312  json.WriteString("Avg");
8313  json.WriteNumber(stat.unusedRangeSizeAvg);
8314  json.WriteString("Max");
8315  json.WriteNumber(stat.unusedRangeSizeMax);
8316  json.EndObject();
8317  }
8318 
8319  json.EndObject();
8320 }
8321 
8322 #endif // #if VMA_STATS_STRING_ENABLED
8323 
8324 struct VmaSuballocationItemSizeLess
8325 {
8326  bool operator()(
8327  const VmaSuballocationList::iterator lhs,
8328  const VmaSuballocationList::iterator rhs) const
8329  {
8330  return lhs->size < rhs->size;
8331  }
8332  bool operator()(
8333  const VmaSuballocationList::iterator lhs,
8334  VkDeviceSize rhsSize) const
8335  {
8336  return lhs->size < rhsSize;
8337  }
8338 };
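/*
Illustration (not part of the library): the two operator() overloads above let
one comparator serve both element-vs-element sorting and element-vs-key binary
search. The same idea with std::lower_bound and hypothetical types:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Item { uint64_t size; };
    struct ItemSizeLess
    {
        bool operator()(const Item& l, const Item& r) const { return l.size < r.size; }
        bool operator()(const Item& l, uint64_t rSize) const { return l.size < rSize; }
    };

    // In a vector sorted by size, find the first item not smaller than 256:
    // auto it = std::lower_bound(v.begin(), v.end(), uint64_t(256), ItemSizeLess());

CreateAllocationRequest() below uses VmaBinaryFindFirstNotLess with this
comparator in exactly that way on m_FreeSuballocationsBySize.
*/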
8339 
8340 
8342 // class VmaBlockMetadata
8343 
8344 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8345  m_Size(0),
8346  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8347 {
8348 }
8349 
8350 #if VMA_STATS_STRING_ENABLED
8351 
8352 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8353  VkDeviceSize unusedBytes,
8354  size_t allocationCount,
8355  size_t unusedRangeCount) const
8356 {
8357  json.BeginObject();
8358 
8359  json.WriteString("TotalBytes");
8360  json.WriteNumber(GetSize());
8361 
8362  json.WriteString("UnusedBytes");
8363  json.WriteNumber(unusedBytes);
8364 
8365  json.WriteString("Allocations");
8366  json.WriteNumber((uint64_t)allocationCount);
8367 
8368  json.WriteString("UnusedRanges");
8369  json.WriteNumber((uint64_t)unusedRangeCount);
8370 
8371  json.WriteString("Suballocations");
8372  json.BeginArray();
8373 }
8374 
8375 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8376  VkDeviceSize offset,
8377  VmaAllocation hAllocation) const
8378 {
8379  json.BeginObject(true);
8380 
8381  json.WriteString("Offset");
8382  json.WriteNumber(offset);
8383 
8384  hAllocation->PrintParameters(json);
8385 
8386  json.EndObject();
8387 }
8388 
8389 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8390  VkDeviceSize offset,
8391  VkDeviceSize size) const
8392 {
8393  json.BeginObject(true);
8394 
8395  json.WriteString("Offset");
8396  json.WriteNumber(offset);
8397 
8398  json.WriteString("Type");
8399  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8400 
8401  json.WriteString("Size");
8402  json.WriteNumber(size);
8403 
8404  json.EndObject();
8405 }
8406 
8407 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8408 {
8409  json.EndArray();
8410  json.EndObject();
8411 }
8412 
8413 #endif // #if VMA_STATS_STRING_ENABLED
8414 
8416 // class VmaBlockMetadata_Generic
8417 
8418 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8419  VmaBlockMetadata(hAllocator),
8420  m_FreeCount(0),
8421  m_SumFreeSize(0),
8422  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8423  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8424 {
8425 }
8426 
8427 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8428 {
8429 }
8430 
8431 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8432 {
8433  VmaBlockMetadata::Init(size);
8434 
8435  m_FreeCount = 1;
8436  m_SumFreeSize = size;
8437 
8438  VmaSuballocation suballoc = {};
8439  suballoc.offset = 0;
8440  suballoc.size = size;
8441  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8442  suballoc.hAllocation = VK_NULL_HANDLE;
8443 
8444  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8445  m_Suballocations.push_back(suballoc);
8446  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8447  --suballocItem;
8448  m_FreeSuballocationsBySize.push_back(suballocItem);
8449 }
8450 
8451 bool VmaBlockMetadata_Generic::Validate() const
8452 {
8453  VMA_VALIDATE(!m_Suballocations.empty());
8454 
8455  // Expected offset of new suballocation as calculated from previous ones.
8456  VkDeviceSize calculatedOffset = 0;
8457  // Expected number of free suballocations as calculated from traversing their list.
8458  uint32_t calculatedFreeCount = 0;
8459  // Expected sum size of free suballocations as calculated from traversing their list.
8460  VkDeviceSize calculatedSumFreeSize = 0;
8461  // Expected number of free suballocations that should be registered in
8462  // m_FreeSuballocationsBySize calculated from traversing their list.
8463  size_t freeSuballocationsToRegister = 0;
8464  // True if previous visited suballocation was free.
8465  bool prevFree = false;
8466 
8467  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8468  suballocItem != m_Suballocations.cend();
8469  ++suballocItem)
8470  {
8471  const VmaSuballocation& subAlloc = *suballocItem;
8472 
8473  // Actual offset of this suballocation doesn't match expected one.
8474  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8475 
8476  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8477  // Two adjacent free suballocations are invalid. They should be merged.
8478  VMA_VALIDATE(!prevFree || !currFree);
8479 
8480  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8481 
8482  if(currFree)
8483  {
8484  calculatedSumFreeSize += subAlloc.size;
8485  ++calculatedFreeCount;
8486  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8487  {
8488  ++freeSuballocationsToRegister;
8489  }
8490 
8491  // Margin required between allocations - every free space must be at least that large.
8492  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8493  }
8494  else
8495  {
8496  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8497  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8498 
8499  // Margin required between allocations - previous allocation must be free.
8500  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8501  }
8502 
8503  calculatedOffset += subAlloc.size;
8504  prevFree = currFree;
8505  }
8506 
8507  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8508  // match expected one.
8509  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8510 
8511  VkDeviceSize lastSize = 0;
8512  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8513  {
8514  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8515 
8516  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8517  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8518  // They must be sorted by size ascending.
8519  VMA_VALIDATE(suballocItem->size >= lastSize);
8520 
8521  lastSize = suballocItem->size;
8522  }
8523 
8524  // Check if totals match calculated values.
8525  VMA_VALIDATE(ValidateFreeSuballocationList());
8526  VMA_VALIDATE(calculatedOffset == GetSize());
8527  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8528  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8529 
8530  return true;
8531 }
8532 
8533 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8534 {
8535  if(!m_FreeSuballocationsBySize.empty())
8536  {
8537  return m_FreeSuballocationsBySize.back()->size;
8538  }
8539  else
8540  {
8541  return 0;
8542  }
8543 }
8544 
8545 bool VmaBlockMetadata_Generic::IsEmpty() const
8546 {
8547  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8548 }
8549 
8550 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8551 {
8552  outInfo.blockCount = 1;
8553 
8554  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8555  outInfo.allocationCount = rangeCount - m_FreeCount;
8556  outInfo.unusedRangeCount = m_FreeCount;
8557 
8558  outInfo.unusedBytes = m_SumFreeSize;
8559  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8560 
8561  outInfo.allocationSizeMin = UINT64_MAX;
8562  outInfo.allocationSizeMax = 0;
8563  outInfo.unusedRangeSizeMin = UINT64_MAX;
8564  outInfo.unusedRangeSizeMax = 0;
8565 
8566  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8567  suballocItem != m_Suballocations.cend();
8568  ++suballocItem)
8569  {
8570  const VmaSuballocation& suballoc = *suballocItem;
8571  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8572  {
8573  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8574  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8575  }
8576  else
8577  {
8578  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8579  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8580  }
8581  }
8582 }
8583 
8584 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8585 {
8586  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8587 
8588  inoutStats.size += GetSize();
8589  inoutStats.unusedSize += m_SumFreeSize;
8590  inoutStats.allocationCount += rangeCount - m_FreeCount;
8591  inoutStats.unusedRangeCount += m_FreeCount;
8592  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8593 }
8594 
8595 #if VMA_STATS_STRING_ENABLED
8596 
8597 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8598 {
8599  PrintDetailedMap_Begin(json,
8600  m_SumFreeSize, // unusedBytes
8601  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8602  m_FreeCount); // unusedRangeCount
8603 
8604  size_t i = 0;
8605  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8606  suballocItem != m_Suballocations.cend();
8607  ++suballocItem, ++i)
8608  {
8609  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8610  {
8611  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8612  }
8613  else
8614  {
8615  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8616  }
8617  }
8618 
8619  PrintDetailedMap_End(json);
8620 }
8621 
8622 #endif // #if VMA_STATS_STRING_ENABLED
8623 
8624 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8625  uint32_t currentFrameIndex,
8626  uint32_t frameInUseCount,
8627  VkDeviceSize bufferImageGranularity,
8628  VkDeviceSize allocSize,
8629  VkDeviceSize allocAlignment,
8630  bool upperAddress,
8631  VmaSuballocationType allocType,
8632  bool canMakeOtherLost,
8633  uint32_t strategy,
8634  VmaAllocationRequest* pAllocationRequest)
8635 {
8636  VMA_ASSERT(allocSize > 0);
8637  VMA_ASSERT(!upperAddress);
8638  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8639  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8640  VMA_HEAVY_ASSERT(Validate());
8641 
8642  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8643 
8644  // There is not enough total free space in this block to fulfill the request: Early return.
8645  if(canMakeOtherLost == false &&
8646  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8647  {
8648  return false;
8649  }
8650 
8651  // New algorithm, efficiently searching freeSuballocationsBySize.
8652  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8653  if(freeSuballocCount > 0)
8654  {
8655  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
8656  {
8657  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8658  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8659  m_FreeSuballocationsBySize.data(),
8660  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8661  allocSize + 2 * VMA_DEBUG_MARGIN,
8662  VmaSuballocationItemSizeLess());
8663  size_t index = it - m_FreeSuballocationsBySize.data();
8664  for(; index < freeSuballocCount; ++index)
8665  {
8666  if(CheckAllocation(
8667  currentFrameIndex,
8668  frameInUseCount,
8669  bufferImageGranularity,
8670  allocSize,
8671  allocAlignment,
8672  allocType,
8673  m_FreeSuballocationsBySize[index],
8674  false, // canMakeOtherLost
8675  &pAllocationRequest->offset,
8676  &pAllocationRequest->itemsToMakeLostCount,
8677  &pAllocationRequest->sumFreeSize,
8678  &pAllocationRequest->sumItemSize))
8679  {
8680  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8681  return true;
8682  }
8683  }
8684  }
8685  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8686  {
8687  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8688  it != m_Suballocations.end();
8689  ++it)
8690  {
8691  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8692  currentFrameIndex,
8693  frameInUseCount,
8694  bufferImageGranularity,
8695  allocSize,
8696  allocAlignment,
8697  allocType,
8698  it,
8699  false, // canMakeOtherLost
8700  &pAllocationRequest->offset,
8701  &pAllocationRequest->itemsToMakeLostCount,
8702  &pAllocationRequest->sumFreeSize,
8703  &pAllocationRequest->sumItemSize))
8704  {
8705  pAllocationRequest->item = it;
8706  return true;
8707  }
8708  }
8709  }
8710  else // WORST_FIT, FIRST_FIT
8711  {
8712  // Search starting from the biggest suballocations.
8713  for(size_t index = freeSuballocCount; index--; )
8714  {
8715  if(CheckAllocation(
8716  currentFrameIndex,
8717  frameInUseCount,
8718  bufferImageGranularity,
8719  allocSize,
8720  allocAlignment,
8721  allocType,
8722  m_FreeSuballocationsBySize[index],
8723  false, // canMakeOtherLost
8724  &pAllocationRequest->offset,
8725  &pAllocationRequest->itemsToMakeLostCount,
8726  &pAllocationRequest->sumFreeSize,
8727  &pAllocationRequest->sumItemSize))
8728  {
8729  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8730  return true;
8731  }
8732  }
8733  }
8734  }
8735 
8736  if(canMakeOtherLost)
8737  {
8738  // Brute-force algorithm. TODO: Come up with something better.
8739 
8740  bool found = false;
8741  VmaAllocationRequest tmpAllocRequest = {};
8742  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8743  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8744  suballocIt != m_Suballocations.end();
8745  ++suballocIt)
8746  {
8747  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8748  suballocIt->hAllocation->CanBecomeLost())
8749  {
8750  if(CheckAllocation(
8751  currentFrameIndex,
8752  frameInUseCount,
8753  bufferImageGranularity,
8754  allocSize,
8755  allocAlignment,
8756  allocType,
8757  suballocIt,
8758  canMakeOtherLost,
8759  &tmpAllocRequest.offset,
8760  &tmpAllocRequest.itemsToMakeLostCount,
8761  &tmpAllocRequest.sumFreeSize,
8762  &tmpAllocRequest.sumItemSize))
8763  {
8764  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8765  {
8766  *pAllocationRequest = tmpAllocRequest;
8767  pAllocationRequest->item = suballocIt;
8768  break;
8769  }
8770  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8771  {
8772  *pAllocationRequest = tmpAllocRequest;
8773  pAllocationRequest->item = suballocIt;
8774  found = true;
8775  }
8776  }
8777  }
8778  }
8779 
8780  return found;
8781  }
8782 
8783  return false;
8784 }
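
// --- Illustrative sketch, not part of the original header -------------------
// The BEST_FIT path above depends on m_FreeSuballocationsBySize being sorted
// ascending by size, so VmaBinaryFindFirstNotLess can locate the first viable
// candidate in O(log n). A minimal standalone equivalent built on
// std::lower_bound; FreeRange and FindFirstFit are hypothetical names.
#include <algorithm>
#include <cstdint>
#include <vector>

struct FreeRange { uint64_t offset; uint64_t size; };

// Returns the index of the first free range whose size is not less than
// requiredSize, or bySize.size() when nothing fits.
static size_t FindFirstFit(const std::vector<FreeRange>& bySize, uint64_t requiredSize)
{
    const auto it = std::lower_bound(bySize.begin(), bySize.end(), requiredSize,
        [](const FreeRange& r, uint64_t s) { return r.size < s; });
    return static_cast<size_t>(it - bySize.begin());
}
// -----------------------------------------------------------------------------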
8785 
8786 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8787  uint32_t currentFrameIndex,
8788  uint32_t frameInUseCount,
8789  VmaAllocationRequest* pAllocationRequest)
8790 {
8791  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8792 
8793  while(pAllocationRequest->itemsToMakeLostCount > 0)
8794  {
8795  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8796  {
8797  ++pAllocationRequest->item;
8798  }
8799  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8800  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8801  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8802  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8803  {
8804  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8805  --pAllocationRequest->itemsToMakeLostCount;
8806  }
8807  else
8808  {
8809  return false;
8810  }
8811  }
8812 
8813  VMA_HEAVY_ASSERT(Validate());
8814  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8815  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8816 
8817  return true;
8818 }
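
// --- Illustrative sketch, not part of the original header -------------------
// MakeRequestedAllocationsLost() may only sacrifice allocations whose last use
// is old enough. The eligibility test used throughout this file reduces to the
// comparison below; CanMakeLostNow is a hypothetical name.
#include <cstdint>

// True when the allocation has not been used within the last frameInUseCount
// frames, so making it lost cannot invalidate work still in flight.
static bool CanMakeLostNow(uint32_t lastUseFrameIndex,
                           uint32_t frameInUseCount,
                           uint32_t currentFrameIndex)
{
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}
// -----------------------------------------------------------------------------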
8819 
8820 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8821 {
8822  uint32_t lostAllocationCount = 0;
8823  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8824  it != m_Suballocations.end();
8825  ++it)
8826  {
8827  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8828  it->hAllocation->CanBecomeLost() &&
8829  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8830  {
8831  it = FreeSuballocation(it);
8832  ++lostAllocationCount;
8833  }
8834  }
8835  return lostAllocationCount;
8836 }
8837 
8838 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8839 {
8840  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8841  it != m_Suballocations.end();
8842  ++it)
8843  {
8844  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8845  {
8846  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8847  {
8848  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8849  return VK_ERROR_VALIDATION_FAILED_EXT;
8850  }
8851  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8852  {
8853  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8854  return VK_ERROR_VALIDATION_FAILED_EXT;
8855  }
8856  }
8857  }
8858 
8859  return VK_SUCCESS;
8860 }
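
// --- Illustrative sketch, not part of the original header -------------------
// CheckCorruption() above assumes VMA_DEBUG_MARGIN bytes around every
// allocation were filled with a known 32-bit pattern when the allocation was
// created. A standalone model of writing and validating such a margin; the
// pattern value and the function names are illustrative, not the library's.
#include <cstddef>
#include <cstdint>

static const uint32_t kMarginMagic = 0xDEADBEEFu; // illustrative pattern only

// Fills marginSize bytes (a multiple of 4) at `offset` inside the mapped block.
static void WriteMargin(void* pBlockData, size_t offset, size_t marginSize)
{
    uint32_t* p = reinterpret_cast<uint32_t*>(static_cast<char*>(pBlockData) + offset);
    for(size_t i = 0; i < marginSize / sizeof(uint32_t); ++i)
        p[i] = kMarginMagic;
}

// Returns false if any word of the margin was overwritten, i.e. a neighboring
// allocation wrote out of bounds.
static bool ValidateMargin(const void* pBlockData, size_t offset, size_t marginSize)
{
    const uint32_t* p = reinterpret_cast<const uint32_t*>(
        static_cast<const char*>(pBlockData) + offset);
    for(size_t i = 0; i < marginSize / sizeof(uint32_t); ++i)
        if(p[i] != kMarginMagic)
            return false;
    return true;
}
// -----------------------------------------------------------------------------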
8861 
8862 void VmaBlockMetadata_Generic::Alloc(
8863  const VmaAllocationRequest& request,
8864  VmaSuballocationType type,
8865  VkDeviceSize allocSize,
8866  VmaAllocation hAllocation)
8867 {
8868  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8869  VMA_ASSERT(request.item != m_Suballocations.end());
8870  VmaSuballocation& suballoc = *request.item;
8871  // Given suballocation is a free block.
8872  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8873  // Given offset is inside this suballocation.
8874  VMA_ASSERT(request.offset >= suballoc.offset);
8875  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8876  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8877  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8878 
8879  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8880  // it to become used.
8881  UnregisterFreeSuballocation(request.item);
8882 
8883  suballoc.offset = request.offset;
8884  suballoc.size = allocSize;
8885  suballoc.type = type;
8886  suballoc.hAllocation = hAllocation;
8887 
8888  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8889  if(paddingEnd)
8890  {
8891  VmaSuballocation paddingSuballoc = {};
8892  paddingSuballoc.offset = request.offset + allocSize;
8893  paddingSuballoc.size = paddingEnd;
8894  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8895  VmaSuballocationList::iterator next = request.item;
8896  ++next;
8897  const VmaSuballocationList::iterator paddingEndItem =
8898  m_Suballocations.insert(next, paddingSuballoc);
8899  RegisterFreeSuballocation(paddingEndItem);
8900  }
8901 
8902  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8903  if(paddingBegin)
8904  {
8905  VmaSuballocation paddingSuballoc = {};
8906  paddingSuballoc.offset = request.offset - paddingBegin;
8907  paddingSuballoc.size = paddingBegin;
8908  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8909  const VmaSuballocationList::iterator paddingBeginItem =
8910  m_Suballocations.insert(request.item, paddingSuballoc);
8911  RegisterFreeSuballocation(paddingBeginItem);
8912  }
8913 
8914  // Update totals.
8915  --m_FreeCount;
8916  if(paddingBegin > 0)
8917  {
8918  ++m_FreeCount;
8919  }
8920  if(paddingEnd > 0)
8921  {
8922  ++m_FreeCount;
8923  }
8924  m_SumFreeSize -= allocSize;
8925 }
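
// --- Illustrative sketch, not part of the original header -------------------
// Alloc() above carves one free range into [paddingBegin][allocation]
// [paddingEnd], inserting new free nodes for any leftover space. A compact
// model on a plain std::list; Range and SplitFreeRange are hypothetical names.
#include <cstdint>
#include <iterator>
#include <list>

struct Range { uint64_t offset; uint64_t size; bool free; };

// Marks [allocOffset, allocOffset + allocSize) inside the free range at `it`
// as used; leftover space before/after stays free as separate nodes.
static void SplitFreeRange(std::list<Range>& ranges,
                           std::list<Range>::iterator it,
                           uint64_t allocOffset, uint64_t allocSize)
{
    const uint64_t paddingBegin = allocOffset - it->offset;
    const uint64_t paddingEnd = it->size - paddingBegin - allocSize;
    if(paddingEnd > 0)
        ranges.insert(std::next(it), Range{allocOffset + allocSize, paddingEnd, true});
    if(paddingBegin > 0)
        ranges.insert(it, Range{it->offset, paddingBegin, true});
    it->offset = allocOffset;
    it->size = allocSize;
    it->free = false;
}
// -----------------------------------------------------------------------------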
8926 
8927 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8928 {
8929  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8930  suballocItem != m_Suballocations.end();
8931  ++suballocItem)
8932  {
8933  VmaSuballocation& suballoc = *suballocItem;
8934  if(suballoc.hAllocation == allocation)
8935  {
8936  FreeSuballocation(suballocItem);
8937  VMA_HEAVY_ASSERT(Validate());
8938  return;
8939  }
8940  }
8941  VMA_ASSERT(0 && "Not found!");
8942 }
8943 
8944 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8945 {
8946  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8947  suballocItem != m_Suballocations.end();
8948  ++suballocItem)
8949  {
8950  VmaSuballocation& suballoc = *suballocItem;
8951  if(suballoc.offset == offset)
8952  {
8953  FreeSuballocation(suballocItem);
8954  return;
8955  }
8956  }
8957  VMA_ASSERT(0 && "Not found!");
8958 }
8959 
8960 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8961 {
8962  VkDeviceSize lastSize = 0;
8963  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8964  {
8965  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8966 
8967  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8968  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8969  VMA_VALIDATE(it->size >= lastSize);
8970  lastSize = it->size;
8971  }
8972  return true;
8973 }
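
// --- Illustrative sketch, not part of the original header -------------------
// The invariant checked above (free list sorted ascending by size) is what
// makes the binary search in CreateAllocationRequest() valid. The same check
// in one line with the standard library; IsSortedBySize is a hypothetical name.
#include <algorithm>
#include <cstdint>
#include <vector>

static bool IsSortedBySize(const std::vector<uint64_t>& freeSizes)
{
    return std::is_sorted(freeSizes.begin(), freeSizes.end());
}
// -----------------------------------------------------------------------------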
8974 
8975 bool VmaBlockMetadata_Generic::CheckAllocation(
8976  uint32_t currentFrameIndex,
8977  uint32_t frameInUseCount,
8978  VkDeviceSize bufferImageGranularity,
8979  VkDeviceSize allocSize,
8980  VkDeviceSize allocAlignment,
8981  VmaSuballocationType allocType,
8982  VmaSuballocationList::const_iterator suballocItem,
8983  bool canMakeOtherLost,
8984  VkDeviceSize* pOffset,
8985  size_t* itemsToMakeLostCount,
8986  VkDeviceSize* pSumFreeSize,
8987  VkDeviceSize* pSumItemSize) const
8988 {
8989  VMA_ASSERT(allocSize > 0);
8990  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8991  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8992  VMA_ASSERT(pOffset != VMA_NULL);
8993 
8994  *itemsToMakeLostCount = 0;
8995  *pSumFreeSize = 0;
8996  *pSumItemSize = 0;
8997 
8998  if(canMakeOtherLost)
8999  {
9000  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9001  {
9002  *pSumFreeSize = suballocItem->size;
9003  }
9004  else
9005  {
9006  if(suballocItem->hAllocation->CanBecomeLost() &&
9007  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9008  {
9009  ++*itemsToMakeLostCount;
9010  *pSumItemSize = suballocItem->size;
9011  }
9012  else
9013  {
9014  return false;
9015  }
9016  }
9017 
9018  // Remaining size is too small for this request: Early return.
9019  if(GetSize() - suballocItem->offset < allocSize)
9020  {
9021  return false;
9022  }
9023 
9024  // Start from offset equal to beginning of this suballocation.
9025  *pOffset = suballocItem->offset;
9026 
9027  // Apply VMA_DEBUG_MARGIN at the beginning.
9028  if(VMA_DEBUG_MARGIN > 0)
9029  {
9030  *pOffset += VMA_DEBUG_MARGIN;
9031  }
9032 
9033  // Apply alignment.
9034  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9035 
9036  // Check previous suballocations for BufferImageGranularity conflicts.
9037  // Make bigger alignment if necessary.
9038  if(bufferImageGranularity > 1)
9039  {
9040  bool bufferImageGranularityConflict = false;
9041  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9042  while(prevSuballocItem != m_Suballocations.cbegin())
9043  {
9044  --prevSuballocItem;
9045  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9046  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9047  {
9048  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9049  {
9050  bufferImageGranularityConflict = true;
9051  break;
9052  }
9053  }
9054  else
9055  // Already on previous page.
9056  break;
9057  }
9058  if(bufferImageGranularityConflict)
9059  {
9060  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9061  }
9062  }
9063 
9064  // Now that we have final *pOffset, check if we are past suballocItem.
9065  // If yes, return false - this function should be called for another suballocItem as starting point.
9066  if(*pOffset >= suballocItem->offset + suballocItem->size)
9067  {
9068  return false;
9069  }
9070 
9071  // Calculate padding at the beginning based on current offset.
9072  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9073 
9074  // Calculate required margin at the end.
9075  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9076 
9077  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9078  // Another early return check.
9079  if(suballocItem->offset + totalSize > GetSize())
9080  {
9081  return false;
9082  }
9083 
9084  // Advance lastSuballocItem until desired size is reached.
9085  // Update itemsToMakeLostCount.
9086  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9087  if(totalSize > suballocItem->size)
9088  {
9089  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9090  while(remainingSize > 0)
9091  {
9092  ++lastSuballocItem;
9093  if(lastSuballocItem == m_Suballocations.cend())
9094  {
9095  return false;
9096  }
9097  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9098  {
9099  *pSumFreeSize += lastSuballocItem->size;
9100  }
9101  else
9102  {
9103  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9104  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9105  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9106  {
9107  ++*itemsToMakeLostCount;
9108  *pSumItemSize += lastSuballocItem->size;
9109  }
9110  else
9111  {
9112  return false;
9113  }
9114  }
9115  remainingSize = (lastSuballocItem->size < remainingSize) ?
9116  remainingSize - lastSuballocItem->size : 0;
9117  }
9118  }
9119 
9120  // Check next suballocations for BufferImageGranularity conflicts.
9121  // If conflict exists, we must mark more allocations lost or fail.
9122  if(bufferImageGranularity > 1)
9123  {
9124  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9125  ++nextSuballocItem;
9126  while(nextSuballocItem != m_Suballocations.cend())
9127  {
9128  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9129  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9130  {
9131  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9132  {
9133  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9134  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9135  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9136  {
9137  ++*itemsToMakeLostCount;
9138  }
9139  else
9140  {
9141  return false;
9142  }
9143  }
9144  }
9145  else
9146  {
9147  // Already on next page.
9148  break;
9149  }
9150  ++nextSuballocItem;
9151  }
9152  }
9153  }
9154  else
9155  {
9156  const VmaSuballocation& suballoc = *suballocItem;
9157  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9158 
9159  *pSumFreeSize = suballoc.size;
9160 
9161  // Size of this suballocation is too small for this request: Early return.
9162  if(suballoc.size < allocSize)
9163  {
9164  return false;
9165  }
9166 
9167  // Start from offset equal to beginning of this suballocation.
9168  *pOffset = suballoc.offset;
9169 
9170  // Apply VMA_DEBUG_MARGIN at the beginning.
9171  if(VMA_DEBUG_MARGIN > 0)
9172  {
9173  *pOffset += VMA_DEBUG_MARGIN;
9174  }
9175 
9176  // Apply alignment.
9177  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9178 
9179  // Check previous suballocations for BufferImageGranularity conflicts.
9180  // Make bigger alignment if necessary.
9181  if(bufferImageGranularity > 1)
9182  {
9183  bool bufferImageGranularityConflict = false;
9184  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9185  while(prevSuballocItem != m_Suballocations.cbegin())
9186  {
9187  --prevSuballocItem;
9188  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9189  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9190  {
9191  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9192  {
9193  bufferImageGranularityConflict = true;
9194  break;
9195  }
9196  }
9197  else
9198  // Already on previous page.
9199  break;
9200  }
9201  if(bufferImageGranularityConflict)
9202  {
9203  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9204  }
9205  }
9206 
9207  // Calculate padding at the beginning based on current offset.
9208  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9209 
9210  // Calculate required margin at the end.
9211  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9212 
9213  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9214  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9215  {
9216  return false;
9217  }
9218 
9219  // Check next suballocations for BufferImageGranularity conflicts.
9220  // If conflict exists, allocation cannot be made here.
9221  if(bufferImageGranularity > 1)
9222  {
9223  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9224  ++nextSuballocItem;
9225  while(nextSuballocItem != m_Suballocations.cend())
9226  {
9227  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9228  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9229  {
9230  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9231  {
9232  return false;
9233  }
9234  }
9235  else
9236  {
9237  // Already on next page.
9238  break;
9239  }
9240  ++nextSuballocItem;
9241  }
9242  }
9243  }
9244 
9245  // All tests passed: Success. pOffset is already filled.
9246  return true;
9247 }
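
// --- Illustrative sketch, not part of the original header -------------------
// CheckAllocation() above first aligns the candidate offset up, then widens
// the alignment to bufferImageGranularity when a neighbor of a conflicting
// type shares the same "page". Standalone models of both helpers, assuming
// power-of-two alignment and granularity (Vulkan guarantees the latter);
// AlignUpPow2 and OnSamePage are hypothetical names.
#include <cstdint>

// Rounds offset up to the next multiple of a power-of-two alignment.
static uint64_t AlignUpPow2(uint64_t offset, uint64_t alignment)
{
    return (offset + alignment - 1) & ~(alignment - 1);
}

// True when resource A's last byte and resource B's first byte land on the
// same granularity-sized page, so a linear and a non-linear resource placed
// there could conflict.
static bool OnSamePage(uint64_t aOffset, uint64_t aSize,
                       uint64_t bOffset, uint64_t pageSize)
{
    const uint64_t aEndPage = (aOffset + aSize - 1) & ~(pageSize - 1);
    const uint64_t bStartPage = bOffset & ~(pageSize - 1);
    return aEndPage == bStartPage;
}
// -----------------------------------------------------------------------------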
9248 
9249 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9250 {
9251  VMA_ASSERT(item != m_Suballocations.end());
9252  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9253 
9254  VmaSuballocationList::iterator nextItem = item;
9255  ++nextItem;
9256  VMA_ASSERT(nextItem != m_Suballocations.end());
9257  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9258 
9259  item->size += nextItem->size;
9260  --m_FreeCount;
9261  m_Suballocations.erase(nextItem);
9262 }
9263 
9264 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9265 {
9266  // Change this suballocation to be marked as free.
9267  VmaSuballocation& suballoc = *suballocItem;
9268  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9269  suballoc.hAllocation = VK_NULL_HANDLE;
9270 
9271  // Update totals.
9272  ++m_FreeCount;
9273  m_SumFreeSize += suballoc.size;
9274 
9275  // Merge with previous and/or next suballocation if it's also free.
9276  bool mergeWithNext = false;
9277  bool mergeWithPrev = false;
9278 
9279  VmaSuballocationList::iterator nextItem = suballocItem;
9280  ++nextItem;
9281  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9282  {
9283  mergeWithNext = true;
9284  }
9285 
9286  VmaSuballocationList::iterator prevItem = suballocItem;
9287  if(suballocItem != m_Suballocations.begin())
9288  {
9289  --prevItem;
9290  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9291  {
9292  mergeWithPrev = true;
9293  }
9294  }
9295 
9296  if(mergeWithNext)
9297  {
9298  UnregisterFreeSuballocation(nextItem);
9299  MergeFreeWithNext(suballocItem);
9300  }
9301 
9302  if(mergeWithPrev)
9303  {
9304  UnregisterFreeSuballocation(prevItem);
9305  MergeFreeWithNext(prevItem);
9306  RegisterFreeSuballocation(prevItem);
9307  return prevItem;
9308  }
9309  else
9310  {
9311  RegisterFreeSuballocation(suballocItem);
9312  return suballocItem;
9313  }
9314 }
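
// --- Illustrative sketch, not part of the original header -------------------
// FreeSuballocation() above keeps the invariant that no two adjacent list
// nodes are free: a freed range is merged with a free successor and/or a free
// predecessor. A compact model; Chunk and FreeAndCoalesce are hypothetical names.
#include <cstdint>
#include <iterator>
#include <list>

struct Chunk { uint64_t offset; uint64_t size; bool free; };

// Marks *it free and coalesces it with free neighbors; returns the iterator
// of the resulting merged free chunk.
static std::list<Chunk>::iterator FreeAndCoalesce(std::list<Chunk>& chunks,
                                                  std::list<Chunk>::iterator it)
{
    it->free = true;
    std::list<Chunk>::iterator next = std::next(it);
    if(next != chunks.end() && next->free)
    {
        it->size += next->size;     // absorb the successor
        chunks.erase(next);
    }
    if(it != chunks.begin())
    {
        std::list<Chunk>::iterator prev = std::prev(it);
        if(prev->free)
        {
            prev->size += it->size; // absorb *it into the predecessor
            chunks.erase(it);
            return prev;
        }
    }
    return it;
}
// -----------------------------------------------------------------------------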
9315 
9316 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9317 {
9318  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9319  VMA_ASSERT(item->size > 0);
9320 
9321  // You may want to enable this validation at the beginning or at the end of
9322  // this function, depending on what you want to check.
9323  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9324 
9325  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9326  {
9327  if(m_FreeSuballocationsBySize.empty())
9328  {
9329  m_FreeSuballocationsBySize.push_back(item);
9330  }
9331  else
9332  {
9333  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9334  }
9335  }
9336 
9337  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9338 }
9339 
9340 
9341 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9342 {
9343  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9344  VMA_ASSERT(item->size > 0);
9345 
9346  // You may want to enable this validation at the beginning or at the end of
9347  // this function, depending on what you want to check.
9348  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9349 
9350  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9351  {
9352  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9353  m_FreeSuballocationsBySize.data(),
9354  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9355  item,
9356  VmaSuballocationItemSizeLess());
9357  for(size_t index = it - m_FreeSuballocationsBySize.data();
9358  index < m_FreeSuballocationsBySize.size();
9359  ++index)
9360  {
9361  if(m_FreeSuballocationsBySize[index] == item)
9362  {
9363  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9364  return;
9365  }
9366  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9367  }
9368  VMA_ASSERT(0 && "Not found.");
9369  }
9370 
9371  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9372 }
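
// --- Illustrative sketch, not part of the original header -------------------
// Several free ranges can share a size, so UnregisterFreeSuballocation() above
// binary-searches to the first entry of equal size and then scans forward
// through that run. A standalone equivalent keyed on (size, offset) pairs;
// UnregisterBySize is a hypothetical name.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Removes the entry with the given size and offset from a vector sorted by
// size; returns false if it is not present.
static bool UnregisterBySize(std::vector<std::pair<uint64_t, uint64_t> >& bySize,
                             uint64_t size, uint64_t offset)
{
    auto it = std::lower_bound(
        bySize.begin(), bySize.end(), std::make_pair(size, uint64_t(0)),
        [](const std::pair<uint64_t, uint64_t>& a, const std::pair<uint64_t, uint64_t>& b)
        { return a.first < b.first; });
    for(; it != bySize.end() && it->first == size; ++it)
    {
        if(it->second == offset)
        {
            bySize.erase(it);
            return true;
        }
    }
    return false;
}
// -----------------------------------------------------------------------------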
9373 
9374 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9375  VkDeviceSize bufferImageGranularity,
9376  VmaSuballocationType& inOutPrevSuballocType) const
9377 {
9378  if(bufferImageGranularity == 1 || IsEmpty())
9379  {
9380  return false;
9381  }
9382 
9383  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9384  bool typeConflictFound = false;
9385  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9386  it != m_Suballocations.cend();
9387  ++it)
9388  {
9389  const VmaSuballocationType suballocType = it->type;
9390  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9391  {
9392  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9393  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9394  {
9395  typeConflictFound = true;
9396  }
9397  inOutPrevSuballocType = suballocType;
9398  }
9399  }
9400 
9401  return typeConflictFound || minAlignment >= bufferImageGranularity;
9402 }
9403 
9404 ////////////////////////////////////////////////////////////////////////////////
9405 // class VmaBlockMetadata_Linear
9406 
9407 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9408  VmaBlockMetadata(hAllocator),
9409  m_SumFreeSize(0),
9410  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9411  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9412  m_1stVectorIndex(0),
9413  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9414  m_1stNullItemsBeginCount(0),
9415  m_1stNullItemsMiddleCount(0),
9416  m_2ndNullItemsCount(0)
9417 {
9418 }
9419 
9420 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9421 {
9422 }
9423 
9424 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9425 {
9426  VmaBlockMetadata::Init(size);
9427  m_SumFreeSize = size;
9428 }
9429 
9430 bool VmaBlockMetadata_Linear::Validate() const
9431 {
9432  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9433  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9434 
9435  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9436  VMA_VALIDATE(!suballocations1st.empty() ||
9437  suballocations2nd.empty() ||
9438  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9439 
9440  if(!suballocations1st.empty())
9441  {
9442  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
9443  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9444  // A null item at the end should have been removed with pop_back().
9445  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9446  }
9447  if(!suballocations2nd.empty())
9448  {
9449  // A null item at the end should have been removed with pop_back().
9450  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9451  }
9452 
9453  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9454  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9455 
9456  VkDeviceSize sumUsedSize = 0;
9457  const size_t suballoc1stCount = suballocations1st.size();
9458  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9459 
9460  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9461  {
9462  const size_t suballoc2ndCount = suballocations2nd.size();
9463  size_t nullItem2ndCount = 0;
9464  for(size_t i = 0; i < suballoc2ndCount; ++i)
9465  {
9466  const VmaSuballocation& suballoc = suballocations2nd[i];
9467  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9468 
9469  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9470  VMA_VALIDATE(suballoc.offset >= offset);
9471 
9472  if(!currFree)
9473  {
9474  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9475  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9476  sumUsedSize += suballoc.size;
9477  }
9478  else
9479  {
9480  ++nullItem2ndCount;
9481  }
9482 
9483  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9484  }
9485 
9486  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9487  }
9488 
9489  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9490  {
9491  const VmaSuballocation& suballoc = suballocations1st[i];
9492  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9493  suballoc.hAllocation == VK_NULL_HANDLE);
9494  }
9495 
9496  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9497 
9498  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9499  {
9500  const VmaSuballocation& suballoc = suballocations1st[i];
9501  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9502 
9503  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9504  VMA_VALIDATE(suballoc.offset >= offset);
9505  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9506 
9507  if(!currFree)
9508  {
9509  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9510  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9511  sumUsedSize += suballoc.size;
9512  }
9513  else
9514  {
9515  ++nullItem1stCount;
9516  }
9517 
9518  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9519  }
9520  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9521 
9522  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9523  {
9524  const size_t suballoc2ndCount = suballocations2nd.size();
9525  size_t nullItem2ndCount = 0;
9526  for(size_t i = suballoc2ndCount; i--; )
9527  {
9528  const VmaSuballocation& suballoc = suballocations2nd[i];
9529  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9530 
9531  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9532  VMA_VALIDATE(suballoc.offset >= offset);
9533 
9534  if(!currFree)
9535  {
9536  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9537  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9538  sumUsedSize += suballoc.size;
9539  }
9540  else
9541  {
9542  ++nullItem2ndCount;
9543  }
9544 
9545  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9546  }
9547 
9548  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9549  }
9550 
9551  VMA_VALIDATE(offset <= GetSize());
9552  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9553 
9554  return true;
9555 }
9556 
9557 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9558 {
9559  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9560  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9561 }
9562 
9563 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9564 {
9565  const VkDeviceSize size = GetSize();
9566 
9567  /*
9568  We don't consider gaps inside allocation vectors with freed allocations because
9569  they are not suitable for reuse in a linear allocator. We consider only space that
9570  is available for new allocations.
9571  */
9572  if(IsEmpty())
9573  {
9574  return size;
9575  }
9576 
9577  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9578 
9579  switch(m_2ndVectorMode)
9580  {
9581  case SECOND_VECTOR_EMPTY:
9582  /*
9583  Available space is after end of 1st, as well as before beginning of 1st (which
9584  would make it a ring buffer).
9585  */
9586  {
9587  const size_t suballocations1stCount = suballocations1st.size();
9588  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9589  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9590  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9591  return VMA_MAX(
9592  firstSuballoc.offset,
9593  size - (lastSuballoc.offset + lastSuballoc.size));
9594  }
9595  break;
9596 
9597  case SECOND_VECTOR_RING_BUFFER:
9598  /*
9599  Available space is only between end of 2nd and beginning of 1st.
9600  */
9601  {
9602  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9603  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9604  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9605  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9606  }
9607  break;
9608 
9609  case SECOND_VECTOR_DOUBLE_STACK:
9610  /*
9611  Available space is only between end of 1st and top of 2nd.
9612  */
9613  {
9614  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9615  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9616  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9617  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9618  }
9619  break;
9620 
9621  default:
9622  VMA_ASSERT(0);
9623  return 0;
9624  }
9625 }
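
// --- Illustrative sketch, not part of the original header -------------------
// GetUnusedRangeSizeMax() above distinguishes the three layouts the linear
// allocator can be in. The arithmetic reduces to the cases below; all names
// and the mode encoding are illustrative, and offsets are relative to the block.
#include <algorithm>
#include <cstdint>

enum LinearMode { kSecondEmpty, kRingBuffer, kDoubleStack }; // illustrative

// firstBegin/firstEnd bound the 1st vector's live span; secondBegin/secondEnd
// bound the 2nd vector's span (ignored when the mode does not involve it).
static uint64_t MaxFreeSpan(uint64_t blockSize,
                            uint64_t firstBegin, uint64_t firstEnd,
                            uint64_t secondBegin, uint64_t secondEnd,
                            LinearMode mode)
{
    switch(mode)
    {
    case kSecondEmpty: // free space before 1st or after 1st, whichever is larger
        return std::max(firstBegin, blockSize - firstEnd);
    case kRingBuffer:  // only between end of 2nd and beginning of 1st
        return firstBegin - secondEnd;
    case kDoubleStack: // only between end of 1st and top of 2nd
        return secondBegin - firstEnd;
    }
    return 0;
}
// -----------------------------------------------------------------------------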
9626 
9627 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9628 {
9629  const VkDeviceSize size = GetSize();
9630  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9631  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9632  const size_t suballoc1stCount = suballocations1st.size();
9633  const size_t suballoc2ndCount = suballocations2nd.size();
9634 
9635  outInfo.blockCount = 1;
9636  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9637  outInfo.unusedRangeCount = 0;
9638  outInfo.usedBytes = 0;
9639  outInfo.allocationSizeMin = UINT64_MAX;
9640  outInfo.allocationSizeMax = 0;
9641  outInfo.unusedRangeSizeMin = UINT64_MAX;
9642  outInfo.unusedRangeSizeMax = 0;
9643 
9644  VkDeviceSize lastOffset = 0;
9645 
9646  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9647  {
9648  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9649  size_t nextAlloc2ndIndex = 0;
9650  while(lastOffset < freeSpace2ndTo1stEnd)
9651  {
9652  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9653  while(nextAlloc2ndIndex < suballoc2ndCount &&
9654  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9655  {
9656  ++nextAlloc2ndIndex;
9657  }
9658 
9659  // Found non-null allocation.
9660  if(nextAlloc2ndIndex < suballoc2ndCount)
9661  {
9662  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9663 
9664  // 1. Process free space before this allocation.
9665  if(lastOffset < suballoc.offset)
9666  {
9667  // There is free space from lastOffset to suballoc.offset.
9668  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9669  ++outInfo.unusedRangeCount;
9670  outInfo.unusedBytes += unusedRangeSize;
9671  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9672  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9673  }
9674 
9675  // 2. Process this allocation.
9676  // There is allocation with suballoc.offset, suballoc.size.
9677  outInfo.usedBytes += suballoc.size;
9678  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9679  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9680 
9681  // 3. Prepare for next iteration.
9682  lastOffset = suballoc.offset + suballoc.size;
9683  ++nextAlloc2ndIndex;
9684  }
9685  // We are at the end.
9686  else
9687  {
9688  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9689  if(lastOffset < freeSpace2ndTo1stEnd)
9690  {
9691  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9692  ++outInfo.unusedRangeCount;
9693  outInfo.unusedBytes += unusedRangeSize;
9694  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9695  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9696  }
9697 
9698  // End of loop.
9699  lastOffset = freeSpace2ndTo1stEnd;
9700  }
9701  }
9702  }
9703 
9704  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9705  const VkDeviceSize freeSpace1stTo2ndEnd =
9706  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9707  while(lastOffset < freeSpace1stTo2ndEnd)
9708  {
9709  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9710  while(nextAlloc1stIndex < suballoc1stCount &&
9711  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9712  {
9713  ++nextAlloc1stIndex;
9714  }
9715 
9716  // Found non-null allocation.
9717  if(nextAlloc1stIndex < suballoc1stCount)
9718  {
9719  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9720 
9721  // 1. Process free space before this allocation.
9722  if(lastOffset < suballoc.offset)
9723  {
9724  // There is free space from lastOffset to suballoc.offset.
9725  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9726  ++outInfo.unusedRangeCount;
9727  outInfo.unusedBytes += unusedRangeSize;
9728  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9729  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9730  }
9731 
9732  // 2. Process this allocation.
9733  // There is allocation with suballoc.offset, suballoc.size.
9734  outInfo.usedBytes += suballoc.size;
9735  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9736  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9737 
9738  // 3. Prepare for next iteration.
9739  lastOffset = suballoc.offset + suballoc.size;
9740  ++nextAlloc1stIndex;
9741  }
9742  // We are at the end.
9743  else
9744  {
9745  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9746  if(lastOffset < freeSpace1stTo2ndEnd)
9747  {
9748  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9749  ++outInfo.unusedRangeCount;
9750  outInfo.unusedBytes += unusedRangeSize;
9751  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9752  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9753  }
9754 
9755  // End of loop.
9756  lastOffset = freeSpace1stTo2ndEnd;
9757  }
9758  }
9759 
9760  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9761  {
9762  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9763  while(lastOffset < size)
9764  {
9765  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9766  while(nextAlloc2ndIndex != SIZE_MAX &&
9767  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9768  {
9769  --nextAlloc2ndIndex;
9770  }
9771 
9772  // Found non-null allocation.
9773  if(nextAlloc2ndIndex != SIZE_MAX)
9774  {
9775  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9776 
9777  // 1. Process free space before this allocation.
9778  if(lastOffset < suballoc.offset)
9779  {
9780  // There is free space from lastOffset to suballoc.offset.
9781  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9782  ++outInfo.unusedRangeCount;
9783  outInfo.unusedBytes += unusedRangeSize;
9784  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9785  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9786  }
9787 
9788  // 2. Process this allocation.
9789  // There is allocation with suballoc.offset, suballoc.size.
9790  outInfo.usedBytes += suballoc.size;
9791  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9792  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9793 
9794  // 3. Prepare for next iteration.
9795  lastOffset = suballoc.offset + suballoc.size;
9796  --nextAlloc2ndIndex;
9797  }
9798  // We are at the end.
9799  else
9800  {
9801  // There is free space from lastOffset to size.
9802  if(lastOffset < size)
9803  {
9804  const VkDeviceSize unusedRangeSize = size - lastOffset;
9805  ++outInfo.unusedRangeCount;
9806  outInfo.unusedBytes += unusedRangeSize;
9807  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9808  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9809  }
9810 
9811  // End of loop.
9812  lastOffset = size;
9813  }
9814  }
9815  }
9816 
9817  outInfo.unusedBytes = size - outInfo.usedBytes;
9818 }
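
// --- Illustrative sketch, not part of the original header -------------------
// All of the linear traversals above share one pattern: advance an index past
// freed (VK_NULL_HANDLE) entries, then treat the gap before the next live
// entry as a single unused range. The skipping step in isolation; Entry and
// NextLive are hypothetical names.
#include <cstdint>
#include <vector>

struct Entry { uint64_t offset; uint64_t size; bool live; };

// Returns the index of the next live entry at or after idx, or entries.size().
static size_t NextLive(const std::vector<Entry>& entries, size_t idx)
{
    while(idx < entries.size() && !entries[idx].live)
        ++idx;
    return idx;
}
// -----------------------------------------------------------------------------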
9819 
9820 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9821 {
9822  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9823  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9824  const VkDeviceSize size = GetSize();
9825  const size_t suballoc1stCount = suballocations1st.size();
9826  const size_t suballoc2ndCount = suballocations2nd.size();
9827 
9828  inoutStats.size += size;
9829 
9830  VkDeviceSize lastOffset = 0;
9831 
9832  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9833  {
9834  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9835  size_t nextAlloc2ndIndex = 0;
9836  while(lastOffset < freeSpace2ndTo1stEnd)
9837  {
9838  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9839  while(nextAlloc2ndIndex < suballoc2ndCount &&
9840  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9841  {
9842  ++nextAlloc2ndIndex;
9843  }
9844 
9845  // Found non-null allocation.
9846  if(nextAlloc2ndIndex < suballoc2ndCount)
9847  {
9848  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9849 
9850  // 1. Process free space before this allocation.
9851  if(lastOffset < suballoc.offset)
9852  {
9853  // There is free space from lastOffset to suballoc.offset.
9854  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9855  inoutStats.unusedSize += unusedRangeSize;
9856  ++inoutStats.unusedRangeCount;
9857  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9858  }
9859 
9860  // 2. Process this allocation.
9861  // There is allocation with suballoc.offset, suballoc.size.
9862  ++inoutStats.allocationCount;
9863 
9864  // 3. Prepare for next iteration.
9865  lastOffset = suballoc.offset + suballoc.size;
9866  ++nextAlloc2ndIndex;
9867  }
9868  // We are at the end.
9869  else
9870  {
9871  if(lastOffset < freeSpace2ndTo1stEnd)
9872  {
9873  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9874  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9875  inoutStats.unusedSize += unusedRangeSize;
9876  ++inoutStats.unusedRangeCount;
9877  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9878  }
9879 
9880  // End of loop.
9881  lastOffset = freeSpace2ndTo1stEnd;
9882  }
9883  }
9884  }
9885 
9886  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9887  const VkDeviceSize freeSpace1stTo2ndEnd =
9888  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9889  while(lastOffset < freeSpace1stTo2ndEnd)
9890  {
9891  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9892  while(nextAlloc1stIndex < suballoc1stCount &&
9893  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9894  {
9895  ++nextAlloc1stIndex;
9896  }
9897 
9898  // Found non-null allocation.
9899  if(nextAlloc1stIndex < suballoc1stCount)
9900  {
9901  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9902 
9903  // 1. Process free space before this allocation.
9904  if(lastOffset < suballoc.offset)
9905  {
9906  // There is free space from lastOffset to suballoc.offset.
9907  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9908  inoutStats.unusedSize += unusedRangeSize;
9909  ++inoutStats.unusedRangeCount;
9910  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9911  }
9912 
9913  // 2. Process this allocation.
9914  // There is allocation with suballoc.offset, suballoc.size.
9915  ++inoutStats.allocationCount;
9916 
9917  // 3. Prepare for next iteration.
9918  lastOffset = suballoc.offset + suballoc.size;
9919  ++nextAlloc1stIndex;
9920  }
9921  // We are at the end.
9922  else
9923  {
9924  if(lastOffset < freeSpace1stTo2ndEnd)
9925  {
9926  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9927  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9928  inoutStats.unusedSize += unusedRangeSize;
9929  ++inoutStats.unusedRangeCount;
9930  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9931  }
9932 
9933  // End of loop.
9934  lastOffset = freeSpace1stTo2ndEnd;
9935  }
9936  }
9937 
9938  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9939  {
9940  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9941  while(lastOffset < size)
9942  {
9943  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9944  while(nextAlloc2ndIndex != SIZE_MAX &&
9945  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9946  {
9947  --nextAlloc2ndIndex;
9948  }
9949 
9950  // Found non-null allocation.
9951  if(nextAlloc2ndIndex != SIZE_MAX)
9952  {
9953  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9954 
9955  // 1. Process free space before this allocation.
9956  if(lastOffset < suballoc.offset)
9957  {
9958  // There is free space from lastOffset to suballoc.offset.
9959  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9960  inoutStats.unusedSize += unusedRangeSize;
9961  ++inoutStats.unusedRangeCount;
9962  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9963  }
9964 
9965  // 2. Process this allocation.
9966  // There is allocation with suballoc.offset, suballoc.size.
9967  ++inoutStats.allocationCount;
9968 
9969  // 3. Prepare for next iteration.
9970  lastOffset = suballoc.offset + suballoc.size;
9971  --nextAlloc2ndIndex;
9972  }
9973  // We are at the end.
9974  else
9975  {
9976  if(lastOffset < size)
9977  {
9978  // There is free space from lastOffset to size.
9979  const VkDeviceSize unusedRangeSize = size - lastOffset;
9980  inoutStats.unusedSize += unusedRangeSize;
9981  ++inoutStats.unusedRangeCount;
9982  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9983  }
9984 
9985  // End of loop.
9986  lastOffset = size;
9987  }
9988  }
9989  }
9990 }
9991 
9992 #if VMA_STATS_STRING_ENABLED
9993 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9994 {
9995  const VkDeviceSize size = GetSize();
9996  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9997  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9998  const size_t suballoc1stCount = suballocations1st.size();
9999  const size_t suballoc2ndCount = suballocations2nd.size();
10000 
10001  // FIRST PASS
10002 
10003  size_t unusedRangeCount = 0;
10004  VkDeviceSize usedBytes = 0;
10005 
10006  VkDeviceSize lastOffset = 0;
10007 
10008  size_t alloc2ndCount = 0;
10009  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10010  {
10011  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10012  size_t nextAlloc2ndIndex = 0;
10013  while(lastOffset < freeSpace2ndTo1stEnd)
10014  {
10015  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10016  while(nextAlloc2ndIndex < suballoc2ndCount &&
10017  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10018  {
10019  ++nextAlloc2ndIndex;
10020  }
10021 
10022  // Found non-null allocation.
10023  if(nextAlloc2ndIndex < suballoc2ndCount)
10024  {
10025  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10026 
10027  // 1. Process free space before this allocation.
10028  if(lastOffset < suballoc.offset)
10029  {
10030  // There is free space from lastOffset to suballoc.offset.
10031  ++unusedRangeCount;
10032  }
10033 
10034  // 2. Process this allocation.
10035  // There is allocation with suballoc.offset, suballoc.size.
10036  ++alloc2ndCount;
10037  usedBytes += suballoc.size;
10038 
10039  // 3. Prepare for next iteration.
10040  lastOffset = suballoc.offset + suballoc.size;
10041  ++nextAlloc2ndIndex;
10042  }
10043  // We are at the end.
10044  else
10045  {
10046  if(lastOffset < freeSpace2ndTo1stEnd)
10047  {
10048  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10049  ++unusedRangeCount;
10050  }
10051 
10052  // End of loop.
10053  lastOffset = freeSpace2ndTo1stEnd;
10054  }
10055  }
10056  }
10057 
10058  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10059  size_t alloc1stCount = 0;
10060  const VkDeviceSize freeSpace1stTo2ndEnd =
10061  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10062  while(lastOffset < freeSpace1stTo2ndEnd)
10063  {
10064  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10065  while(nextAlloc1stIndex < suballoc1stCount &&
10066  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10067  {
10068  ++nextAlloc1stIndex;
10069  }
10070 
10071  // Found non-null allocation.
10072  if(nextAlloc1stIndex < suballoc1stCount)
10073  {
10074  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10075 
10076  // 1. Process free space before this allocation.
10077  if(lastOffset < suballoc.offset)
10078  {
10079  // There is free space from lastOffset to suballoc.offset.
10080  ++unusedRangeCount;
10081  }
10082 
10083  // 2. Process this allocation.
10084  // There is allocation with suballoc.offset, suballoc.size.
10085  ++alloc1stCount;
10086  usedBytes += suballoc.size;
10087 
10088  // 3. Prepare for next iteration.
10089  lastOffset = suballoc.offset + suballoc.size;
10090  ++nextAlloc1stIndex;
10091  }
10092  // We are at the end.
10093  else
10094  {
10095  if(lastOffset < freeSpace1stTo2ndEnd)
10096  {
10097  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10098  ++unusedRangeCount;
10099  }
10100 
10101  // End of loop.
10102  lastOffset = freeSpace1stTo2ndEnd;
10103  }
10104  }
10105 
10106  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10107  {
10108  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10109  while(lastOffset < size)
10110  {
10111  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10112  while(nextAlloc2ndIndex != SIZE_MAX &&
10113  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10114  {
10115  --nextAlloc2ndIndex;
10116  }
10117 
10118  // Found non-null allocation.
10119  if(nextAlloc2ndIndex != SIZE_MAX)
10120  {
10121  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10122 
10123  // 1. Process free space before this allocation.
10124  if(lastOffset < suballoc.offset)
10125  {
10126  // There is free space from lastOffset to suballoc.offset.
10127  ++unusedRangeCount;
10128  }
10129 
10130  // 2. Process this allocation.
10131  // There is allocation with suballoc.offset, suballoc.size.
10132  ++alloc2ndCount;
10133  usedBytes += suballoc.size;
10134 
10135  // 3. Prepare for next iteration.
10136  lastOffset = suballoc.offset + suballoc.size;
10137  --nextAlloc2ndIndex;
10138  }
10139  // We are at the end.
10140  else
10141  {
10142  if(lastOffset < size)
10143  {
10144  // There is free space from lastOffset to size.
10145  ++unusedRangeCount;
10146  }
10147 
10148  // End of loop.
10149  lastOffset = size;
10150  }
10151  }
10152  }
10153 
10154  const VkDeviceSize unusedBytes = size - usedBytes;
10155  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10156 
10157  // SECOND PASS
10158  lastOffset = 0;
10159 
10160  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10161  {
10162  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10163  size_t nextAlloc2ndIndex = 0;
10164  while(lastOffset < freeSpace2ndTo1stEnd)
10165  {
10166  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10167  while(nextAlloc2ndIndex < suballoc2ndCount &&
10168  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10169  {
10170  ++nextAlloc2ndIndex;
10171  }
10172 
10173  // Found non-null allocation.
10174  if(nextAlloc2ndIndex < suballoc2ndCount)
10175  {
10176  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10177 
10178  // 1. Process free space before this allocation.
10179  if(lastOffset < suballoc.offset)
10180  {
10181  // There is free space from lastOffset to suballoc.offset.
10182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10183  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10184  }
10185 
10186  // 2. Process this allocation.
10187  // There is allocation with suballoc.offset, suballoc.size.
10188  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10189 
10190  // 3. Prepare for next iteration.
10191  lastOffset = suballoc.offset + suballoc.size;
10192  ++nextAlloc2ndIndex;
10193  }
10194  // We are at the end.
10195  else
10196  {
10197  if(lastOffset < freeSpace2ndTo1stEnd)
10198  {
10199  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10200  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10201  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10202  }
10203 
10204  // End of loop.
10205  lastOffset = freeSpace2ndTo1stEnd;
10206  }
10207  }
10208  }
10209 
10210  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10211  while(lastOffset < freeSpace1stTo2ndEnd)
10212  {
10213  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10214  while(nextAlloc1stIndex < suballoc1stCount &&
10215  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10216  {
10217  ++nextAlloc1stIndex;
10218  }
10219 
10220  // Found non-null allocation.
10221  if(nextAlloc1stIndex < suballoc1stCount)
10222  {
10223  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10224 
10225  // 1. Process free space before this allocation.
10226  if(lastOffset < suballoc.offset)
10227  {
10228  // There is free space from lastOffset to suballoc.offset.
10229  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10230  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10231  }
10232 
10233  // 2. Process this allocation.
10234  // There is allocation with suballoc.offset, suballoc.size.
10235  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10236 
10237  // 3. Prepare for next iteration.
10238  lastOffset = suballoc.offset + suballoc.size;
10239  ++nextAlloc1stIndex;
10240  }
10241  // We are at the end.
10242  else
10243  {
10244  if(lastOffset < freeSpace1stTo2ndEnd)
10245  {
10246  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10247  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10248  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10249  }
10250 
10251  // End of loop.
10252  lastOffset = freeSpace1stTo2ndEnd;
10253  }
10254  }
10255 
10256  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10257  {
10258  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10259  while(lastOffset < size)
10260  {
10261  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10262  while(nextAlloc2ndIndex != SIZE_MAX &&
10263  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10264  {
10265  --nextAlloc2ndIndex;
10266  }
10267 
10268  // Found non-null allocation.
10269  if(nextAlloc2ndIndex != SIZE_MAX)
10270  {
10271  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10272 
10273  // 1. Process free space before this allocation.
10274  if(lastOffset < suballoc.offset)
10275  {
10276  // There is free space from lastOffset to suballoc.offset.
10277  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10278  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10279  }
10280 
10281  // 2. Process this allocation.
10282  // There is allocation with suballoc.offset, suballoc.size.
10283  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10284 
10285  // 3. Prepare for next iteration.
10286  lastOffset = suballoc.offset + suballoc.size;
10287  --nextAlloc2ndIndex;
10288  }
10289  // We are at the end.
10290  else
10291  {
10292  if(lastOffset < size)
10293  {
10294  // There is free space from lastOffset to size.
10295  const VkDeviceSize unusedRangeSize = size - lastOffset;
10296  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10297  }
10298 
10299  // End of loop.
10300  lastOffset = size;
10301  }
10302  }
10303  }
10304 
10305  PrintDetailedMap_End(json);
10306 }
10307 #endif // #if VMA_STATS_STRING_ENABLED
10308 
10309 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10310  uint32_t currentFrameIndex,
10311  uint32_t frameInUseCount,
10312  VkDeviceSize bufferImageGranularity,
10313  VkDeviceSize allocSize,
10314  VkDeviceSize allocAlignment,
10315  bool upperAddress,
10316  VmaSuballocationType allocType,
10317  bool canMakeOtherLost,
10318  uint32_t strategy,
10319  VmaAllocationRequest* pAllocationRequest)
10320 {
10321  VMA_ASSERT(allocSize > 0);
10322  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10323  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10324  VMA_HEAVY_ASSERT(Validate());
10325  return upperAddress ?
10326  CreateAllocationRequest_UpperAddress(
10327  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10328  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10329  CreateAllocationRequest_LowerAddress(
10330  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10331  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10332 }
10333 
10334 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10335  uint32_t currentFrameIndex,
10336  uint32_t frameInUseCount,
10337  VkDeviceSize bufferImageGranularity,
10338  VkDeviceSize allocSize,
10339  VkDeviceSize allocAlignment,
10340  VmaSuballocationType allocType,
10341  bool canMakeOtherLost,
10342  uint32_t strategy,
10343  VmaAllocationRequest* pAllocationRequest)
10344 {
10345  const VkDeviceSize size = GetSize();
10346  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10347  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10348 
10349  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10350  {
10351  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10352  return false;
10353  }
10354 
10355  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10356  if(allocSize > size)
10357  {
10358  return false;
10359  }
10360  VkDeviceSize resultBaseOffset = size - allocSize;
10361  if(!suballocations2nd.empty())
10362  {
10363  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10364  resultBaseOffset = lastSuballoc.offset - allocSize;
10365  if(allocSize > lastSuballoc.offset)
10366  {
10367  return false;
10368  }
10369  }
10370 
10371  // Start from offset equal to end of free space.
10372  VkDeviceSize resultOffset = resultBaseOffset;
10373 
10374  // Apply VMA_DEBUG_MARGIN at the end.
10375  if(VMA_DEBUG_MARGIN > 0)
10376  {
10377  if(resultOffset < VMA_DEBUG_MARGIN)
10378  {
10379  return false;
10380  }
10381  resultOffset -= VMA_DEBUG_MARGIN;
10382  }
10383 
10384  // Apply alignment.
10385  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10386 
10387  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10388  // Make bigger alignment if necessary.
10389  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10390  {
10391  bool bufferImageGranularityConflict = false;
10392  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10393  {
10394  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10395  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10396  {
10397  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10398  {
10399  bufferImageGranularityConflict = true;
10400  break;
10401  }
10402  }
10403  else
10404  // Already on previous page.
10405  break;
10406  }
10407  if(bufferImageGranularityConflict)
10408  {
10409  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10410  }
10411  }
10412 
10413  // There is enough free space.
10414  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10415  suballocations1st.back().offset + suballocations1st.back().size :
10416  0;
10417  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10418  {
10419  // Check previous suballocations for BufferImageGranularity conflicts.
10420  // If conflict exists, allocation cannot be made here.
10421  if(bufferImageGranularity > 1)
10422  {
10423  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10424  {
10425  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10426  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10427  {
10428  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10429  {
10430  return false;
10431  }
10432  }
10433  else
10434  {
10435  // Already on next page.
10436  break;
10437  }
10438  }
10439  }
10440 
10441  // All tests passed: Success.
10442  pAllocationRequest->offset = resultOffset;
10443  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10444  pAllocationRequest->sumItemSize = 0;
10445  // pAllocationRequest->item unused.
10446  pAllocationRequest->itemsToMakeLostCount = 0;
10447  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10448  return true;
10449  }
10450 
10451  return false;
10452 }
10453 
10454 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10455  uint32_t currentFrameIndex,
10456  uint32_t frameInUseCount,
10457  VkDeviceSize bufferImageGranularity,
10458  VkDeviceSize allocSize,
10459  VkDeviceSize allocAlignment,
10460  VmaSuballocationType allocType,
10461  bool canMakeOtherLost,
10462  uint32_t strategy,
10463  VmaAllocationRequest* pAllocationRequest)
10464 {
10465  const VkDeviceSize size = GetSize();
10466  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10467  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10468 
10469  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10470  {
10471  // Try to allocate at the end of 1st vector.
10472 
10473  VkDeviceSize resultBaseOffset = 0;
10474  if(!suballocations1st.empty())
10475  {
10476  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10477  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10478  }
10479 
10480  // Start from offset equal to beginning of free space.
10481  VkDeviceSize resultOffset = resultBaseOffset;
10482 
10483  // Apply VMA_DEBUG_MARGIN at the beginning.
10484  if(VMA_DEBUG_MARGIN > 0)
10485  {
10486  resultOffset += VMA_DEBUG_MARGIN;
10487  }
10488 
10489  // Apply alignment.
10490  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10491 
10492  // Check previous suballocations for BufferImageGranularity conflicts.
10493  // Make bigger alignment if necessary.
10494  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10495  {
10496  bool bufferImageGranularityConflict = false;
10497  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10498  {
10499  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10500  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10501  {
10502  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10503  {
10504  bufferImageGranularityConflict = true;
10505  break;
10506  }
10507  }
10508  else
10509  // Already on previous page.
10510  break;
10511  }
10512  if(bufferImageGranularityConflict)
10513  {
10514  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10515  }
10516  }
10517 
10518  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10519  suballocations2nd.back().offset : size;
10520 
10521  // There is enough free space at the end after alignment.
10522  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10523  {
10524  // Check next suballocations for BufferImageGranularity conflicts.
10525  // If conflict exists, allocation cannot be made here.
10526  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10527  {
10528  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10529  {
10530  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10531  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10532  {
10533  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10534  {
10535  return false;
10536  }
10537  }
10538  else
10539  {
10540  // Already on previous page.
10541  break;
10542  }
10543  }
10544  }
10545 
10546  // All tests passed: Success.
10547  pAllocationRequest->offset = resultOffset;
10548  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10549  pAllocationRequest->sumItemSize = 0;
10550  // pAllocationRequest->item, customData unused.
10551  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10552  pAllocationRequest->itemsToMakeLostCount = 0;
10553  return true;
10554  }
10555  }
10556 
10557  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10558  // beginning of 1st vector as the end of free space.
10559  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10560  {
10561  VMA_ASSERT(!suballocations1st.empty());
10562 
10563  VkDeviceSize resultBaseOffset = 0;
10564  if(!suballocations2nd.empty())
10565  {
10566  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10567  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10568  }
10569 
10570  // Start from offset equal to beginning of free space.
10571  VkDeviceSize resultOffset = resultBaseOffset;
10572 
10573  // Apply VMA_DEBUG_MARGIN at the beginning.
10574  if(VMA_DEBUG_MARGIN > 0)
10575  {
10576  resultOffset += VMA_DEBUG_MARGIN;
10577  }
10578 
10579  // Apply alignment.
10580  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10581 
10582  // Check previous suballocations for BufferImageGranularity conflicts.
10583  // Make bigger alignment if necessary.
10584  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10585  {
10586  bool bufferImageGranularityConflict = false;
10587  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10588  {
10589  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10590  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10591  {
10592  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10593  {
10594  bufferImageGranularityConflict = true;
10595  break;
10596  }
10597  }
10598  else
10599  // Already on previous page.
10600  break;
10601  }
10602  if(bufferImageGranularityConflict)
10603  {
10604  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10605  }
10606  }
10607 
10608  pAllocationRequest->itemsToMakeLostCount = 0;
10609  pAllocationRequest->sumItemSize = 0;
10610  size_t index1st = m_1stNullItemsBeginCount;
10611 
10612  if(canMakeOtherLost)
10613  {
10614  while(index1st < suballocations1st.size() &&
10615  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10616  {
10617  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10618  const VmaSuballocation& suballoc = suballocations1st[index1st];
10619  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10620  {
10621  // No problem.
10622  }
10623  else
10624  {
10625  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10626  if(suballoc.hAllocation->CanBecomeLost() &&
10627  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10628  {
10629  ++pAllocationRequest->itemsToMakeLostCount;
10630  pAllocationRequest->sumItemSize += suballoc.size;
10631  }
10632  else
10633  {
10634  return false;
10635  }
10636  }
10637  ++index1st;
10638  }
10639 
10640  // Check next suballocations for BufferImageGranularity conflicts.
10641  // If conflict exists, we must mark more allocations lost or fail.
10642  if(bufferImageGranularity > 1)
10643  {
10644  while(index1st < suballocations1st.size())
10645  {
10646  const VmaSuballocation& suballoc = suballocations1st[index1st];
10647  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10648  {
10649  if(suballoc.hAllocation != VK_NULL_HANDLE)
10650  {
10651  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10652  if(suballoc.hAllocation->CanBecomeLost() &&
10653  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10654  {
10655  ++pAllocationRequest->itemsToMakeLostCount;
10656  pAllocationRequest->sumItemSize += suballoc.size;
10657  }
10658  else
10659  {
10660  return false;
10661  }
10662  }
10663  }
10664  else
10665  {
10666  // Already on next page.
10667  break;
10668  }
10669  ++index1st;
10670  }
10671  }
10672 
10673  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10674  if(index1st == suballocations1st.size() &&
10675  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10676  {
10677  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
10678  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10679  }
10680  }
10681 
10682  // There is enough free space at the end after alignment.
10683  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10684  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10685  {
10686  // Check next suballocations for BufferImageGranularity conflicts.
10687  // If conflict exists, allocation cannot be made here.
10688  if(bufferImageGranularity > 1)
10689  {
10690  for(size_t nextSuballocIndex = index1st;
10691  nextSuballocIndex < suballocations1st.size();
10692  nextSuballocIndex++)
10693  {
10694  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10695  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10696  {
10697  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10698  {
10699  return false;
10700  }
10701  }
10702  else
10703  {
10704  // Already on next page.
10705  break;
10706  }
10707  }
10708  }
10709 
10710  // All tests passed: Success.
10711  pAllocationRequest->offset = resultOffset;
10712  pAllocationRequest->sumFreeSize =
10713  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10714  - resultBaseOffset
10715  - pAllocationRequest->sumItemSize;
10716  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10717  // pAllocationRequest->item, customData unused.
10718  return true;
10719  }
10720  }
10721 
10722  return false;
10723 }
10724 
10725 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10726  uint32_t currentFrameIndex,
10727  uint32_t frameInUseCount,
10728  VmaAllocationRequest* pAllocationRequest)
10729 {
10730  if(pAllocationRequest->itemsToMakeLostCount == 0)
10731  {
10732  return true;
10733  }
10734 
10735  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10736 
10737  // We always start from 1st.
10738  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10739  size_t index = m_1stNullItemsBeginCount;
10740  size_t madeLostCount = 0;
10741  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10742  {
10743  if(index == suballocations->size())
10744  {
10745  index = 0;
10746  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10747  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10748  {
10749  suballocations = &AccessSuballocations2nd();
10750  }
10751  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10752  // suballocations continues pointing at AccessSuballocations1st().
10753  VMA_ASSERT(!suballocations->empty());
10754  }
10755  VmaSuballocation& suballoc = (*suballocations)[index];
10756  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10757  {
10758  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10759  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10760  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10761  {
10762  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10763  suballoc.hAllocation = VK_NULL_HANDLE;
10764  m_SumFreeSize += suballoc.size;
10765  if(suballocations == &AccessSuballocations1st())
10766  {
10767  ++m_1stNullItemsMiddleCount;
10768  }
10769  else
10770  {
10771  ++m_2ndNullItemsCount;
10772  }
10773  ++madeLostCount;
10774  }
10775  else
10776  {
10777  return false;
10778  }
10779  }
10780  ++index;
10781  }
10782 
10783  CleanupAfterFree();
10784  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10785 
10786  return true;
10787 }
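/*
MakeRequestedAllocationsLost() only ever succeeds for allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT that have not been used for more than
frameInUseCount frames. A sketch of the per-frame protocol on the application
side (assuming `allocator` and such an allocation `alloc` exist):

    vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame

    // Before using the allocation this frame, touch it to mark it used
    // and to find out whether it has been made lost in the meantime:
    if(!vmaTouchAllocation(allocator, alloc))
    {
        // Lost: its memory was reclaimed for another allocation. Recreate it.
    }
*/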
10788 
10789 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10790 {
10791  uint32_t lostAllocationCount = 0;
10792 
10793  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10794  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10795  {
10796  VmaSuballocation& suballoc = suballocations1st[i];
10797  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10798  suballoc.hAllocation->CanBecomeLost() &&
10799  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10800  {
10801  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10802  suballoc.hAllocation = VK_NULL_HANDLE;
10803  ++m_1stNullItemsMiddleCount;
10804  m_SumFreeSize += suballoc.size;
10805  ++lostAllocationCount;
10806  }
10807  }
10808 
10809  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10810  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10811  {
10812  VmaSuballocation& suballoc = suballocations2nd[i];
10813  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10814  suballoc.hAllocation->CanBecomeLost() &&
10815  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10816  {
10817  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10818  suballoc.hAllocation = VK_NULL_HANDLE;
10819  ++m_2ndNullItemsCount;
10820  m_SumFreeSize += suballoc.size;
10821  ++lostAllocationCount;
10822  }
10823  }
10824 
10825  if(lostAllocationCount)
10826  {
10827  CleanupAfterFree();
10828  }
10829 
10830  return lostAllocationCount;
10831 }
10832 
10833 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10834 {
10835  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10836  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10837  {
10838  const VmaSuballocation& suballoc = suballocations1st[i];
10839  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10840  {
10841  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10842  {
10843  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10844  return VK_ERROR_VALIDATION_FAILED_EXT;
10845  }
10846  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10847  {
10848  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10849  return VK_ERROR_VALIDATION_FAILED_EXT;
10850  }
10851  }
10852  }
10853 
10854  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10855  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10856  {
10857  const VmaSuballocation& suballoc = suballocations2nd[i];
10858  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10859  {
10860  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10861  {
10862  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10863  return VK_ERROR_VALIDATION_FAILED_EXT;
10864  }
10865  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10866  {
10867  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10868  return VK_ERROR_VALIDATION_FAILED_EXT;
10869  }
10870  }
10871  }
10872 
10873  return VK_SUCCESS;
10874 }
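/*
CheckCorruption() validates the magic values that are written VMA_DEBUG_MARGIN
bytes before and immediately after every allocation. A sketch of enabling the
feature, assuming the macros are defined in the translation unit that compiles
the implementation:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    // Later, check all allocations in all memory types at once:
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_ERROR_FEATURE_NOT_PRESENT: corruption detection not enabled or memory not mappable.
    // VK_ERROR_VALIDATION_FAILED_EXT: corruption actually detected.
*/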
10875 
10876 void VmaBlockMetadata_Linear::Alloc(
10877  const VmaAllocationRequest& request,
10878  VmaSuballocationType type,
10879  VkDeviceSize allocSize,
10880  VmaAllocation hAllocation)
10881 {
10882  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10883 
10884  switch(request.type)
10885  {
10886  case VmaAllocationRequestType::UpperAddress:
10887  {
10888  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10889  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10890  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10891  suballocations2nd.push_back(newSuballoc);
10892  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10893  }
10894  break;
10895  case VmaAllocationRequestType::EndOf1st:
10896  {
10897  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10898 
10899  VMA_ASSERT(suballocations1st.empty() ||
10900  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10901  // Check if it fits before the end of the block.
10902  VMA_ASSERT(request.offset + allocSize <= GetSize());
10903 
10904  suballocations1st.push_back(newSuballoc);
10905  }
10906  break;
10907  case VmaAllocationRequestType::EndOf2nd:
10908  {
10909  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10910  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10911  VMA_ASSERT(!suballocations1st.empty() &&
10912  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10913  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10914 
10915  switch(m_2ndVectorMode)
10916  {
10917  case SECOND_VECTOR_EMPTY:
10918  // First allocation from second part ring buffer.
10919  VMA_ASSERT(suballocations2nd.empty());
10920  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10921  break;
10922  case SECOND_VECTOR_RING_BUFFER:
10923  // 2-part ring buffer is already started.
10924  VMA_ASSERT(!suballocations2nd.empty());
10925  break;
10926  case SECOND_VECTOR_DOUBLE_STACK:
10927  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10928  break;
10929  default:
10930  VMA_ASSERT(0);
10931  }
10932 
10933  suballocations2nd.push_back(newSuballoc);
10934  }
10935  break;
10936  default:
10937  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10938  }
10939 
10940  m_SumFreeSize -= newSuballoc.size;
10941 }
10942 
10943 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10944 {
10945  FreeAtOffset(allocation->GetOffset());
10946 }
10947 
10948 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10949 {
10950  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10951  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10952 
10953  if(!suballocations1st.empty())
10954  {
10955  // First allocation: Mark it as next empty at the beginning.
10956  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10957  if(firstSuballoc.offset == offset)
10958  {
10959  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10960  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10961  m_SumFreeSize += firstSuballoc.size;
10962  ++m_1stNullItemsBeginCount;
10963  CleanupAfterFree();
10964  return;
10965  }
10966  }
10967 
10968  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10969  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10970  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10971  {
10972  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10973  if(lastSuballoc.offset == offset)
10974  {
10975  m_SumFreeSize += lastSuballoc.size;
10976  suballocations2nd.pop_back();
10977  CleanupAfterFree();
10978  return;
10979  }
10980  }
10981  // Last allocation in 1st vector.
10982  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10983  {
10984  VmaSuballocation& lastSuballoc = suballocations1st.back();
10985  if(lastSuballoc.offset == offset)
10986  {
10987  m_SumFreeSize += lastSuballoc.size;
10988  suballocations1st.pop_back();
10989  CleanupAfterFree();
10990  return;
10991  }
10992  }
10993 
10994  // Item from the middle of 1st vector.
10995  {
10996  VmaSuballocation refSuballoc;
10997  refSuballoc.offset = offset;
10998  // The rest of the members stay intentionally uninitialized for performance.
10999  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11000  suballocations1st.begin() + m_1stNullItemsBeginCount,
11001  suballocations1st.end(),
11002  refSuballoc,
11003  VmaSuballocationOffsetLess());
11004  if(it != suballocations1st.end())
11005  {
11006  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11007  it->hAllocation = VK_NULL_HANDLE;
11008  ++m_1stNullItemsMiddleCount;
11009  m_SumFreeSize += it->size;
11010  CleanupAfterFree();
11011  return;
11012  }
11013  }
11014 
11015  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11016  {
11017  // Item from the middle of 2nd vector.
11018  VmaSuballocation refSuballoc;
11019  refSuballoc.offset = offset;
11020  // The rest of the members stay intentionally uninitialized for performance.
11021  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11022  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11023  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11024  if(it != suballocations2nd.end())
11025  {
11026  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11027  it->hAllocation = VK_NULL_HANDLE;
11028  ++m_2ndNullItemsCount;
11029  m_SumFreeSize += it->size;
11030  CleanupAfterFree();
11031  return;
11032  }
11033  }
11034 
11035  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11036 }
11037 
11038 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11039 {
11040  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11041  const size_t suballocCount = AccessSuballocations1st().size();
11042  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11043 }
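/*
In other words, compaction kicks in only when the vector is non-trivial (more
than 32 items) and null items make up at least 60% of it: with n null items and
a non-null items, 2*n >= 3*a is equivalent to n / (n + a) >= 0.6. Example:
40 suballocations of which 24 are null gives 2*24 = 48 >= 3*16 = 48, so the
vector gets compacted.
*/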
11044 
11045 void VmaBlockMetadata_Linear::CleanupAfterFree()
11046 {
11047  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11048  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11049 
11050  if(IsEmpty())
11051  {
11052  suballocations1st.clear();
11053  suballocations2nd.clear();
11054  m_1stNullItemsBeginCount = 0;
11055  m_1stNullItemsMiddleCount = 0;
11056  m_2ndNullItemsCount = 0;
11057  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11058  }
11059  else
11060  {
11061  const size_t suballoc1stCount = suballocations1st.size();
11062  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11063  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11064 
11065  // Find more null items at the beginning of 1st vector.
11066  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11067  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11068  {
11069  ++m_1stNullItemsBeginCount;
11070  --m_1stNullItemsMiddleCount;
11071  }
11072 
11073  // Find more null items at the end of 1st vector.
11074  while(m_1stNullItemsMiddleCount > 0 &&
11075  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11076  {
11077  --m_1stNullItemsMiddleCount;
11078  suballocations1st.pop_back();
11079  }
11080 
11081  // Find more null items at the end of 2nd vector.
11082  while(m_2ndNullItemsCount > 0 &&
11083  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11084  {
11085  --m_2ndNullItemsCount;
11086  suballocations2nd.pop_back();
11087  }
11088 
11089  // Find more null items at the beginning of 2nd vector.
11090  while(m_2ndNullItemsCount > 0 &&
11091  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11092  {
11093  --m_2ndNullItemsCount;
11094  VmaVectorRemove(suballocations2nd, 0);
11095  }
11096 
11097  if(ShouldCompact1st())
11098  {
11099  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11100  size_t srcIndex = m_1stNullItemsBeginCount;
11101  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11102  {
11103  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11104  {
11105  ++srcIndex;
11106  }
11107  if(dstIndex != srcIndex)
11108  {
11109  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11110  }
11111  ++srcIndex;
11112  }
11113  suballocations1st.resize(nonNullItemCount);
11114  m_1stNullItemsBeginCount = 0;
11115  m_1stNullItemsMiddleCount = 0;
11116  }
11117 
11118  // 2nd vector became empty.
11119  if(suballocations2nd.empty())
11120  {
11121  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11122  }
11123 
11124  // 1st vector became empty.
11125  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11126  {
11127  suballocations1st.clear();
11128  m_1stNullItemsBeginCount = 0;
11129 
11130  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11131  {
11132  // Swap 1st with 2nd. Now 2nd is empty.
11133  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11134  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11135  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11136  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11137  {
11138  ++m_1stNullItemsBeginCount;
11139  --m_1stNullItemsMiddleCount;
11140  }
11141  m_2ndNullItemsCount = 0;
11142  m_1stVectorIndex ^= 1;
11143  }
11144  }
11145  }
11146 
11147  VMA_HEAVY_ASSERT(Validate());
11148 }
11149 
11150 
11151 ////////////////////////////////////////////////////////////////////////////////
11152 // class VmaBlockMetadata_Buddy
11153 
11154 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11155  VmaBlockMetadata(hAllocator),
11156  m_Root(VMA_NULL),
11157  m_AllocationCount(0),
11158  m_FreeCount(1),
11159  m_SumFreeSize(0)
11160 {
11161  memset(m_FreeList, 0, sizeof(m_FreeList));
11162 }
11163 
11164 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11165 {
11166  DeleteNode(m_Root);
11167 }
11168 
11169 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11170 {
11171  VmaBlockMetadata::Init(size);
11172 
11173  m_UsableSize = VmaPrevPow2(size);
11174  m_SumFreeSize = m_UsableSize;
11175 
11176  // Calculate m_LevelCount.
11177  m_LevelCount = 1;
11178  while(m_LevelCount < MAX_LEVELS &&
11179  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11180  {
11181  ++m_LevelCount;
11182  }
11183 
11184  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11185  rootNode->offset = 0;
11186  rootNode->type = Node::TYPE_FREE;
11187  rootNode->parent = VMA_NULL;
11188  rootNode->buddy = VMA_NULL;
11189 
11190  m_Root = rootNode;
11191  AddToFreeListFront(0, rootNode);
11192 }
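/*
Example of the resulting level layout: for a block of 1000 bytes,
m_UsableSize = VmaPrevPow2(1000) = 512, so level 0 nodes cover 512 bytes,
level 1 nodes 256, level 2 nodes 128, and so on down to MIN_NODE_SIZE; the
trailing 488 bytes are unusable by this algorithm (see GetUnusableSize()).
With the usual power-of-2 block sizes the unusable tail is zero.
*/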
11193 
11194 bool VmaBlockMetadata_Buddy::Validate() const
11195 {
11196  // Validate tree.
11197  ValidationContext ctx;
11198  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11199  {
11200  VMA_VALIDATE(false && "ValidateNode failed.");
11201  }
11202  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11203  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11204 
11205  // Validate free node lists.
11206  for(uint32_t level = 0; level < m_LevelCount; ++level)
11207  {
11208  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11209  m_FreeList[level].front->free.prev == VMA_NULL);
11210 
11211  for(Node* node = m_FreeList[level].front;
11212  node != VMA_NULL;
11213  node = node->free.next)
11214  {
11215  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11216 
11217  if(node->free.next == VMA_NULL)
11218  {
11219  VMA_VALIDATE(m_FreeList[level].back == node);
11220  }
11221  else
11222  {
11223  VMA_VALIDATE(node->free.next->free.prev == node);
11224  }
11225  }
11226  }
11227 
11228  // Validate that free lists at higher levels are empty.
11229  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11230  {
11231  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11232  }
11233 
11234  return true;
11235 }
11236 
11237 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11238 {
11239  for(uint32_t level = 0; level < m_LevelCount; ++level)
11240  {
11241  if(m_FreeList[level].front != VMA_NULL)
11242  {
11243  return LevelToNodeSize(level);
11244  }
11245  }
11246  return 0;
11247 }
11248 
11249 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11250 {
11251  const VkDeviceSize unusableSize = GetUnusableSize();
11252 
11253  outInfo.blockCount = 1;
11254 
11255  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11256  outInfo.usedBytes = outInfo.unusedBytes = 0;
11257 
11258  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11259  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11260  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11261 
11262  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11263 
11264  if(unusableSize > 0)
11265  {
11266  ++outInfo.unusedRangeCount;
11267  outInfo.unusedBytes += unusableSize;
11268  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11269  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11270  }
11271 }
11272 
11273 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11274 {
11275  const VkDeviceSize unusableSize = GetUnusableSize();
11276 
11277  inoutStats.size += GetSize();
11278  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11279  inoutStats.allocationCount += m_AllocationCount;
11280  inoutStats.unusedRangeCount += m_FreeCount;
11281  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11282 
11283  if(unusableSize > 0)
11284  {
11285  ++inoutStats.unusedRangeCount;
11286  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11287  }
11288 }
11289 
11290 #if VMA_STATS_STRING_ENABLED
11291 
11292 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11293 {
11294  // TODO optimize
11295  VmaStatInfo stat;
11296  CalcAllocationStatInfo(stat);
11297 
11298  PrintDetailedMap_Begin(
11299  json,
11300  stat.unusedBytes,
11301  stat.allocationCount,
11302  stat.unusedRangeCount);
11303 
11304  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11305 
11306  const VkDeviceSize unusableSize = GetUnusableSize();
11307  if(unusableSize > 0)
11308  {
11309  PrintDetailedMap_UnusedRange(json,
11310  m_UsableSize, // offset
11311  unusableSize); // size
11312  }
11313 
11314  PrintDetailedMap_End(json);
11315 }
11316 
11317 #endif // #if VMA_STATS_STRING_ENABLED
11318 
11319 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11320  uint32_t currentFrameIndex,
11321  uint32_t frameInUseCount,
11322  VkDeviceSize bufferImageGranularity,
11323  VkDeviceSize allocSize,
11324  VkDeviceSize allocAlignment,
11325  bool upperAddress,
11326  VmaSuballocationType allocType,
11327  bool canMakeOtherLost,
11328  uint32_t strategy,
11329  VmaAllocationRequest* pAllocationRequest)
11330 {
11331  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11332 
11333  // Simple way to respect bufferImageGranularity. May be optimized some day.
11334  // Whenever the allocation might be an OPTIMAL image, inflate its alignment and size to the granularity:
11335  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11336  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11337  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11338  {
11339  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11340  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11341  }
11342 
11343  if(allocSize > m_UsableSize)
11344  {
11345  return false;
11346  }
11347 
11348  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11349  for(uint32_t level = targetLevel + 1; level--; )
11350  {
11351  for(Node* freeNode = m_FreeList[level].front;
11352  freeNode != VMA_NULL;
11353  freeNode = freeNode->free.next)
11354  {
11355  if(freeNode->offset % allocAlignment == 0)
11356  {
11357  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11358  pAllocationRequest->offset = freeNode->offset;
11359  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11360  pAllocationRequest->sumItemSize = 0;
11361  pAllocationRequest->itemsToMakeLostCount = 0;
11362  pAllocationRequest->customData = (void*)(uintptr_t)level;
11363  return true;
11364  }
11365  }
11366  }
11367 
11368  return false;
11369 }
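/*
Note the search order: `for(level = targetLevel + 1; level--; )` visits levels
targetLevel, targetLevel - 1, ..., 0, i.e. it first looks for a free node of
the smallest sufficient size and then falls back to progressively larger free
nodes, which Alloc() below splits down to the target level.
*/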
11370 
11371 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11372  uint32_t currentFrameIndex,
11373  uint32_t frameInUseCount,
11374  VmaAllocationRequest* pAllocationRequest)
11375 {
11376  /*
11377  Lost allocations are not supported in buddy allocator at the moment.
11378  Support might be added in the future.
11379  */
11380  return pAllocationRequest->itemsToMakeLostCount == 0;
11381 }
11382 
11383 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11384 {
11385  /*
11386  Lost allocations are not supported in buddy allocator at the moment.
11387  Support might be added in the future.
11388  */
11389  return 0;
11390 }
11391 
11392 void VmaBlockMetadata_Buddy::Alloc(
11393  const VmaAllocationRequest& request,
11394  VmaSuballocationType type,
11395  VkDeviceSize allocSize,
11396  VmaAllocation hAllocation)
11397 {
11398  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11399 
11400  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11401  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11402 
11403  Node* currNode = m_FreeList[currLevel].front;
11404  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11405  while(currNode->offset != request.offset)
11406  {
11407  currNode = currNode->free.next;
11408  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11409  }
11410 
11411  // Go down, splitting free nodes.
11412  while(currLevel < targetLevel)
11413  {
11414  // currNode is already first free node at currLevel.
11415  // Remove it from list of free nodes at this currLevel.
11416  RemoveFromFreeList(currLevel, currNode);
11417 
11418  const uint32_t childrenLevel = currLevel + 1;
11419 
11420  // Create two free sub-nodes.
11421  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11422  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11423 
11424  leftChild->offset = currNode->offset;
11425  leftChild->type = Node::TYPE_FREE;
11426  leftChild->parent = currNode;
11427  leftChild->buddy = rightChild;
11428 
11429  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11430  rightChild->type = Node::TYPE_FREE;
11431  rightChild->parent = currNode;
11432  rightChild->buddy = leftChild;
11433 
11434  // Convert current currNode to split type.
11435  currNode->type = Node::TYPE_SPLIT;
11436  currNode->split.leftChild = leftChild;
11437 
11438  // Add child nodes to free list. Order is important!
11439  AddToFreeListFront(childrenLevel, rightChild);
11440  AddToFreeListFront(childrenLevel, leftChild);
11441 
11442  ++m_FreeCount;
11443  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11444  ++currLevel;
11445  currNode = m_FreeList[currLevel].front;
11446 
11447  /*
11448  We can be sure that currNode, as the left child of the node previously split,
11449  also fulfills the alignment requirement.
11450  */
11451  }
11452 
11453  // Remove from free list.
11454  VMA_ASSERT(currLevel == targetLevel &&
11455  currNode != VMA_NULL &&
11456  currNode->type == Node::TYPE_FREE);
11457  RemoveFromFreeList(currLevel, currNode);
11458 
11459  // Convert to allocation node.
11460  currNode->type = Node::TYPE_ALLOCATION;
11461  currNode->allocation.alloc = hAllocation;
11462 
11463  ++m_AllocationCount;
11464  --m_FreeCount;
11465  m_SumFreeSize -= allocSize;
11466 }
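/*
Worked example of the splitting loop: requesting 64 KiB when the only free node
is a 1 MiB root (level 0, targetLevel 4) performs four splits,
1 MiB -> 512 KiB -> 256 KiB -> 128 KiB -> 64 KiB, each time continuing into the
left child, and finally converts that left-most 64 KiB node to TYPE_ALLOCATION.
*/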
11467 
11468 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11469 {
11470  if(node->type == Node::TYPE_SPLIT)
11471  {
11472  DeleteNode(node->split.leftChild->buddy);
11473  DeleteNode(node->split.leftChild);
11474  }
11475 
11476  vma_delete(GetAllocationCallbacks(), node);
11477 }
11478 
11479 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11480 {
11481  VMA_VALIDATE(level < m_LevelCount);
11482  VMA_VALIDATE(curr->parent == parent);
11483  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11484  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11485  switch(curr->type)
11486  {
11487  case Node::TYPE_FREE:
11488  // curr->free.prev, next are validated separately.
11489  ctx.calculatedSumFreeSize += levelNodeSize;
11490  ++ctx.calculatedFreeCount;
11491  break;
11492  case Node::TYPE_ALLOCATION:
11493  ++ctx.calculatedAllocationCount;
11494  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11495  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11496  break;
11497  case Node::TYPE_SPLIT:
11498  {
11499  const uint32_t childrenLevel = level + 1;
11500  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11501  const Node* const leftChild = curr->split.leftChild;
11502  VMA_VALIDATE(leftChild != VMA_NULL);
11503  VMA_VALIDATE(leftChild->offset == curr->offset);
11504  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11505  {
11506  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11507  }
11508  const Node* const rightChild = leftChild->buddy;
11509  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11510  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11511  {
11512  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11513  }
11514  }
11515  break;
11516  default:
11517  return false;
11518  }
11519 
11520  return true;
11521 }
11522 
11523 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11524 {
11525  // I know this could be optimized somehow, e.g. by using std::log2p1 from C++20.
11526  uint32_t level = 0;
11527  VkDeviceSize currLevelNodeSize = m_UsableSize;
11528  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11529  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11530  {
11531  ++level;
11532  currLevelNodeSize = nextLevelNodeSize;
11533  nextLevelNodeSize = currLevelNodeSize >> 1;
11534  }
11535  return level;
11536 }
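/*
For example, with m_UsableSize = 1024 and allocSize = 100 the loop walks node
sizes 1024 -> 512 -> 256 -> 128 and stops because the next size, 64, would be
too small, returning level 3 (node size 128).
*/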
11537 
11538 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11539 {
11540  // Find node and level.
11541  Node* node = m_Root;
11542  VkDeviceSize nodeOffset = 0;
11543  uint32_t level = 0;
11544  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11545  while(node->type == Node::TYPE_SPLIT)
11546  {
11547  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11548  if(offset < nodeOffset + nextLevelSize)
11549  {
11550  node = node->split.leftChild;
11551  }
11552  else
11553  {
11554  node = node->split.leftChild->buddy;
11555  nodeOffset += nextLevelSize;
11556  }
11557  ++level;
11558  levelNodeSize = nextLevelSize;
11559  }
11560 
11561  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11562  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11563 
11564  ++m_FreeCount;
11565  --m_AllocationCount;
11566  m_SumFreeSize += alloc->GetSize();
11567 
11568  node->type = Node::TYPE_FREE;
11569 
11570  // Join free nodes if possible.
11571  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11572  {
11573  RemoveFromFreeList(level, node->buddy);
11574  Node* const parent = node->parent;
11575 
11576  vma_delete(GetAllocationCallbacks(), node->buddy);
11577  vma_delete(GetAllocationCallbacks(), node);
11578  parent->type = Node::TYPE_FREE;
11579 
11580  node = parent;
11581  --level;
11582  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11583  --m_FreeCount;
11584  }
11585 
11586  AddToFreeListFront(level, node);
11587 }
11588 
11589 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11590 {
11591  switch(node->type)
11592  {
11593  case Node::TYPE_FREE:
11594  ++outInfo.unusedRangeCount;
11595  outInfo.unusedBytes += levelNodeSize;
11596  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11597  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11598  break;
11599  case Node::TYPE_ALLOCATION:
11600  {
11601  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11602  ++outInfo.allocationCount;
11603  outInfo.usedBytes += allocSize;
11604  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11605  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11606 
11607  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11608  if(unusedRangeSize > 0)
11609  {
11610  ++outInfo.unusedRangeCount;
11611  outInfo.unusedBytes += unusedRangeSize;
11612  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11613  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11614  }
11615  }
11616  break;
11617  case Node::TYPE_SPLIT:
11618  {
11619  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11620  const Node* const leftChild = node->split.leftChild;
11621  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11622  const Node* const rightChild = leftChild->buddy;
11623  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11624  }
11625  break;
11626  default:
11627  VMA_ASSERT(0);
11628  }
11629 }
11630 
11631 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11632 {
11633  VMA_ASSERT(node->type == Node::TYPE_FREE);
11634 
11635  // List is empty.
11636  Node* const frontNode = m_FreeList[level].front;
11637  if(frontNode == VMA_NULL)
11638  {
11639  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11640  node->free.prev = node->free.next = VMA_NULL;
11641  m_FreeList[level].front = m_FreeList[level].back = node;
11642  }
11643  else
11644  {
11645  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11646  node->free.prev = VMA_NULL;
11647  node->free.next = frontNode;
11648  frontNode->free.prev = node;
11649  m_FreeList[level].front = node;
11650  }
11651 }
11652 
11653 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11654 {
11655  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11656 
11657  // It is at the front.
11658  if(node->free.prev == VMA_NULL)
11659  {
11660  VMA_ASSERT(m_FreeList[level].front == node);
11661  m_FreeList[level].front = node->free.next;
11662  }
11663  else
11664  {
11665  Node* const prevFreeNode = node->free.prev;
11666  VMA_ASSERT(prevFreeNode->free.next == node);
11667  prevFreeNode->free.next = node->free.next;
11668  }
11669 
11670  // It is at the back.
11671  if(node->free.next == VMA_NULL)
11672  {
11673  VMA_ASSERT(m_FreeList[level].back == node);
11674  m_FreeList[level].back = node->free.prev;
11675  }
11676  else
11677  {
11678  Node* const nextFreeNode = node->free.next;
11679  VMA_ASSERT(nextFreeNode->free.prev == node);
11680  nextFreeNode->free.prev = node->free.prev;
11681  }
11682 }
11683 
11684 #if VMA_STATS_STRING_ENABLED
11685 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11686 {
11687  switch(node->type)
11688  {
11689  case Node::TYPE_FREE:
11690  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11691  break;
11692  case Node::TYPE_ALLOCATION:
11693  {
11694  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11695  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11696  if(allocSize < levelNodeSize)
11697  {
11698  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11699  }
11700  }
11701  break;
11702  case Node::TYPE_SPLIT:
11703  {
11704  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11705  const Node* const leftChild = node->split.leftChild;
11706  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11707  const Node* const rightChild = leftChild->buddy;
11708  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11709  }
11710  break;
11711  default:
11712  VMA_ASSERT(0);
11713  }
11714 }
11715 #endif // #if VMA_STATS_STRING_ENABLED
11716 
11717 
11718 ////////////////////////////////////////////////////////////////////////////////
11719 // class VmaDeviceMemoryBlock
11720 
11721 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11722  m_pMetadata(VMA_NULL),
11723  m_MemoryTypeIndex(UINT32_MAX),
11724  m_Id(0),
11725  m_hMemory(VK_NULL_HANDLE),
11726  m_MapCount(0),
11727  m_pMappedData(VMA_NULL)
11728 {
11729 }
11730 
11731 void VmaDeviceMemoryBlock::Init(
11732  VmaAllocator hAllocator,
11733  VmaPool hParentPool,
11734  uint32_t newMemoryTypeIndex,
11735  VkDeviceMemory newMemory,
11736  VkDeviceSize newSize,
11737  uint32_t id,
11738  uint32_t algorithm)
11739 {
11740  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11741 
11742  m_hParentPool = hParentPool;
11743  m_MemoryTypeIndex = newMemoryTypeIndex;
11744  m_Id = id;
11745  m_hMemory = newMemory;
11746 
11747  switch(algorithm)
11748  {
11749  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11750  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11751  break;
11752  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11753  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11754  break;
11755  default:
11756  VMA_ASSERT(0);
11757  // Fall-through.
11758  case 0:
11759  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11760  }
11761  m_pMetadata->Init(newSize);
11762 }
11763 
11764 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11765 {
11766  // This is the most important assert in the entire library.
11767  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11768  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11769 
11770  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11771  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11772  m_hMemory = VK_NULL_HANDLE;
11773 
11774  vma_delete(allocator, m_pMetadata);
11775  m_pMetadata = VMA_NULL;
11776 }
11777 
11778 bool VmaDeviceMemoryBlock::Validate() const
11779 {
11780  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11781  (m_pMetadata->GetSize() != 0));
11782 
11783  return m_pMetadata->Validate();
11784 }
11785 
11786 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11787 {
11788  void* pData = nullptr;
11789  VkResult res = Map(hAllocator, 1, &pData);
11790  if(res != VK_SUCCESS)
11791  {
11792  return res;
11793  }
11794 
11795  res = m_pMetadata->CheckCorruption(pData);
11796 
11797  Unmap(hAllocator, 1);
11798 
11799  return res;
11800 }
11801 
11802 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11803 {
11804  if(count == 0)
11805  {
11806  return VK_SUCCESS;
11807  }
11808 
11809  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11810  if(m_MapCount != 0)
11811  {
11812  m_MapCount += count;
11813  VMA_ASSERT(m_pMappedData != VMA_NULL);
11814  if(ppData != VMA_NULL)
11815  {
11816  *ppData = m_pMappedData;
11817  }
11818  return VK_SUCCESS;
11819  }
11820  else
11821  {
11822  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11823  hAllocator->m_hDevice,
11824  m_hMemory,
11825  0, // offset
11826  VK_WHOLE_SIZE,
11827  0, // flags
11828  &m_pMappedData);
11829  if(result == VK_SUCCESS)
11830  {
11831  if(ppData != VMA_NULL)
11832  {
11833  *ppData = m_pMappedData;
11834  }
11835  m_MapCount = count;
11836  }
11837  return result;
11838  }
11839 }
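/*
Map() is reference-counted: only the first mapping calls vkMapMemory and only
the matching last Unmap() calls vkUnmapMemory, so persistently mapped
allocations and temporary maps can share one VkDeviceMemory block. Through the
public API (assuming `allocator` and `alloc`):

    void* pData;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    // ... read/write pData ...
    vmaUnmapMemory(allocator, alloc); // must balance every vmaMapMemory call
*/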
11840 
11841 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11842 {
11843  if(count == 0)
11844  {
11845  return;
11846  }
11847 
11848  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11849  if(m_MapCount >= count)
11850  {
11851  m_MapCount -= count;
11852  if(m_MapCount == 0)
11853  {
11854  m_pMappedData = VMA_NULL;
11855  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11856  }
11857  }
11858  else
11859  {
11860  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11861  }
11862 }
11863 
11864 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11865 {
11866  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11867  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11868 
11869  void* pData;
11870  VkResult res = Map(hAllocator, 1, &pData);
11871  if(res != VK_SUCCESS)
11872  {
11873  return res;
11874  }
11875 
11876  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11877  VmaWriteMagicValue(pData, allocOffset + allocSize);
11878 
11879  Unmap(hAllocator, 1);
11880 
11881  return VK_SUCCESS;
11882 }
11883 
11884 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11885 {
11886  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11887  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11888 
11889  void* pData;
11890  VkResult res = Map(hAllocator, 1, &pData);
11891  if(res != VK_SUCCESS)
11892  {
11893  return res;
11894  }
11895 
11896  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11897  {
11898  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11899  }
11900  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11901  {
11902  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11903  }
11904 
11905  Unmap(hAllocator, 1);
11906 
11907  return VK_SUCCESS;
11908 }
11909 
11910 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11911  const VmaAllocator hAllocator,
11912  const VmaAllocation hAllocation,
11913  VkDeviceSize allocationLocalOffset,
11914  VkBuffer hBuffer,
11915  const void* pNext)
11916 {
11917  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11918  hAllocation->GetBlock() == this);
11919  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11920  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11921  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11922  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11923  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11924  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11925 }
11926 
11927 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11928  const VmaAllocator hAllocator,
11929  const VmaAllocation hAllocation,
11930  VkDeviceSize allocationLocalOffset,
11931  VkImage hImage,
11932  const void* pNext)
11933 {
11934  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11935  hAllocation->GetBlock() == this);
11936  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11937  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11938  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11939  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11940  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11941  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11942 }
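/*
These two functions back the public vmaBindBufferMemory2()/vmaBindImageMemory2()
entry points; pNext can chain extension structures (e.g.
VkBindBufferMemoryDeviceGroupInfo) when VK_KHR_bind_memory2 or Vulkan >= 1.1 is
available. A sketch, assuming `buf` was created with vkCreateBuffer:

    VkResult res = vmaBindBufferMemory2(allocator, alloc,
        0,         // allocationLocalOffset
        buf,
        VMA_NULL); // pNext
*/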
11943 
11944 static void InitStatInfo(VmaStatInfo& outInfo)
11945 {
11946  memset(&outInfo, 0, sizeof(outInfo));
11947  outInfo.allocationSizeMin = UINT64_MAX;
11948  outInfo.unusedRangeSizeMin = UINT64_MAX;
11949 }
11950 
11951 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11952 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11953 {
11954  inoutInfo.blockCount += srcInfo.blockCount;
11955  inoutInfo.allocationCount += srcInfo.allocationCount;
11956  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11957  inoutInfo.usedBytes += srcInfo.usedBytes;
11958  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11959  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11960  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11961  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11962  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11963 }
11964 
11965 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11966 {
11967  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11968  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11969  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11970  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11971 }
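/*
These helpers feed the public statistics API: vmaCalculateStats() sums
per-block VmaStatInfo with VmaAddStatInfo() and then fills the averages with
VmaPostprocessCalcStatInfo(). A sketch:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Total used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);
*/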
11972 
11973 VmaPool_T::VmaPool_T(
11974  VmaAllocator hAllocator,
11975  const VmaPoolCreateInfo& createInfo,
11976  VkDeviceSize preferredBlockSize) :
11977  m_BlockVector(
11978  hAllocator,
11979  this, // hParentPool
11980  createInfo.memoryTypeIndex,
11981  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11982  createInfo.minBlockCount,
11983  createInfo.maxBlockCount,
11984  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11985  createInfo.frameInUseCount,
11986  createInfo.blockSize != 0, // explicitBlockSize
11987  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11988  m_Id(0),
11989  m_Name(VMA_NULL)
11990 {
11991 }
11992 
11993 VmaPool_T::~VmaPool_T()
11994 {
11995 }
11996 
11997 void VmaPool_T::SetName(const char* pName)
11998 {
11999  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12000  VmaFreeString(allocs, m_Name);
12001 
12002  if(pName != VMA_NULL)
12003  {
12004  m_Name = VmaCreateStringCopy(allocs, pName);
12005  }
12006  else
12007  {
12008  m_Name = VMA_NULL;
12009  }
12010 }
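/*
Illustrative usage sketch (handles assumed): pool names reach the member
function above through the public vmaSetPoolName() and appear in the JSON dump
produced by vmaBuildStatsString().

    vmaSetPoolName(g_Allocator, texturePool, "Texture pool"); // string is copied internally
    vmaSetPoolName(g_Allocator, texturePool, VMA_NULL);       // clears the name
*/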
12011 
12012 #if VMA_STATS_STRING_ENABLED
12013 
12014 #endif // #if VMA_STATS_STRING_ENABLED
12015 
12016 VmaBlockVector::VmaBlockVector(
12017  VmaAllocator hAllocator,
12018  VmaPool hParentPool,
12019  uint32_t memoryTypeIndex,
12020  VkDeviceSize preferredBlockSize,
12021  size_t minBlockCount,
12022  size_t maxBlockCount,
12023  VkDeviceSize bufferImageGranularity,
12024  uint32_t frameInUseCount,
12025  bool explicitBlockSize,
12026  uint32_t algorithm) :
12027  m_hAllocator(hAllocator),
12028  m_hParentPool(hParentPool),
12029  m_MemoryTypeIndex(memoryTypeIndex),
12030  m_PreferredBlockSize(preferredBlockSize),
12031  m_MinBlockCount(minBlockCount),
12032  m_MaxBlockCount(maxBlockCount),
12033  m_BufferImageGranularity(bufferImageGranularity),
12034  m_FrameInUseCount(frameInUseCount),
12035  m_ExplicitBlockSize(explicitBlockSize),
12036  m_Algorithm(algorithm),
12037  m_HasEmptyBlock(false),
12038  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12039  m_NextBlockId(0)
12040 {
12041 }
12042 
12043 VmaBlockVector::~VmaBlockVector()
12044 {
12045  for(size_t i = m_Blocks.size(); i--; )
12046  {
12047  m_Blocks[i]->Destroy(m_hAllocator);
12048  vma_delete(m_hAllocator, m_Blocks[i]);
12049  }
12050 }
12051 
12052 VkResult VmaBlockVector::CreateMinBlocks()
12053 {
12054  for(size_t i = 0; i < m_MinBlockCount; ++i)
12055  {
12056  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12057  if(res != VK_SUCCESS)
12058  {
12059  return res;
12060  }
12061  }
12062  return VK_SUCCESS;
12063 }
12064 
12065 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12066 {
12067  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12068 
12069  const size_t blockCount = m_Blocks.size();
12070 
12071  pStats->size = 0;
12072  pStats->unusedSize = 0;
12073  pStats->allocationCount = 0;
12074  pStats->unusedRangeCount = 0;
12075  pStats->unusedRangeSizeMax = 0;
12076  pStats->blockCount = blockCount;
12077 
12078  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12079  {
12080  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12081  VMA_ASSERT(pBlock);
12082  VMA_HEAVY_ASSERT(pBlock->Validate());
12083  pBlock->m_pMetadata->AddPoolStats(*pStats);
12084  }
12085 }
12086 
12087 bool VmaBlockVector::IsEmpty()
12088 {
12089  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12090  return m_Blocks.empty();
12091 }
12092 
12093 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12094 {
12095  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12096  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12097  (VMA_DEBUG_MARGIN > 0) &&
12098  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12099  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12100 }
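/*
Configuration sketch: corruption detection is opt-in at compile time. A typical
setup, in the one .cpp file that provides the implementation, could look like:

    #define VMA_DEBUG_MARGIN 16           // bytes of margin around each allocation
    #define VMA_DEBUG_DETECT_CORRUPTION 1 // write/validate magic values in that margin
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

As the checks above show, detection only works for memory types that are both
HOST_VISIBLE and HOST_COHERENT, and only with the default or linear algorithms.
*/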
12101 
12102 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12103 
12104 VkResult VmaBlockVector::Allocate(
12105  uint32_t currentFrameIndex,
12106  VkDeviceSize size,
12107  VkDeviceSize alignment,
12108  const VmaAllocationCreateInfo& createInfo,
12109  VmaSuballocationType suballocType,
12110  size_t allocationCount,
12111  VmaAllocation* pAllocations)
12112 {
12113  size_t allocIndex;
12114  VkResult res = VK_SUCCESS;
12115 
12116  if(IsCorruptionDetectionEnabled())
12117  {
12118  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12119  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12120  }
12121 
12122  {
12123  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12124  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12125  {
12126  res = AllocatePage(
12127  currentFrameIndex,
12128  size,
12129  alignment,
12130  createInfo,
12131  suballocType,
12132  pAllocations + allocIndex);
12133  if(res != VK_SUCCESS)
12134  {
12135  break;
12136  }
12137  }
12138  }
12139 
12140  if(res != VK_SUCCESS)
12141  {
12142  // Free all already created allocations.
12143  while(allocIndex--)
12144  {
12145  Free(pAllocations[allocIndex]);
12146  }
12147  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12148  }
12149 
12150  return res;
12151 }
12152 
12153 VkResult VmaBlockVector::AllocatePage(
12154  uint32_t currentFrameIndex,
12155  VkDeviceSize size,
12156  VkDeviceSize alignment,
12157  const VmaAllocationCreateInfo& createInfo,
12158  VmaSuballocationType suballocType,
12159  VmaAllocation* pAllocation)
12160 {
12161  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12162  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12163  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12164  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12165 
12166  VkDeviceSize freeMemory;
12167  {
12168  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12169  VmaBudget heapBudget = {};
12170  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12171  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12172  }
12173 
12174  const bool canFallbackToDedicated = !IsCustomPool();
12175  const bool canCreateNewBlock =
12176  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12177  (m_Blocks.size() < m_MaxBlockCount) &&
12178  (freeMemory >= size || !canFallbackToDedicated);
12179  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12180 
12181  // If the linear algorithm is used, canMakeOtherLost is available only when the pool is used as a ring buffer,
12182  // which in turn is possible only when maxBlockCount = 1.
12183  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12184  {
12185  canMakeOtherLost = false;
12186  }
12187 
12188  // Upper address can only be used with the linear allocator and within a single memory block.
12189  if(isUpperAddress &&
12190  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12191  {
12192  return VK_ERROR_FEATURE_NOT_PRESENT;
12193  }
12194 
12195  // Validate strategy.
12196  switch(strategy)
12197  {
12198  case 0:
12199  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12200  break;
12201  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12202  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12203  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12204  break;
12205  default:
12206  return VK_ERROR_FEATURE_NOT_PRESENT;
12207  }
12208 
12209  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12210  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12211  {
12212  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12213  }
12214 
12215  /*
12216  Under certain conditions, this whole section can be skipped for optimization, so
12217  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12218  e.g. for custom pools with linear algorithm.
12219  */
12220  if(!canMakeOtherLost || canCreateNewBlock)
12221  {
12222  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12223  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12224  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12225 
12226  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12227  {
12228  // Use only last block.
12229  if(!m_Blocks.empty())
12230  {
12231  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12232  VMA_ASSERT(pCurrBlock);
12233  VkResult res = AllocateFromBlock(
12234  pCurrBlock,
12235  currentFrameIndex,
12236  size,
12237  alignment,
12238  allocFlagsCopy,
12239  createInfo.pUserData,
12240  suballocType,
12241  strategy,
12242  pAllocation);
12243  if(res == VK_SUCCESS)
12244  {
12245  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12246  return VK_SUCCESS;
12247  }
12248  }
12249  }
12250  else
12251  {
12252  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12253  {
12254  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12255  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12256  {
12257  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12258  VMA_ASSERT(pCurrBlock);
12259  VkResult res = AllocateFromBlock(
12260  pCurrBlock,
12261  currentFrameIndex,
12262  size,
12263  alignment,
12264  allocFlagsCopy,
12265  createInfo.pUserData,
12266  suballocType,
12267  strategy,
12268  pAllocation);
12269  if(res == VK_SUCCESS)
12270  {
12271  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12272  return VK_SUCCESS;
12273  }
12274  }
12275  }
12276  else // WORST_FIT, FIRST_FIT
12277  {
12278  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12279  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12280  {
12281  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12282  VMA_ASSERT(pCurrBlock);
12283  VkResult res = AllocateFromBlock(
12284  pCurrBlock,
12285  currentFrameIndex,
12286  size,
12287  alignment,
12288  allocFlagsCopy,
12289  createInfo.pUserData,
12290  suballocType,
12291  strategy,
12292  pAllocation);
12293  if(res == VK_SUCCESS)
12294  {
12295  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12296  return VK_SUCCESS;
12297  }
12298  }
12299  }
12300  }
12301 
12302  // 2. Try to create new block.
12303  if(canCreateNewBlock)
12304  {
12305  // Calculate optimal size for new block.
12306  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12307  uint32_t newBlockSizeShift = 0;
12308  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12309 
12310  if(!m_ExplicitBlockSize)
12311  {
12312  // Allocate 1/8, 1/4, 1/2 as first blocks.
12313  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12314  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12315  {
12316  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12317  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12318  {
12319  newBlockSize = smallerNewBlockSize;
12320  ++newBlockSizeShift;
12321  }
12322  else
12323  {
12324  break;
12325  }
12326  }
12327  }
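// A worked example of the heuristic above: with m_PreferredBlockSize = 256 MB,
// no existing blocks, and a 4 MB request, the loop halves three times
// (NEW_BLOCK_SIZE_SHIFT_MAX) and the first block is created with 32 MB - 1/8
// of the preferred size.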
12328 
12329  size_t newBlockIndex = 0;
12330  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12331  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12332  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12333  if(!m_ExplicitBlockSize)
12334  {
12335  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12336  {
12337  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12338  if(smallerNewBlockSize >= size)
12339  {
12340  newBlockSize = smallerNewBlockSize;
12341  ++newBlockSizeShift;
12342  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12343  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12344  }
12345  else
12346  {
12347  break;
12348  }
12349  }
12350  }
12351 
12352  if(res == VK_SUCCESS)
12353  {
12354  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12355  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12356 
12357  res = AllocateFromBlock(
12358  pBlock,
12359  currentFrameIndex,
12360  size,
12361  alignment,
12362  allocFlagsCopy,
12363  createInfo.pUserData,
12364  suballocType,
12365  strategy,
12366  pAllocation);
12367  if(res == VK_SUCCESS)
12368  {
12369  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12370  return VK_SUCCESS;
12371  }
12372  else
12373  {
12374  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12375  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12376  }
12377  }
12378  }
12379  }
12380 
12381  // 3. Try to allocate from existing blocks with making other allocations lost.
12382  if(canMakeOtherLost)
12383  {
12384  uint32_t tryIndex = 0;
12385  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12386  {
12387  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12388  VmaAllocationRequest bestRequest = {};
12389  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12390 
12391  // 1. Search existing allocations.
12392  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12393  {
12394  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12395  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12396  {
12397  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12398  VMA_ASSERT(pCurrBlock);
12399  VmaAllocationRequest currRequest = {};
12400  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12401  currentFrameIndex,
12402  m_FrameInUseCount,
12403  m_BufferImageGranularity,
12404  size,
12405  alignment,
12406  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12407  suballocType,
12408  canMakeOtherLost,
12409  strategy,
12410  &currRequest))
12411  {
12412  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12413  if(pBestRequestBlock == VMA_NULL ||
12414  currRequestCost < bestRequestCost)
12415  {
12416  pBestRequestBlock = pCurrBlock;
12417  bestRequest = currRequest;
12418  bestRequestCost = currRequestCost;
12419 
12420  if(bestRequestCost == 0)
12421  {
12422  break;
12423  }
12424  }
12425  }
12426  }
12427  }
12428  else // WORST_FIT, FIRST_FIT
12429  {
12430  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12431  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12432  {
12433  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12434  VMA_ASSERT(pCurrBlock);
12435  VmaAllocationRequest currRequest = {};
12436  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12437  currentFrameIndex,
12438  m_FrameInUseCount,
12439  m_BufferImageGranularity,
12440  size,
12441  alignment,
12442  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12443  suballocType,
12444  canMakeOtherLost,
12445  strategy,
12446  &currRequest))
12447  {
12448  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12449  if(pBestRequestBlock == VMA_NULL ||
12450  currRequestCost < bestRequestCost ||
12451  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12452  {
12453  pBestRequestBlock = pCurrBlock;
12454  bestRequest = currRequest;
12455  bestRequestCost = currRequestCost;
12456 
12457  if(bestRequestCost == 0 ||
12458  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12459  {
12460  break;
12461  }
12462  }
12463  }
12464  }
12465  }
12466 
12467  if(pBestRequestBlock != VMA_NULL)
12468  {
12469  if(mapped)
12470  {
12471  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12472  if(res != VK_SUCCESS)
12473  {
12474  return res;
12475  }
12476  }
12477 
12478  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12479  currentFrameIndex,
12480  m_FrameInUseCount,
12481  &bestRequest))
12482  {
12483  // Allocate from this pBlock.
12484  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12485  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12486  UpdateHasEmptyBlock();
12487  (*pAllocation)->InitBlockAllocation(
12488  pBestRequestBlock,
12489  bestRequest.offset,
12490  alignment,
12491  size,
12492  m_MemoryTypeIndex,
12493  suballocType,
12494  mapped,
12495  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12496  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12497  VMA_DEBUG_LOG(" Returned from existing block");
12498  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12499  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12500  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12501  {
12502  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12503  }
12504  if(IsCorruptionDetectionEnabled())
12505  {
12506  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12507  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12508  }
12509  return VK_SUCCESS;
12510  }
12511  // else: Some allocations must have been touched while we were here. Next try.
12512  }
12513  else
12514  {
12515  // Could not find place in any of the blocks - break outer loop.
12516  break;
12517  }
12518  }
12519  /* Maximum number of tries exceeded - a very unlikely event: many other
12520  threads are simultaneously touching allocations, making it impossible to make
12521  them lost at the same time as we try to allocate. */
12522  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12523  {
12524  return VK_ERROR_TOO_MANY_OBJECTS;
12525  }
12526  }
12527 
12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12529 }
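/*
Illustrative usage sketch (handles assumed): the strategy bits validated above
arrive through VmaAllocationCreateInfo::flags. The MIN_* convenience flags are
aliases of the *_FIT bits used internally (MIN_MEMORY = BEST_FIT,
MIN_TIME = FIRST_FIT, MIN_FRAGMENTATION = WORST_FIT).

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buf, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;

    VmaAllocation alloc;
    VkResult res = vmaAllocateMemory(g_Allocator, &memReq, &allocCreateInfo, &alloc, VMA_NULL);
*/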
12530 
12531 void VmaBlockVector::Free(
12532  const VmaAllocation hAllocation)
12533 {
12534  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12535 
12536  bool budgetExceeded = false;
12537  {
12538  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12539  VmaBudget heapBudget = {};
12540  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12541  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12542  }
12543 
12544  // Scope for lock.
12545  {
12546  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12547 
12548  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12549 
12550  if(IsCorruptionDetectionEnabled())
12551  {
12552  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12553  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12554  }
12555 
12556  if(hAllocation->IsPersistentMap())
12557  {
12558  pBlock->Unmap(m_hAllocator, 1);
12559  }
12560 
12561  pBlock->m_pMetadata->Free(hAllocation);
12562  VMA_HEAVY_ASSERT(pBlock->Validate());
12563 
12564  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12565 
12566  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12567  // pBlock became empty after this deallocation.
12568  if(pBlock->m_pMetadata->IsEmpty())
12569  {
12570  // We already have an empty block - we don't want two, so delete this one.
12571  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12572  {
12573  pBlockToDelete = pBlock;
12574  Remove(pBlock);
12575  }
12576  // else: We now have an empty block - leave it.
12577  }
12578  // pBlock didn't become empty, but we have another empty block - find and free that one.
12579  // (This is optional, just a heuristic.)
12580  else if(m_HasEmptyBlock && canDeleteBlock)
12581  {
12582  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12583  if(pLastBlock->m_pMetadata->IsEmpty())
12584  {
12585  pBlockToDelete = pLastBlock;
12586  m_Blocks.pop_back();
12587  }
12588  }
12589 
12590  UpdateHasEmptyBlock();
12591  IncrementallySortBlocks();
12592  }
12593 
12594  // Destruction of a free block. Deferred until this point, outside of mutex
12595  // lock, for performance reasons.
12596  if(pBlockToDelete != VMA_NULL)
12597  {
12598  VMA_DEBUG_LOG(" Deleted empty block");
12599  pBlockToDelete->Destroy(m_hAllocator);
12600  vma_delete(m_hAllocator, pBlockToDelete);
12601  }
12602 }
12603 
12604 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12605 {
12606  VkDeviceSize result = 0;
12607  for(size_t i = m_Blocks.size(); i--; )
12608  {
12609  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12610  if(result >= m_PreferredBlockSize)
12611  {
12612  break;
12613  }
12614  }
12615  return result;
12616 }
12617 
12618 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12619 {
12620  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12621  {
12622  if(m_Blocks[blockIndex] == pBlock)
12623  {
12624  VmaVectorRemove(m_Blocks, blockIndex);
12625  return;
12626  }
12627  }
12628  VMA_ASSERT(0);
12629 }
12630 
12631 void VmaBlockVector::IncrementallySortBlocks()
12632 {
12633  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12634  {
12635  // Bubble sort only until first swap.
12636  for(size_t i = 1; i < m_Blocks.size(); ++i)
12637  {
12638  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12639  {
12640  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12641  return;
12642  }
12643  }
12644  }
12645 }
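// Example of the incremental sort above: with per-block free sizes [8, 4, 16, 2],
// one call swaps the first out-of-order pair, giving [4, 8, 16, 2]; repeated
// calls on subsequent frees gradually converge toward fully sorted order.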
12646 
12647 VkResult VmaBlockVector::AllocateFromBlock(
12648  VmaDeviceMemoryBlock* pBlock,
12649  uint32_t currentFrameIndex,
12650  VkDeviceSize size,
12651  VkDeviceSize alignment,
12652  VmaAllocationCreateFlags allocFlags,
12653  void* pUserData,
12654  VmaSuballocationType suballocType,
12655  uint32_t strategy,
12656  VmaAllocation* pAllocation)
12657 {
12658  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12659  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12660  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12661  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12662 
12663  VmaAllocationRequest currRequest = {};
12664  if(pBlock->m_pMetadata->CreateAllocationRequest(
12665  currentFrameIndex,
12666  m_FrameInUseCount,
12667  m_BufferImageGranularity,
12668  size,
12669  alignment,
12670  isUpperAddress,
12671  suballocType,
12672  false, // canMakeOtherLost
12673  strategy,
12674  &currRequest))
12675  {
12676  // Allocate from pBlock.
12677  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12678 
12679  if(mapped)
12680  {
12681  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12682  if(res != VK_SUCCESS)
12683  {
12684  return res;
12685  }
12686  }
12687 
12688  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12689  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12690  UpdateHasEmptyBlock();
12691  (*pAllocation)->InitBlockAllocation(
12692  pBlock,
12693  currRequest.offset,
12694  alignment,
12695  size,
12696  m_MemoryTypeIndex,
12697  suballocType,
12698  mapped,
12699  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12700  VMA_HEAVY_ASSERT(pBlock->Validate());
12701  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12702  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12703  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12704  {
12705  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12706  }
12707  if(IsCorruptionDetectionEnabled())
12708  {
12709  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12710  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12711  }
12712  return VK_SUCCESS;
12713  }
12714  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12715 }
12716 
12717 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12718 {
12719  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12720  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12721  allocInfo.allocationSize = blockSize;
12722 
12723 #if VMA_BUFFER_DEVICE_ADDRESS
12724  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
12725  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
12726  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
12727  {
12728  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
12729  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
12730  }
12731 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
12732 
12733  VkDeviceMemory mem = VK_NULL_HANDLE;
12734  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12735  if(res < 0)
12736  {
12737  return res;
12738  }
12739 
12740  // New VkDeviceMemory successfully created.
12741 
12742  // Create a new VmaDeviceMemoryBlock object for it.
12743  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12744  pBlock->Init(
12745  m_hAllocator,
12746  m_hParentPool,
12747  m_MemoryTypeIndex,
12748  mem,
12749  allocInfo.allocationSize,
12750  m_NextBlockId++,
12751  m_Algorithm);
12752 
12753  m_Blocks.push_back(pBlock);
12754  if(pNewBlockIndex != VMA_NULL)
12755  {
12756  *pNewBlockIndex = m_Blocks.size() - 1;
12757  }
12758 
12759  return VK_SUCCESS;
12760 }
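/*
For reference, the pNext chain assembled above corresponds to this raw Vulkan
pattern (sketch with core 1.2 names; handles assumed):

    VkMemoryAllocateFlagsInfo allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO };
    allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = &allocFlagsInfo;
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = blockSize;

    VkDeviceMemory mem = VK_NULL_HANDLE;
    VkResult res = vkAllocateMemory(device, &allocInfo, NULL, &mem);
*/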
12761 
12762 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12763  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12764  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12765 {
12766  const size_t blockCount = m_Blocks.size();
12767  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12768 
12769  enum BLOCK_FLAG
12770  {
12771  BLOCK_FLAG_USED = 0x00000001,
12772  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12773  };
12774 
12775  struct BlockInfo
12776  {
12777  uint32_t flags;
12778  void* pMappedData;
12779  };
12780  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12781  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12782  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12783 
12784  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12785  const size_t moveCount = moves.size();
12786  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12787  {
12788  const VmaDefragmentationMove& move = moves[moveIndex];
12789  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12790  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12791  }
12792 
12793  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12794 
12795  // Go over all blocks. Get mapped pointer or map if necessary.
12796  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12797  {
12798  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12799  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12800  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12801  {
12802  currBlockInfo.pMappedData = pBlock->GetMappedData();
12803  // It is not originally mapped - map it.
12804  if(currBlockInfo.pMappedData == VMA_NULL)
12805  {
12806  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12807  if(pDefragCtx->res == VK_SUCCESS)
12808  {
12809  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12810  }
12811  }
12812  }
12813  }
12814 
12815  // Go over all moves. Do actual data transfer.
12816  if(pDefragCtx->res == VK_SUCCESS)
12817  {
12818  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12819  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12820 
12821  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12822  {
12823  const VmaDefragmentationMove& move = moves[moveIndex];
12824 
12825  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12826  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12827 
12828  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12829 
12830  // Invalidate source.
12831  if(isNonCoherent)
12832  {
12833  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12834  memRange.memory = pSrcBlock->GetDeviceMemory();
12835  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12836  memRange.size = VMA_MIN(
12837  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12838  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12839  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12840  }
12841 
12842  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12843  memmove(
12844  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12845  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12846  static_cast<size_t>(move.size));
12847 
12848  if(IsCorruptionDetectionEnabled())
12849  {
12850  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12851  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12852  }
12853 
12854  // Flush destination.
12855  if(isNonCoherent)
12856  {
12857  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12858  memRange.memory = pDstBlock->GetDeviceMemory();
12859  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12860  memRange.size = VMA_MIN(
12861  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12862  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12863  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12864  }
12865  }
12866  }
12867 
12868  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12869  // This runs regardless of whether pCtx->res == VK_SUCCESS.
12870  for(size_t blockIndex = blockCount; blockIndex--; )
12871  {
12872  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12873  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12874  {
12875  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12876  pBlock->Unmap(m_hAllocator, 1);
12877  }
12878  }
12879 }
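/*
The invalidate -> memmove -> flush sequence above is the standard pattern for
touching non-coherent mapped memory from the CPU. A minimal standalone sketch
(handles and offsets assumed, alignment requirements glossed over):

    VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    range.memory = mem;
    range.offset = 0;            // must be aligned down to nonCoherentAtomSize
    range.size = VK_WHOLE_SIZE;

    vkInvalidateMappedMemoryRanges(device, 1, &range); // make GPU writes visible to CPU
    memmove(pDst, pSrc, (size_t)copySize);             // the CPU-side copy
    vkFlushMappedMemoryRanges(device, 1, &range);      // make CPU writes visible to GPU
*/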
12880 
12881 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12882  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12883  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12884  VkCommandBuffer commandBuffer)
12885 {
12886  const size_t blockCount = m_Blocks.size();
12887 
12888  pDefragCtx->blockContexts.resize(blockCount);
12889  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12890 
12891  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12892  const size_t moveCount = moves.size();
12893  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12894  {
12895  const VmaDefragmentationMove& move = moves[moveIndex];
12896 
12897  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
12898  {
12899  // Old-school moves still require us to map the whole block.
12900  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12901  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12902  }
12903  }
12904 
12905  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12906 
12907  // Go over all blocks. Create and bind buffer for whole block if necessary.
12908  {
12909  VkBufferCreateInfo bufCreateInfo;
12910  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12911 
12912  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12913  {
12914  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12915  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12916  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12917  {
12918  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12919  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12920  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12921  if(pDefragCtx->res == VK_SUCCESS)
12922  {
12923  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12924  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12925  }
12926  }
12927  }
12928  }
12929 
12930  // Go over all moves. Post data transfer commands to command buffer.
12931  if(pDefragCtx->res == VK_SUCCESS)
12932  {
12933  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12934  {
12935  const VmaDefragmentationMove& move = moves[moveIndex];
12936 
12937  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12938  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12939 
12940  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12941 
12942  VkBufferCopy region = {
12943  move.srcOffset,
12944  move.dstOffset,
12945  move.size };
12946  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12947  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12948  }
12949  }
12950 
12951  // Save buffers to defrag context for later destruction.
12952  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12953  {
12954  pDefragCtx->res = VK_NOT_READY;
12955  }
12956 }
12957 
12958 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12959 {
12960  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12961  {
12962  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12963  if(pBlock->m_pMetadata->IsEmpty())
12964  {
12965  if(m_Blocks.size() > m_MinBlockCount)
12966  {
12967  if(pDefragmentationStats != VMA_NULL)
12968  {
12969  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12970  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12971  }
12972 
12973  VmaVectorRemove(m_Blocks, blockIndex);
12974  pBlock->Destroy(m_hAllocator);
12975  vma_delete(m_hAllocator, pBlock);
12976  }
12977  else
12978  {
12979  break;
12980  }
12981  }
12982  }
12983  UpdateHasEmptyBlock();
12984 }
12985 
12986 void VmaBlockVector::UpdateHasEmptyBlock()
12987 {
12988  m_HasEmptyBlock = false;
12989  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12990  {
12991  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12992  if(pBlock->m_pMetadata->IsEmpty())
12993  {
12994  m_HasEmptyBlock = true;
12995  break;
12996  }
12997  }
12998 }
12999 
13000 #if VMA_STATS_STRING_ENABLED
13001 
13002 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13003 {
13004  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13005 
13006  json.BeginObject();
13007 
13008  if(IsCustomPool())
13009  {
13010  const char* poolName = m_hParentPool->GetName();
13011  if(poolName != VMA_NULL && poolName[0] != '\0')
13012  {
13013  json.WriteString("Name");
13014  json.WriteString(poolName);
13015  }
13016 
13017  json.WriteString("MemoryTypeIndex");
13018  json.WriteNumber(m_MemoryTypeIndex);
13019 
13020  json.WriteString("BlockSize");
13021  json.WriteNumber(m_PreferredBlockSize);
13022 
13023  json.WriteString("BlockCount");
13024  json.BeginObject(true);
13025  if(m_MinBlockCount > 0)
13026  {
13027  json.WriteString("Min");
13028  json.WriteNumber((uint64_t)m_MinBlockCount);
13029  }
13030  if(m_MaxBlockCount < SIZE_MAX)
13031  {
13032  json.WriteString("Max");
13033  json.WriteNumber((uint64_t)m_MaxBlockCount);
13034  }
13035  json.WriteString("Cur");
13036  json.WriteNumber((uint64_t)m_Blocks.size());
13037  json.EndObject();
13038 
13039  if(m_FrameInUseCount > 0)
13040  {
13041  json.WriteString("FrameInUseCount");
13042  json.WriteNumber(m_FrameInUseCount);
13043  }
13044 
13045  if(m_Algorithm != 0)
13046  {
13047  json.WriteString("Algorithm");
13048  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13049  }
13050  }
13051  else
13052  {
13053  json.WriteString("PreferredBlockSize");
13054  json.WriteNumber(m_PreferredBlockSize);
13055  }
13056 
13057  json.WriteString("Blocks");
13058  json.BeginObject();
13059  for(size_t i = 0; i < m_Blocks.size(); ++i)
13060  {
13061  json.BeginString();
13062  json.ContinueString(m_Blocks[i]->GetId());
13063  json.EndString();
13064 
13065  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13066  }
13067  json.EndObject();
13068 
13069  json.EndObject();
13070 }
13071 
13072 #endif // #if VMA_STATS_STRING_ENABLED
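/*
Illustrative usage sketch: the JSON emitted by PrintDetailedMap() above is
exposed through the public statistics API:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(g_Allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    // ... write statsString to a log or file ...
    vmaFreeStatsString(g_Allocator, statsString);
*/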
13073 
13074 void VmaBlockVector::Defragment(
13075  class VmaBlockVectorDefragmentationContext* pCtx,
13076  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13077  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13078  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13079  VkCommandBuffer commandBuffer)
13080 {
13081  pCtx->res = VK_SUCCESS;
13082 
13083  const VkMemoryPropertyFlags memPropFlags =
13084  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13085  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13086 
13087  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13088  isHostVisible;
13089  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13090  !IsCorruptionDetectionEnabled() &&
13091  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13092 
13093  // There are options to defragment this memory type.
13094  if(canDefragmentOnCpu || canDefragmentOnGpu)
13095  {
13096  bool defragmentOnGpu;
13097  // There is only one option to defragment this memory type.
13098  if(canDefragmentOnGpu != canDefragmentOnCpu)
13099  {
13100  defragmentOnGpu = canDefragmentOnGpu;
13101  }
13102  // Both options are available: use a heuristic to choose the better one.
13103  else
13104  {
13105  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13106  m_hAllocator->IsIntegratedGpu();
13107  }
13108 
13109  bool overlappingMoveSupported = !defragmentOnGpu;
13110 
13111  if(m_hAllocator->m_UseMutex)
13112  {
13113  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13114  {
13115  if(!m_Mutex.TryLockWrite())
13116  {
13117  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13118  return;
13119  }
13120  }
13121  else
13122  {
13123  m_Mutex.LockWrite();
13124  pCtx->mutexLocked = true;
13125  }
13126  }
13127 
13128  pCtx->Begin(overlappingMoveSupported, flags);
13129 
13130  // Defragment.
13131 
13132  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13133  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13134  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13135 
13136  // Accumulate statistics.
13137  if(pStats != VMA_NULL)
13138  {
13139  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13140  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13141  pStats->bytesMoved += bytesMoved;
13142  pStats->allocationsMoved += allocationsMoved;
13143  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13144  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13145  if(defragmentOnGpu)
13146  {
13147  maxGpuBytesToMove -= bytesMoved;
13148  maxGpuAllocationsToMove -= allocationsMoved;
13149  }
13150  else
13151  {
13152  maxCpuBytesToMove -= bytesMoved;
13153  maxCpuAllocationsToMove -= allocationsMoved;
13154  }
13155  }
13156 
13157  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13158  {
13159  if(m_hAllocator->m_UseMutex)
13160  m_Mutex.UnlockWrite();
13161 
13162  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13163  pCtx->res = VK_NOT_READY;
13164 
13165  return;
13166  }
13167 
13168  if(pCtx->res >= VK_SUCCESS)
13169  {
13170  if(defragmentOnGpu)
13171  {
13172  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13173  }
13174  else
13175  {
13176  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13177  }
13178  }
13179  }
13180 }
13181 
13182 void VmaBlockVector::DefragmentationEnd(
13183  class VmaBlockVectorDefragmentationContext* pCtx,
13184  VmaDefragmentationStats* pStats)
13185 {
13186  // Destroy buffers.
13187  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
13188  {
13189  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
13190  if(blockCtx.hBuffer)
13191  {
13192  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
13193  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13194  }
13195  }
13196 
13197  if(pCtx->res >= VK_SUCCESS)
13198  {
13199  FreeEmptyBlocks(pStats);
13200  }
13201 
13202  if(pCtx->mutexLocked)
13203  {
13204  VMA_ASSERT(m_hAllocator->m_UseMutex);
13205  m_Mutex.UnlockWrite();
13206  }
13207 }
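/*
Illustrative usage sketch (allocation array assumed): the Defragment() /
DefragmentationEnd() pair above is driven by the public API roughly like this:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(g_Allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(g_Allocator, defragCtx);
    // Buffers/images bound to the moved allocations must then be destroyed,
    // recreated, and bound again.
*/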
13208 
13209 uint32_t VmaBlockVector::ProcessDefragmentations(
13210  class VmaBlockVectorDefragmentationContext *pCtx,
13211  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13212 {
13213  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13214 
13215  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13216 
13217  for(uint32_t i = 0; i < moveCount; ++ i)
13218  {
13219  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13220 
13221  pMove->allocation = move.hAllocation;
13222  pMove->memory = move.pDstBlock->GetDeviceMemory();
13223  pMove->offset = move.dstOffset;
13224 
13225  ++ pMove;
13226  }
13227 
13228  pCtx->defragmentationMovesProcessed += moveCount;
13229 
13230  return moveCount;
13231 }
13232 
13233 void VmaBlockVector::CommitDefragmentations(
13234  class VmaBlockVectorDefragmentationContext *pCtx,
13235  VmaDefragmentationStats* pStats)
13236 {
13237  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13238 
13239  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13240  {
13241  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13242 
13243  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13244  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13245  }
13246 
13247  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13248  FreeEmptyBlocks(pStats);
13249 }
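/*
Illustrative sketch of the incremental path (VMA_DEFRAGMENTATION_FLAG_INCREMENTAL,
names as in the public API): ProcessDefragmentations() above fills the move
array for one pass; CommitDefragmentations() runs when that pass ends.

    VmaDefragmentationPassMoveInfo moves[64];
    VmaDefragmentationPassInfo passInfo = {};
    passInfo.moveCount = 64;
    passInfo.pMoves = moves;

    if(vmaBeginDefragmentationPass(g_Allocator, defragCtx, &passInfo) == VK_NOT_READY)
    {
        // Record copies for passInfo.moveCount moves on a command buffer,
        // submit it and wait, then commit:
        vmaEndDefragmentationPass(g_Allocator, defragCtx);
    }
*/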
13250 
13251 size_t VmaBlockVector::CalcAllocationCount() const
13252 {
13253  size_t result = 0;
13254  for(size_t i = 0; i < m_Blocks.size(); ++i)
13255  {
13256  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13257  }
13258  return result;
13259 }
13260 
13261 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13262 {
13263  if(m_BufferImageGranularity == 1)
13264  {
13265  return false;
13266  }
13267  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13268  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13269  {
13270  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13271  VMA_ASSERT(m_Algorithm == 0);
13272  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13273  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13274  {
13275  return true;
13276  }
13277  }
13278  return false;
13279 }
13280 
13281 void VmaBlockVector::MakePoolAllocationsLost(
13282  uint32_t currentFrameIndex,
13283  size_t* pLostAllocationCount)
13284 {
13285  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13286  size_t lostAllocationCount = 0;
13287  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13288  {
13289  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13290  VMA_ASSERT(pBlock);
13291  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13292  }
13293  if(pLostAllocationCount != VMA_NULL)
13294  {
13295  *pLostAllocationCount = lostAllocationCount;
13296  }
13297 }
13298 
13299 VkResult VmaBlockVector::CheckCorruption()
13300 {
13301  if(!IsCorruptionDetectionEnabled())
13302  {
13303  return VK_ERROR_FEATURE_NOT_PRESENT;
13304  }
13305 
13306  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13307  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13308  {
13309  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13310  VMA_ASSERT(pBlock);
13311  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13312  if(res != VK_SUCCESS)
13313  {
13314  return res;
13315  }
13316  }
13317  return VK_SUCCESS;
13318 }
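/*
Illustrative usage sketch: this member function is reached via the public
vmaCheckCorruption(), with a bit mask selecting memory types to scan:

    // Scan every memory type for which corruption detection is possible.
    VkResult res = vmaCheckCorruption(g_Allocator, UINT32_MAX);
    // VK_ERROR_FEATURE_NOT_PRESENT means detection is not enabled or not possible.
*/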
13319 
13320 void VmaBlockVector::AddStats(VmaStats* pStats)
13321 {
13322  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13323  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13324 
13325  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13326 
13327  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13328  {
13329  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13330  VMA_ASSERT(pBlock);
13331  VMA_HEAVY_ASSERT(pBlock->Validate());
13332  VmaStatInfo allocationStatInfo;
13333  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13334  VmaAddStatInfo(pStats->total, allocationStatInfo);
13335  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13336  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13337  }
13338 }
13339 
13340 ////////////////////////////////////////////////////////////////////////////////
13341 // VmaDefragmentationAlgorithm_Generic members definition
13342 
13343 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13344  VmaAllocator hAllocator,
13345  VmaBlockVector* pBlockVector,
13346  uint32_t currentFrameIndex,
13347  bool overlappingMoveSupported) :
13348  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13349  m_AllocationCount(0),
13350  m_AllAllocations(false),
13351  m_BytesMoved(0),
13352  m_AllocationsMoved(0),
13353  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13354 {
13355  // Create block info for each block.
13356  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13357  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13358  {
13359  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13360  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13361  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13362  m_Blocks.push_back(pBlockInfo);
13363  }
13364 
13365  // Sort them by m_pBlock pointer value.
13366  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13367 }
13368 
13369 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13370 {
13371  for(size_t i = m_Blocks.size(); i--; )
13372  {
13373  vma_delete(m_hAllocator, m_Blocks[i]);
13374  }
13375 }
13376 
13377 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13378 {
13379  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
13380  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13381  {
13382  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13383  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13384  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13385  {
13386  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13387  (*it)->m_Allocations.push_back(allocInfo);
13388  }
13389  else
13390  {
13391  VMA_ASSERT(0);
13392  }
13393 
13394  ++m_AllocationCount;
13395  }
13396 }
13397 
13398 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13399  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13400  VkDeviceSize maxBytesToMove,
13401  uint32_t maxAllocationsToMove,
13402  bool freeOldAllocations)
13403 {
13404  if(m_Blocks.empty())
13405  {
13406  return VK_SUCCESS;
13407  }
13408 
13409  // This is a choice based on research.
13410  // Option 1:
13411  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13412  // Option 2:
13413  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13414  // Option 3:
13415  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13416 
13417  size_t srcBlockMinIndex = 0;
13418  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
13419  /*
13420  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13421  {
13422  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13423  if(blocksWithNonMovableCount > 0)
13424  {
13425  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13426  }
13427  }
13428  */
13429 
13430  size_t srcBlockIndex = m_Blocks.size() - 1;
13431  size_t srcAllocIndex = SIZE_MAX;
13432  for(;;)
13433  {
13434  // 1. Find next allocation to move.
13435  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13436  // 1.2. Then start from last to first m_Allocations.
13437  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13438  {
13439  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13440  {
13441  // Finished: no more allocations to process.
13442  if(srcBlockIndex == srcBlockMinIndex)
13443  {
13444  return VK_SUCCESS;
13445  }
13446  else
13447  {
13448  --srcBlockIndex;
13449  srcAllocIndex = SIZE_MAX;
13450  }
13451  }
13452  else
13453  {
13454  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13455  }
13456  }
13457 
13458  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13459  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13460 
13461  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13462  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13463  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13464  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13465 
13466  // 2. Try to find new place for this allocation in preceding or current block.
13467  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13468  {
13469  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13470  VmaAllocationRequest dstAllocRequest;
13471  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13472  m_CurrentFrameIndex,
13473  m_pBlockVector->GetFrameInUseCount(),
13474  m_pBlockVector->GetBufferImageGranularity(),
13475  size,
13476  alignment,
13477  false, // upperAddress
13478  suballocType,
13479  false, // canMakeOtherLost
13480  strategy,
13481  &dstAllocRequest) &&
13482  MoveMakesSense(
13483  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13484  {
13485  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13486 
13487  // Reached limit on number of allocations or bytes to move.
13488  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13489  (m_BytesMoved + size > maxBytesToMove))
13490  {
13491  return VK_SUCCESS;
13492  }
13493 
13494  VmaDefragmentationMove move = {};
13495  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13496  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13497  move.srcOffset = srcOffset;
13498  move.dstOffset = dstAllocRequest.offset;
13499  move.size = size;
13500  move.hAllocation = allocInfo.m_hAllocation;
13501  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
13502  move.pDstBlock = pDstBlockInfo->m_pBlock;
13503 
13504  moves.push_back(move);
13505 
13506  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13507  dstAllocRequest,
13508  suballocType,
13509  size,
13510  allocInfo.m_hAllocation);
13511 
13512  if(freeOldAllocations)
13513  {
13514  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13515  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13516  }
13517 
13518  if(allocInfo.m_pChanged != VMA_NULL)
13519  {
13520  *allocInfo.m_pChanged = VK_TRUE;
13521  }
13522 
13523  ++m_AllocationsMoved;
13524  m_BytesMoved += size;
13525 
13526  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13527 
13528  break;
13529  }
13530  }
13531 
13532  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13533 
13534  if(srcAllocIndex > 0)
13535  {
13536  --srcAllocIndex;
13537  }
13538  else
13539  {
13540  if(srcBlockIndex > 0)
13541  {
13542  --srcBlockIndex;
13543  srcAllocIndex = SIZE_MAX;
13544  }
13545  else
13546  {
13547  return VK_SUCCESS;
13548  }
13549  }
13550  }
13551 }
13552 
13553 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13554 {
13555  size_t result = 0;
13556  for(size_t i = 0; i < m_Blocks.size(); ++i)
13557  {
13558  if(m_Blocks[i]->m_HasNonMovableAllocations)
13559  {
13560  ++result;
13561  }
13562  }
13563  return result;
13564 }
13565 
13566 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13567  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13568  VkDeviceSize maxBytesToMove,
13569  uint32_t maxAllocationsToMove,
13570  VmaDefragmentationFlags flags)
13571 {
13572  if(!m_AllAllocations && m_AllocationCount == 0)
13573  {
13574  return VK_SUCCESS;
13575  }
13576 
13577  const size_t blockCount = m_Blocks.size();
13578  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13579  {
13580  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13581 
13582  if(m_AllAllocations)
13583  {
13584  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13585  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13586  it != pMetadata->m_Suballocations.end();
13587  ++it)
13588  {
13589  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13590  {
13591  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13592  pBlockInfo->m_Allocations.push_back(allocInfo);
13593  }
13594  }
13595  }
13596 
13597  pBlockInfo->CalcHasNonMovableAllocations();
13598 
13599  // This is a choice based on research.
13600  // Option 1:
13601  pBlockInfo->SortAllocationsByOffsetDescending();
13602  // Option 2:
13603  //pBlockInfo->SortAllocationsBySizeDescending();
13604  }
13605 
13606  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
13607  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13608 
13609  // This is a choice based on research.
13610  const uint32_t roundCount = 2;
13611 
13612  // Execute defragmentation rounds (the main part).
13613  VkResult result = VK_SUCCESS;
13614  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13615  {
13616  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
13617  }
13618 
13619  return result;
13620 }
13621 
13622 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13623  size_t dstBlockIndex, VkDeviceSize dstOffset,
13624  size_t srcBlockIndex, VkDeviceSize srcOffset)
13625 {
13626  if(dstBlockIndex < srcBlockIndex)
13627  {
13628  return true;
13629  }
13630  if(dstBlockIndex > srcBlockIndex)
13631  {
13632  return false;
13633  }
13634  if(dstOffset < srcOffset)
13635  {
13636  return true;
13637  }
13638  return false;
13639 }
13640 
13641 ////////////////////////////////////////////////////////////////////////////////
13642 // VmaDefragmentationAlgorithm_Fast
13643 
13644 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13645  VmaAllocator hAllocator,
13646  VmaBlockVector* pBlockVector,
13647  uint32_t currentFrameIndex,
13648  bool overlappingMoveSupported) :
13649  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13650  m_OverlappingMoveSupported(overlappingMoveSupported),
13651  m_AllocationCount(0),
13652  m_AllAllocations(false),
13653  m_BytesMoved(0),
13654  m_AllocationsMoved(0),
13655  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13656 {
13657  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13658 
13659 }
13660 
13661 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13662 {
13663 }
13664 
13665 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13666  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13667  VkDeviceSize maxBytesToMove,
13668  uint32_t maxAllocationsToMove,
13669  VmaDefragmentationFlags flags)
13670 {
13671  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13672 
13673  const size_t blockCount = m_pBlockVector->GetBlockCount();
13674  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13675  {
13676  return VK_SUCCESS;
13677  }
13678 
13679  PreprocessMetadata();
13680 
13681  // Sort blocks ascending by free space, so blocks with the least free space come first and act as move destinations.
13682 
13683  m_BlockInfos.resize(blockCount);
13684  for(size_t i = 0; i < blockCount; ++i)
13685  {
13686  m_BlockInfos[i].origBlockIndex = i;
13687  }
13688 
13689  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13690  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13691  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13692  });
13693 
13694  // THE MAIN ALGORITHM
13695 
13696  FreeSpaceDatabase freeSpaceDb;
13697 
13698  size_t dstBlockInfoIndex = 0;
13699  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13700  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13701  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13702  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13703  VkDeviceSize dstOffset = 0;
13704 
13705  bool end = false;
13706  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13707  {
13708  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13709  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13710  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13711  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13712  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13713  {
13714  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13715  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13716  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13717  if(m_AllocationsMoved == maxAllocationsToMove ||
13718  m_BytesMoved + srcAllocSize > maxBytesToMove)
13719  {
13720  end = true;
13721  break;
13722  }
13723  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13724 
13725  VmaDefragmentationMove move = {};
13726  // Try to place it in one of free spaces from the database.
13727  size_t freeSpaceInfoIndex;
13728  VkDeviceSize dstAllocOffset;
13729  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13730  freeSpaceInfoIndex, dstAllocOffset))
13731  {
13732  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13733  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13734  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13735 
13736  // Same block
13737  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13738  {
13739  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13740 
13741  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13742 
13743  VmaSuballocation suballoc = *srcSuballocIt;
13744  suballoc.offset = dstAllocOffset;
13745  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13746  m_BytesMoved += srcAllocSize;
13747  ++m_AllocationsMoved;
13748 
13749  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13750  ++nextSuballocIt;
13751  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13752  srcSuballocIt = nextSuballocIt;
13753 
13754  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13755 
13756  move.srcBlockIndex = srcOrigBlockIndex;
13757  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13758  move.srcOffset = srcAllocOffset;
13759  move.dstOffset = dstAllocOffset;
13760  move.size = srcAllocSize;
13761 
13762  moves.push_back(move);
13763  }
13764  // Different block
13765  else
13766  {
13767  // MOVE OPTION 2: Move the allocation to a different block.
13768 
13769  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13770 
13771  VmaSuballocation suballoc = *srcSuballocIt;
13772  suballoc.offset = dstAllocOffset;
13773  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13774  m_BytesMoved += srcAllocSize;
13775  ++m_AllocationsMoved;
13776 
13777  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13778  ++nextSuballocIt;
13779  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13780  srcSuballocIt = nextSuballocIt;
13781 
13782  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13783 
13784  move.srcBlockIndex = srcOrigBlockIndex;
13785  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13786  move.srcOffset = srcAllocOffset;
13787  move.dstOffset = dstAllocOffset;
13788  move.size = srcAllocSize;
13789 
13790  moves.push_back(move);
13791  }
13792  }
13793  else
13794  {
13795  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13796 
13797  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13798  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13799  dstAllocOffset + srcAllocSize > dstBlockSize)
13800  {
13801  // But before that, register remaining free space at the end of dst block.
13802  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13803 
13804  ++dstBlockInfoIndex;
13805  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13806  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13807  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13808  dstBlockSize = pDstMetadata->GetSize();
13809  dstOffset = 0;
13810  dstAllocOffset = 0;
13811  }
13812 
13813  // Same block
13814  if(dstBlockInfoIndex == srcBlockInfoIndex)
13815  {
13816  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13817 
13818  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13819 
13820  bool skipOver = overlap;
13821  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13822  {
13823  // If destination and source regions overlap, skip the move when it would
13824  // shift the allocation by less than 1/64 of its size.
13825  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13826  }
13827 
13828  if(skipOver)
13829  {
13830  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13831 
13832  dstOffset = srcAllocOffset + srcAllocSize;
13833  ++srcSuballocIt;
13834  }
13835  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13836  else
13837  {
13838  srcSuballocIt->offset = dstAllocOffset;
13839  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13840  dstOffset = dstAllocOffset + srcAllocSize;
13841  m_BytesMoved += srcAllocSize;
13842  ++m_AllocationsMoved;
13843  ++srcSuballocIt;
13844 
13845  move.srcBlockIndex = srcOrigBlockIndex;
13846  move.dstBlockIndex = dstOrigBlockIndex;
13847  move.srcOffset = srcAllocOffset;
13848  move.dstOffset = dstAllocOffset;
13849  move.size = srcAllocSize;
13850 
13851  moves.push_back(move);
13852  }
13853  }
13854  // Different block
13855  else
13856  {
13857  // MOVE OPTION 2: Move the allocation to a different block.
13858 
13859  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13860  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13861 
13862  VmaSuballocation suballoc = *srcSuballocIt;
13863  suballoc.offset = dstAllocOffset;
13864  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13865  dstOffset = dstAllocOffset + srcAllocSize;
13866  m_BytesMoved += srcAllocSize;
13867  ++m_AllocationsMoved;
13868 
13869  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13870  ++nextSuballocIt;
13871  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13872  srcSuballocIt = nextSuballocIt;
13873 
13874  pDstMetadata->m_Suballocations.push_back(suballoc);
13875 
13876  move.srcBlockIndex = srcOrigBlockIndex;
13877  move.dstBlockIndex = dstOrigBlockIndex;
13878  move.srcOffset = srcAllocOffset;
13879  move.dstOffset = dstAllocOffset;
13880  move.size = srcAllocSize;
13881 
13882  moves.push_back(move);
13883  }
13884  }
13885  }
13886  }
13887 
13888  m_BlockInfos.clear();
13889 
13890  PostprocessMetadata();
13891 
13892  return VK_SUCCESS;
13893 }
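// The loop above is a single forward pass: a source cursor scans allocations in
// "destination order" while a destination cursor packs them at the lowest
// aligned offsets, with FreeSpaceDatabase recycling gaps that had to be skipped.
// The core compaction step, reduced to one block and plain arrays (a sketch,
// not library code; assumes parallel offset/size arrays sorted by offset):
#include <vector>

static void CompactOffsetsSketch(std::vector<VkDeviceSize>& offsets,
    const std::vector<VkDeviceSize>& sizes, VkDeviceSize alignment)
{
    VkDeviceSize dstOffset = 0;
    for(size_t i = 0; i < offsets.size(); ++i)
    {
        // Round the destination cursor up to the required alignment.
        dstOffset = (dstOffset + alignment - 1) / alignment * alignment;
        offsets[i] = dstOffset;  // Corresponds to ChangeOffset() above.
        dstOffset += sizes[i];   // The next allocation packs right after.
    }
}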
13894 
13895 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13896 {
13897  const size_t blockCount = m_pBlockVector->GetBlockCount();
13898  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13899  {
13900  VmaBlockMetadata_Generic* const pMetadata =
13901  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13902  pMetadata->m_FreeCount = 0;
13903  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13904  pMetadata->m_FreeSuballocationsBySize.clear();
13905  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13906  it != pMetadata->m_Suballocations.end(); )
13907  {
13908  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13909  {
13910  VmaSuballocationList::iterator nextIt = it;
13911  ++nextIt;
13912  pMetadata->m_Suballocations.erase(it);
13913  it = nextIt;
13914  }
13915  else
13916  {
13917  ++it;
13918  }
13919  }
13920  }
13921 }
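// Editorial note: after PreprocessMetadata() the per-block metadata is left in a
// deliberately inconsistent state -- FREE nodes are erased and m_FreeCount /
// m_SumFreeSize are reset -- so the compaction loop can treat m_Suballocations as
// a plain, offset-sorted list of live allocations. PostprocessMetadata() below
// is what restores the invariants.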
13922 
13923 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13924 {
13925  const size_t blockCount = m_pBlockVector->GetBlockCount();
13926  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13927  {
13928  VmaBlockMetadata_Generic* const pMetadata =
13929  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13930  const VkDeviceSize blockSize = pMetadata->GetSize();
13931 
13932  // No allocations in this block - entire area is free.
13933  if(pMetadata->m_Suballocations.empty())
13934  {
13935  pMetadata->m_FreeCount = 1;
13936  //pMetadata->m_SumFreeSize is already set to blockSize.
13937  VmaSuballocation suballoc = {
13938  0, // offset
13939  blockSize, // size
13940  VMA_NULL, // hAllocation
13941  VMA_SUBALLOCATION_TYPE_FREE };
13942  pMetadata->m_Suballocations.push_back(suballoc);
13943  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13944  }
13945  // There are some allocations in this block.
13946  else
13947  {
13948  VkDeviceSize offset = 0;
13949  VmaSuballocationList::iterator it;
13950  for(it = pMetadata->m_Suballocations.begin();
13951  it != pMetadata->m_Suballocations.end();
13952  ++it)
13953  {
13954  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13955  VMA_ASSERT(it->offset >= offset);
13956 
13957  // Need to insert preceding free space.
13958  if(it->offset > offset)
13959  {
13960  ++pMetadata->m_FreeCount;
13961  const VkDeviceSize freeSize = it->offset - offset;
13962  VmaSuballocation suballoc = {
13963  offset, // offset
13964  freeSize, // size
13965  VMA_NULL, // hAllocation
13966  VMA_SUBALLOCATION_TYPE_FREE };
13967  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13968  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13969  {
13970  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13971  }
13972  }
13973 
13974  pMetadata->m_SumFreeSize -= it->size;
13975  offset = it->offset + it->size;
13976  }
13977 
13978  // Need to insert trailing free space.
13979  if(offset < blockSize)
13980  {
13981  ++pMetadata->m_FreeCount;
13982  const VkDeviceSize freeSize = blockSize - offset;
13983  VmaSuballocation suballoc = {
13984  offset, // offset
13985  freeSize, // size
13986  VMA_NULL, // hAllocation
13987  VMA_SUBALLOCATION_TYPE_FREE };
13988  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13989  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13990  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13991  {
13992  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13993  }
13994  }
13995 
13996  VMA_SORT(
13997  pMetadata->m_FreeSuballocationsBySize.begin(),
13998  pMetadata->m_FreeSuballocationsBySize.end(),
13999  VmaSuballocationItemSizeLess());
14000  }
14001 
14002  VMA_HEAVY_ASSERT(pMetadata->Validate());
14003  }
14004 }
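// PostprocessMetadata() rebuilds the free list by walking the used suballocations
// (already sorted by offset) and emitting a FREE node for every gap, including
// the trailing one. The gap scan, reduced to plain data (a sketch, not library
// code; pairs are (offset, size)):
#include <utility>
#include <vector>

static std::vector<std::pair<VkDeviceSize, VkDeviceSize> > FindGapsSketch(
    const std::vector<std::pair<VkDeviceSize, VkDeviceSize> >& used, // sorted by offset
    VkDeviceSize blockSize)
{
    std::vector<std::pair<VkDeviceSize, VkDeviceSize> > gaps;
    VkDeviceSize cursor = 0;
    for(size_t i = 0; i < used.size(); ++i)
    {
        if(used[i].first > cursor)
        {
            gaps.push_back(std::make_pair(cursor, used[i].first - cursor)); // preceding free space
        }
        cursor = used[i].first + used[i].second;
    }
    if(cursor < blockSize)
    {
        gaps.push_back(std::make_pair(cursor, blockSize - cursor)); // trailing free space
    }
    return gaps;
}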
14005 
14006 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14007 {
14008  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14009  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14010  // Advance to the first element whose offset is >= suballoc.offset.
14011  while(it != pMetadata->m_Suballocations.end() &&
14012  it->offset < suballoc.offset)
14013  {
14014  ++it;
14015  }
14016 
14017  pMetadata->m_Suballocations.insert(it, suballoc);
14018 }
14019 
14021 // VmaBlockVectorDefragmentationContext
14022 
14023 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14024  VmaAllocator hAllocator,
14025  VmaPool hCustomPool,
14026  VmaBlockVector* pBlockVector,
14027  uint32_t currFrameIndex) :
14028  res(VK_SUCCESS),
14029  mutexLocked(false),
14030  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14031  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14032  defragmentationMovesProcessed(0),
14033  defragmentationMovesCommitted(0),
14034  hasDefragmentationPlan(0),
14035  m_hAllocator(hAllocator),
14036  m_hCustomPool(hCustomPool),
14037  m_pBlockVector(pBlockVector),
14038  m_CurrFrameIndex(currFrameIndex),
14039  m_pAlgorithm(VMA_NULL),
14040  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14041  m_AllAllocations(false)
14042 {
14043 }
14044 
14045 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14046 {
14047  vma_delete(m_hAllocator, m_pAlgorithm);
14048 }
14049 
14050 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14051 {
14052  AllocInfo info = { hAlloc, pChanged };
14053  m_Allocations.push_back(info);
14054 }
14055 
14056 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14057 {
14058  const bool allAllocations = m_AllAllocations ||
14059  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14060 
14061  /********************************
14062  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14063  ********************************/
14064 
14065  /*
14066  Fast algorithm is supported only when certain criteria are met:
14067  - VMA_DEBUG_MARGIN is 0.
14068  - All allocations in this block vector are moveable.
14069  - There is no possibility of image/buffer granularity conflict.
14070  - The defragmentation is not incremental
14071  */
14072  if(VMA_DEBUG_MARGIN == 0 &&
14073  allAllocations &&
14074  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14075  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
14076  {
14077  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14078  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14079  }
14080  else
14081  {
14082  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14083  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14084  }
14085 
14086  if(allAllocations)
14087  {
14088  m_pAlgorithm->AddAll();
14089  }
14090  else
14091  {
14092  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14093  {
14094  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14095  }
14096  }
14097 }
14098 
14100 // VmaDefragmentationContext
14101 
14102 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14103  VmaAllocator hAllocator,
14104  uint32_t currFrameIndex,
14105  uint32_t flags,
14106  VmaDefragmentationStats* pStats) :
14107  m_hAllocator(hAllocator),
14108  m_CurrFrameIndex(currFrameIndex),
14109  m_Flags(flags),
14110  m_pStats(pStats),
14111  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14112 {
14113  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14114 }
14115 
14116 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14117 {
14118  for(size_t i = m_CustomPoolContexts.size(); i--; )
14119  {
14120  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14121  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
14122  vma_delete(m_hAllocator, pBlockVectorCtx);
14123  }
14124  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14125  {
14126  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14127  if(pBlockVectorCtx)
14128  {
14129  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
14130  vma_delete(m_hAllocator, pBlockVectorCtx);
14131  }
14132  }
14133 }
14134 
14135 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
14136 {
14137  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14138  {
14139  VmaPool pool = pPools[poolIndex];
14140  VMA_ASSERT(pool);
14141  // Pools with an algorithm other than the default are not defragmented.
14142  if(pool->m_BlockVector.GetAlgorithm() == 0)
14143  {
14144  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14145 
14146  for(size_t i = m_CustomPoolContexts.size(); i--; )
14147  {
14148  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14149  {
14150  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14151  break;
14152  }
14153  }
14154 
14155  if(!pBlockVectorDefragCtx)
14156  {
14157  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14158  m_hAllocator,
14159  pool,
14160  &pool->m_BlockVector,
14161  m_CurrFrameIndex);
14162  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14163  }
14164 
14165  pBlockVectorDefragCtx->AddAll();
14166  }
14167  }
14168 }
14169 
14170 void VmaDefragmentationContext_T::AddAllocations(
14171  uint32_t allocationCount,
14172  VmaAllocation* pAllocations,
14173  VkBool32* pAllocationsChanged)
14174 {
14175  // Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
14176  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14177  {
14178  const VmaAllocation hAlloc = pAllocations[allocIndex];
14179  VMA_ASSERT(hAlloc);
14180  // DedicatedAlloc cannot be defragmented.
14181  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14182  // Lost allocation cannot be defragmented.
14183  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14184  {
14185  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14186 
14187  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14188  // This allocation belongs to a custom pool.
14189  if(hAllocPool != VK_NULL_HANDLE)
14190  {
14191  // Pools with an algorithm other than the default are not defragmented.
14192  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14193  {
14194  for(size_t i = m_CustomPoolContexts.size(); i--; )
14195  {
14196  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14197  {
14198  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14199  break;
14200  }
14201  }
14202  if(!pBlockVectorDefragCtx)
14203  {
14204  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14205  m_hAllocator,
14206  hAllocPool,
14207  &hAllocPool->m_BlockVector,
14208  m_CurrFrameIndex);
14209  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14210  }
14211  }
14212  }
14213  // This allocation belongs to the default pool.
14214  else
14215  {
14216  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14217  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14218  if(!pBlockVectorDefragCtx)
14219  {
14220  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14221  m_hAllocator,
14222  VMA_NULL, // hCustomPool
14223  m_hAllocator->m_pBlockVectors[memTypeIndex],
14224  m_CurrFrameIndex);
14225  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14226  }
14227  }
14228 
14229  if(pBlockVectorDefragCtx)
14230  {
14231  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14232  &pAllocationsChanged[allocIndex] : VMA_NULL;
14233  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14234  }
14235  }
14236  }
14237 }
14238 
14239 VkResult VmaDefragmentationContext_T::Defragment(
14240  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14241  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14242  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14243 {
14244  if(pStats)
14245  {
14246  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14247  }
14248 
14249  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14250  {
14251  // For incremental defragmentations we just earmark how much we can move.
14252  // The real work happens in the defragmentation passes.
14253  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14254  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14255 
14256  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14257  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14258 
14259  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14260  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14261  return VK_SUCCESS;
14262 
14263  return VK_NOT_READY;
14264  }
14265 
14266  if(commandBuffer == VK_NULL_HANDLE)
14267  {
14268  maxGpuBytesToMove = 0;
14269  maxGpuAllocationsToMove = 0;
14270  }
14271 
14272  VkResult res = VK_SUCCESS;
14273 
14274  // Process default pools.
14275  for(uint32_t memTypeIndex = 0;
14276  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14277  ++memTypeIndex)
14278  {
14279  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14280  if(pBlockVectorCtx)
14281  {
14282  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14283  pBlockVectorCtx->GetBlockVector()->Defragment(
14284  pBlockVectorCtx,
14285  pStats, flags,
14286  maxCpuBytesToMove, maxCpuAllocationsToMove,
14287  maxGpuBytesToMove, maxGpuAllocationsToMove,
14288  commandBuffer);
14289  if(pBlockVectorCtx->res != VK_SUCCESS)
14290  {
14291  res = pBlockVectorCtx->res;
14292  }
14293  }
14294  }
14295 
14296  // Process custom pools.
14297  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14298  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14299  ++customCtxIndex)
14300  {
14301  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14302  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14303  pBlockVectorCtx->GetBlockVector()->Defragment(
14304  pBlockVectorCtx,
14305  pStats, flags,
14306  maxCpuBytesToMove, maxCpuAllocationsToMove,
14307  maxGpuBytesToMove, maxGpuAllocationsToMove,
14308  commandBuffer);
14309  if(pBlockVectorCtx->res != VK_SUCCESS)
14310  {
14311  res = pBlockVectorCtx->res;
14312  }
14313  }
14314 
14315  return res;
14316 }
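// How this method is typically reached from user code -- a minimal sketch using
// the public API (assumes `allocator` and the `allocs` array are valid; CPU-side
// moves only, so no command buffer is passed):
static void DefragmentExample(VmaAllocator allocator,
    VmaAllocation* allocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no count limit

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    if(res >= VK_SUCCESS)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
        // Buffers/images bound to allocations that moved must be recreated or
        // rebound by the caller afterwards.
    }
}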
14317 
14318 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14319 {
14320  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14321  uint32_t movesLeft = pInfo->moveCount;
14322 
14323  // Process default pools.
14324  for(uint32_t memTypeIndex = 0;
14325  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14326  ++memTypeIndex)
14327  {
14328  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14329  if(pBlockVectorCtx)
14330  {
14331  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14332 
14333  if(!pBlockVectorCtx->hasDefragmentationPlan)
14334  {
14335  pBlockVectorCtx->GetBlockVector()->Defragment(
14336  pBlockVectorCtx,
14337  m_pStats, m_Flags,
14338  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14339  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14340  VK_NULL_HANDLE);
14341 
14342  if(pBlockVectorCtx->res < VK_SUCCESS)
14343  continue;
14344 
14345  pBlockVectorCtx->hasDefragmentationPlan = true;
14346  }
14347 
14348  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14349  pBlockVectorCtx,
14350  pCurrentMove, movesLeft);
14351 
14352  movesLeft -= processed;
14353  pCurrentMove += processed;
14354  }
14355  }
14356 
14357  // Process custom pools.
14358  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14359  customCtxIndex < customCtxCount;
14360  ++customCtxIndex)
14361  {
14362  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14363  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14364 
14365  if(!pBlockVectorCtx->hasDefragmentationPlan)
14366  {
14367  pBlockVectorCtx->GetBlockVector()->Defragment(
14368  pBlockVectorCtx,
14369  m_pStats, m_Flags,
14370  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14371  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14372  VK_NULL_HANDLE);
14373 
14374  if(pBlockVectorCtx->res < VK_SUCCESS)
14375  continue;
14376 
14377  pBlockVectorCtx->hasDefragmentationPlan = true;
14378  }
14379 
14380  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14381  pBlockVectorCtx,
14382  pCurrentMove, movesLeft);
14383 
14384  movesLeft -= processed;
14385  pCurrentMove += processed;
14386  }
14387 
14388  pInfo->moveCount = pInfo->moveCount - movesLeft;
14389 
14390  return VK_SUCCESS;
14391 }
14392 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14393 {
14394  VkResult res = VK_SUCCESS;
14395 
14396  // Process default pools.
14397  for(uint32_t memTypeIndex = 0;
14398  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14399  ++memTypeIndex)
14400  {
14401  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14402  if(pBlockVectorCtx)
14403  {
14404  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14405 
14406  if(!pBlockVectorCtx->hasDefragmentationPlan)
14407  {
14408  res = VK_NOT_READY;
14409  continue;
14410  }
14411 
14412  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14413  pBlockVectorCtx, m_pStats);
14414 
14415  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14416  res = VK_NOT_READY;
14417  }
14418  }
14419 
14420  // Process custom pools.
14421  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14422  customCtxIndex < customCtxCount;
14423  ++customCtxIndex)
14424  {
14425  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14426  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14427 
14428  if(!pBlockVectorCtx->hasDefragmentationPlan)
14429  {
14430  res = VK_NOT_READY;
14431  continue;
14432  }
14433 
14434  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14435  pBlockVectorCtx, m_pStats);
14436 
14437  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14438  res = VK_NOT_READY;
14439  }
14440 
14441  return res;
14442 }
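// The pass API above is driven from user code roughly like this -- a sketch that
// assumes the context was begun with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set in
// VmaDefragmentationInfo2::flags and that the caller performs the data copies:
static void IncrementalPassesExample(VmaAllocator allocator, VmaDefragmentationContext defragCtx)
{
    VmaDefragmentationPassMoveInfo moves[64];
    VkResult res = VK_NOT_READY;
    while(res == VK_NOT_READY)
    {
        VmaDefragmentationPassInfo passInfo = {};
        passInfo.moveCount = 64;
        passInfo.pMoves = moves;
        vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
        // ... copy each of the passInfo.moveCount allocations to its new
        // (memory, offset) destination here ...
        res = vmaEndDefragmentationPass(allocator, defragCtx); // VK_NOT_READY => more passes
    }
}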
14443 
14445 // VmaRecorder
14446 
14447 #if VMA_RECORDING_ENABLED
14448 
14449 VmaRecorder::VmaRecorder() :
14450  m_UseMutex(true),
14451  m_Flags(0),
14452  m_File(VMA_NULL),
14453  m_Freq(INT64_MAX),
14454  m_StartCounter(INT64_MAX)
14455 {
14456 }
14457 
14458 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
14459 {
14460  m_UseMutex = useMutex;
14461  m_Flags = settings.flags;
14462 
14463  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
14464  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
14465 
14466  // Open file for writing.
14467  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
14468  if(err != 0)
14469  {
14470  return VK_ERROR_INITIALIZATION_FAILED;
14471  }
14472 
14473  // Write header.
14474  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
14475  fprintf(m_File, "%s\n", "1,8");
14476 
14477  return VK_SUCCESS;
14478 }
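// Recording is enabled through the allocator rather than by using VmaRecorder
// directly. A minimal sketch (assumes VMA_RECORDING_ENABLED is defined to 1 and
// `createInfo` is an otherwise filled VmaAllocatorCreateInfo):
static void EnableRecordingExample(VmaAllocatorCreateInfo& createInfo,
    VmaRecordSettings& recordSettings)
{
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every call
    recordSettings.pFilePath = "vma_calls.csv";             // CSV stream, format version "1,8"
    createInfo.pRecordSettings = &recordSettings;
}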
14479 
14480 VmaRecorder::~VmaRecorder()
14481 {
14482  if(m_File != VMA_NULL)
14483  {
14484  fclose(m_File);
14485  }
14486 }
14487 
14488 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14489 {
14490  CallParams callParams;
14491  GetBasicParams(callParams);
14492 
14493  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14494  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14495  Flush();
14496 }
14497 
14498 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14499 {
14500  CallParams callParams;
14501  GetBasicParams(callParams);
14502 
14503  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14504  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14505  Flush();
14506 }
14507 
14508 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14509 {
14510  CallParams callParams;
14511  GetBasicParams(callParams);
14512 
14513  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14514  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14515  createInfo.memoryTypeIndex,
14516  createInfo.flags,
14517  createInfo.blockSize,
14518  (uint64_t)createInfo.minBlockCount,
14519  (uint64_t)createInfo.maxBlockCount,
14520  createInfo.frameInUseCount,
14521  pool);
14522  Flush();
14523 }
14524 
14525 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14526 {
14527  CallParams callParams;
14528  GetBasicParams(callParams);
14529 
14530  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14531  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14532  pool);
14533  Flush();
14534 }
14535 
14536 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14537  const VkMemoryRequirements& vkMemReq,
14538  const VmaAllocationCreateInfo& createInfo,
14539  VmaAllocation allocation)
14540 {
14541  CallParams callParams;
14542  GetBasicParams(callParams);
14543 
14544  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14545  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14546  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14547  vkMemReq.size,
14548  vkMemReq.alignment,
14549  vkMemReq.memoryTypeBits,
14550  createInfo.flags,
14551  createInfo.usage,
14552  createInfo.requiredFlags,
14553  createInfo.preferredFlags,
14554  createInfo.memoryTypeBits,
14555  createInfo.pool,
14556  allocation,
14557  userDataStr.GetString());
14558  Flush();
14559 }
14560 
14561 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14562  const VkMemoryRequirements& vkMemReq,
14563  const VmaAllocationCreateInfo& createInfo,
14564  uint64_t allocationCount,
14565  const VmaAllocation* pAllocations)
14566 {
14567  CallParams callParams;
14568  GetBasicParams(callParams);
14569 
14570  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14571  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14572  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14573  vkMemReq.size,
14574  vkMemReq.alignment,
14575  vkMemReq.memoryTypeBits,
14576  createInfo.flags,
14577  createInfo.usage,
14578  createInfo.requiredFlags,
14579  createInfo.preferredFlags,
14580  createInfo.memoryTypeBits,
14581  createInfo.pool);
14582  PrintPointerList(allocationCount, pAllocations);
14583  fprintf(m_File, ",%s\n", userDataStr.GetString());
14584  Flush();
14585 }
14586 
14587 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14588  const VkMemoryRequirements& vkMemReq,
14589  bool requiresDedicatedAllocation,
14590  bool prefersDedicatedAllocation,
14591  const VmaAllocationCreateInfo& createInfo,
14592  VmaAllocation allocation)
14593 {
14594  CallParams callParams;
14595  GetBasicParams(callParams);
14596 
14597  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14598  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14599  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14600  vkMemReq.size,
14601  vkMemReq.alignment,
14602  vkMemReq.memoryTypeBits,
14603  requiresDedicatedAllocation ? 1 : 0,
14604  prefersDedicatedAllocation ? 1 : 0,
14605  createInfo.flags,
14606  createInfo.usage,
14607  createInfo.requiredFlags,
14608  createInfo.preferredFlags,
14609  createInfo.memoryTypeBits,
14610  createInfo.pool,
14611  allocation,
14612  userDataStr.GetString());
14613  Flush();
14614 }
14615 
14616 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14617  const VkMemoryRequirements& vkMemReq,
14618  bool requiresDedicatedAllocation,
14619  bool prefersDedicatedAllocation,
14620  const VmaAllocationCreateInfo& createInfo,
14621  VmaAllocation allocation)
14622 {
14623  CallParams callParams;
14624  GetBasicParams(callParams);
14625 
14626  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14627  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14628  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14629  vkMemReq.size,
14630  vkMemReq.alignment,
14631  vkMemReq.memoryTypeBits,
14632  requiresDedicatedAllocation ? 1 : 0,
14633  prefersDedicatedAllocation ? 1 : 0,
14634  createInfo.flags,
14635  createInfo.usage,
14636  createInfo.requiredFlags,
14637  createInfo.preferredFlags,
14638  createInfo.memoryTypeBits,
14639  createInfo.pool,
14640  allocation,
14641  userDataStr.GetString());
14642  Flush();
14643 }
14644 
14645 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14646  VmaAllocation allocation)
14647 {
14648  CallParams callParams;
14649  GetBasicParams(callParams);
14650 
14651  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14652  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14653  allocation);
14654  Flush();
14655 }
14656 
14657 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14658  uint64_t allocationCount,
14659  const VmaAllocation* pAllocations)
14660 {
14661  CallParams callParams;
14662  GetBasicParams(callParams);
14663 
14664  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14665  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14666  PrintPointerList(allocationCount, pAllocations);
14667  fprintf(m_File, "\n");
14668  Flush();
14669 }
14670 
14671 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14672  VmaAllocation allocation,
14673  const void* pUserData)
14674 {
14675  CallParams callParams;
14676  GetBasicParams(callParams);
14677 
14678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14679  UserDataString userDataStr(
14680  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14681  pUserData);
14682  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14683  allocation,
14684  userDataStr.GetString());
14685  Flush();
14686 }
14687 
14688 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14689  VmaAllocation allocation)
14690 {
14691  CallParams callParams;
14692  GetBasicParams(callParams);
14693 
14694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14695  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14696  allocation);
14697  Flush();
14698 }
14699 
14700 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14701  VmaAllocation allocation)
14702 {
14703  CallParams callParams;
14704  GetBasicParams(callParams);
14705 
14706  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14707  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14708  allocation);
14709  Flush();
14710 }
14711 
14712 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14713  VmaAllocation allocation)
14714 {
14715  CallParams callParams;
14716  GetBasicParams(callParams);
14717 
14718  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14719  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14720  allocation);
14721  Flush();
14722 }
14723 
14724 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14725  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14726 {
14727  CallParams callParams;
14728  GetBasicParams(callParams);
14729 
14730  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14731  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14732  allocation,
14733  offset,
14734  size);
14735  Flush();
14736 }
14737 
14738 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14739  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14740 {
14741  CallParams callParams;
14742  GetBasicParams(callParams);
14743 
14744  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14745  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14746  allocation,
14747  offset,
14748  size);
14749  Flush();
14750 }
14751 
14752 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14753  const VkBufferCreateInfo& bufCreateInfo,
14754  const VmaAllocationCreateInfo& allocCreateInfo,
14755  VmaAllocation allocation)
14756 {
14757  CallParams callParams;
14758  GetBasicParams(callParams);
14759 
14760  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14761  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14762  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14763  bufCreateInfo.flags,
14764  bufCreateInfo.size,
14765  bufCreateInfo.usage,
14766  bufCreateInfo.sharingMode,
14767  allocCreateInfo.flags,
14768  allocCreateInfo.usage,
14769  allocCreateInfo.requiredFlags,
14770  allocCreateInfo.preferredFlags,
14771  allocCreateInfo.memoryTypeBits,
14772  allocCreateInfo.pool,
14773  allocation,
14774  userDataStr.GetString());
14775  Flush();
14776 }
14777 
14778 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14779  const VkImageCreateInfo& imageCreateInfo,
14780  const VmaAllocationCreateInfo& allocCreateInfo,
14781  VmaAllocation allocation)
14782 {
14783  CallParams callParams;
14784  GetBasicParams(callParams);
14785 
14786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14787  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14788  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14789  imageCreateInfo.flags,
14790  imageCreateInfo.imageType,
14791  imageCreateInfo.format,
14792  imageCreateInfo.extent.width,
14793  imageCreateInfo.extent.height,
14794  imageCreateInfo.extent.depth,
14795  imageCreateInfo.mipLevels,
14796  imageCreateInfo.arrayLayers,
14797  imageCreateInfo.samples,
14798  imageCreateInfo.tiling,
14799  imageCreateInfo.usage,
14800  imageCreateInfo.sharingMode,
14801  imageCreateInfo.initialLayout,
14802  allocCreateInfo.flags,
14803  allocCreateInfo.usage,
14804  allocCreateInfo.requiredFlags,
14805  allocCreateInfo.preferredFlags,
14806  allocCreateInfo.memoryTypeBits,
14807  allocCreateInfo.pool,
14808  allocation,
14809  userDataStr.GetString());
14810  Flush();
14811 }
14812 
14813 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14814  VmaAllocation allocation)
14815 {
14816  CallParams callParams;
14817  GetBasicParams(callParams);
14818 
14819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14820  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14821  allocation);
14822  Flush();
14823 }
14824 
14825 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14826  VmaAllocation allocation)
14827 {
14828  CallParams callParams;
14829  GetBasicParams(callParams);
14830 
14831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14832  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14833  allocation);
14834  Flush();
14835 }
14836 
14837 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14838  VmaAllocation allocation)
14839 {
14840  CallParams callParams;
14841  GetBasicParams(callParams);
14842 
14843  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14844  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14845  allocation);
14846  Flush();
14847 }
14848 
14849 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14850  VmaAllocation allocation)
14851 {
14852  CallParams callParams;
14853  GetBasicParams(callParams);
14854 
14855  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14856  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14857  allocation);
14858  Flush();
14859 }
14860 
14861 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14862  VmaPool pool)
14863 {
14864  CallParams callParams;
14865  GetBasicParams(callParams);
14866 
14867  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14868  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14869  pool);
14870  Flush();
14871 }
14872 
14873 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14874  const VmaDefragmentationInfo2& info,
14875  VmaDefragmentationContext ctx)
14876 {
14877  CallParams callParams;
14878  GetBasicParams(callParams);
14879 
14880  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14881  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14882  info.flags);
14883  PrintPointerList(info.allocationCount, info.pAllocations);
14884  fprintf(m_File, ",");
14885  PrintPointerList(info.poolCount, info.pPools);
14886  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14887  info.maxCpuBytesToMove,
14888  info.maxCpuAllocationsToMove,
14889  info.maxGpuBytesToMove,
14890  info.maxGpuAllocationsToMove,
14891  info.commandBuffer,
14892  ctx);
14893  Flush();
14894 }
14895 
14896 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14897  VmaDefragmentationContext ctx)
14898 {
14899  CallParams callParams;
14900  GetBasicParams(callParams);
14901 
14902  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14903  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14904  ctx);
14905  Flush();
14906 }
14907 
14908 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14909  VmaPool pool,
14910  const char* name)
14911 {
14912  CallParams callParams;
14913  GetBasicParams(callParams);
14914 
14915  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14916  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14917  pool, name != VMA_NULL ? name : "");
14918  Flush();
14919 }
14920 
14921 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14922 {
14923  if(pUserData != VMA_NULL)
14924  {
14925  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14926  {
14927  m_Str = (const char*)pUserData;
14928  }
14929  else
14930  {
14931  sprintf_s(m_PtrStr, "%p", pUserData);
14932  m_Str = m_PtrStr;
14933  }
14934  }
14935  else
14936  {
14937  m_Str = "";
14938  }
14939 }
14940 
14941 void VmaRecorder::WriteConfiguration(
14942  const VkPhysicalDeviceProperties& devProps,
14943  const VkPhysicalDeviceMemoryProperties& memProps,
14944  uint32_t vulkanApiVersion,
14945  bool dedicatedAllocationExtensionEnabled,
14946  bool bindMemory2ExtensionEnabled,
14947  bool memoryBudgetExtensionEnabled,
14948  bool deviceCoherentMemoryExtensionEnabled)
14949 {
14950  fprintf(m_File, "Config,Begin\n");
14951 
14952  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
14953 
14954  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14955  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14956  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14957  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14958  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14959  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14960 
14961  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14962  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14963  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14964 
14965  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14966  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14967  {
14968  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14969  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14970  }
14971  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14972  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14973  {
14974  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14975  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14976  }
14977 
14978  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14979  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14980  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
14981  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
14982 
14983  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14984  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14985  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14986  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14987  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14988  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14989  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14990  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14991  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14992 
14993  fprintf(m_File, "Config,End\n");
14994 }
14995 
14996 void VmaRecorder::GetBasicParams(CallParams& outParams)
14997 {
14998  outParams.threadId = GetCurrentThreadId();
14999 
15000  LARGE_INTEGER counter;
15001  QueryPerformanceCounter(&counter);
15002  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
15003 }
15004 
15005 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15006 {
15007  if(count)
15008  {
15009  fprintf(m_File, "%p", pItems[0]);
15010  for(uint64_t i = 1; i < count; ++i)
15011  {
15012  fprintf(m_File, " %p", pItems[i]);
15013  }
15014  }
15015 }
15016 
15017 void VmaRecorder::Flush()
15018 {
15019  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15020  {
15021  fflush(m_File);
15022  }
15023 }
15024 
15025 #endif // #if VMA_RECORDING_ENABLED
15026 
15028 // VmaAllocationObjectAllocator
15029 
15030 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15031  m_Allocator(pAllocationCallbacks, 1024)
15032 {
15033 }
15034 
15035 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15036 {
15037  VmaMutexLock mutexLock(m_Mutex);
15038  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15039 }
15040 
15041 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15042 {
15043  VmaMutexLock mutexLock(m_Mutex);
15044  m_Allocator.Free(hAlloc);
15045 }
15046 
15048 // VmaAllocator_T
15049 
15050 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15051  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15052  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15053  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15054  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15055  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15056  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15057  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15058  m_hDevice(pCreateInfo->device),
15059  m_hInstance(pCreateInfo->instance),
15060  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15061  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15062  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15063  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15064  m_HeapSizeLimitMask(0),
15065  m_PreferredLargeHeapBlockSize(0),
15066  m_PhysicalDevice(pCreateInfo->physicalDevice),
15067  m_CurrentFrameIndex(0),
15068  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15069  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15070  m_NextPoolId(0),
15071  m_GlobalMemoryTypeBits(UINT32_MAX)
15072 #if VMA_RECORDING_ENABLED
15073  ,m_pRecorder(VMA_NULL)
15074 #endif
15075 {
15076  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15077  {
15078  m_UseKhrDedicatedAllocation = false;
15079  m_UseKhrBindMemory2 = false;
15080  }
15081 
15082  if(VMA_DEBUG_DETECT_CORRUPTION)
15083  {
15084  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15085  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15086  }
15087 
15088  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
15089 
15090  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15091  {
15092 #if !(VMA_DEDICATED_ALLOCATION)
15093  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15094  {
15095  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15096  }
15097 #endif
15098 #if !(VMA_BIND_MEMORY2)
15099  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15100  {
15101  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15102  }
15103 #endif
15104  }
15105 #if !(VMA_MEMORY_BUDGET)
15106  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15107  {
15108  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15109  }
15110 #endif
15111 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15112  if(m_UseKhrBufferDeviceAddress)
15113  {
15114  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15115  }
15116 #endif
15117 #if VMA_VULKAN_VERSION < 1002000
15118  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15119  {
15120  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15121  }
15122 #endif
15123 #if VMA_VULKAN_VERSION < 1001000
15124  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15125  {
15126  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15127  }
15128 #endif
15129 
15130  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
15131  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15132  memset(&m_MemProps, 0, sizeof(m_MemProps));
15133 
15134  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15135  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15136  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15137 
15138  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15139  {
15140  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15141  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15142  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15143  }
15144 
15145  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15146 
15147  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15148  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15149 
15150  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15151  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15152  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15153  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15154 
15155  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15156  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15157 
15158  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15159 
15160  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15161  {
15162  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15163  {
15164  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15165  if(limit != VK_WHOLE_SIZE)
15166  {
15167  m_HeapSizeLimitMask |= 1u << heapIndex;
15168  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15169  {
15170  m_MemProps.memoryHeaps[heapIndex].size = limit;
15171  }
15172  }
15173  }
15174  }
15175 
15176  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15177  {
15178  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15179 
15180  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15181  this,
15182  VK_NULL_HANDLE, // hParentPool
15183  memTypeIndex,
15184  preferredBlockSize,
15185  0,
15186  SIZE_MAX,
15187  GetBufferImageGranularity(),
15188  pCreateInfo->frameInUseCount,
15189  false, // explicitBlockSize
15190  false); // linearAlgorithm
15191  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15192  // because minBlockCount is 0.
15193  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15194 
15195  }
15196 }
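// The pHeapSizeLimit handling in the constructor above can be exercised like
// this -- a sketch (the array must have VK_MAX_MEMORY_HEAPS elements and outlive
// vmaCreateAllocator; VK_WHOLE_SIZE means "no limit" for that heap):
static void HeapSizeLimitExample(VmaAllocatorCreateInfo& createInfo,
    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS])
{
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE;      // default: leave the heap unlimited
    }
    heapLimits[0] = 256ull * 1024 * 1024;   // e.g. clamp heap 0 to 256 MiB
    createInfo.pHeapSizeLimit = heapLimits;
}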
15197 
15198 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15199 {
15200  VkResult res = VK_SUCCESS;
15201 
15202  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15203  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15204  {
15205 #if VMA_RECORDING_ENABLED
15206  m_pRecorder = vma_new(this, VmaRecorder)();
15207  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15208  if(res != VK_SUCCESS)
15209  {
15210  return res;
15211  }
15212  m_pRecorder->WriteConfiguration(
15213  m_PhysicalDeviceProperties,
15214  m_MemProps,
15215  m_VulkanApiVersion,
15216  m_UseKhrDedicatedAllocation,
15217  m_UseKhrBindMemory2,
15218  m_UseExtMemoryBudget,
15219  m_UseAmdDeviceCoherentMemory);
15220  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15221 #else
15222  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15223  return VK_ERROR_FEATURE_NOT_PRESENT;
15224 #endif
15225  }
15226 
15227 #if VMA_MEMORY_BUDGET
15228  if(m_UseExtMemoryBudget)
15229  {
15230  UpdateVulkanBudget();
15231  }
15232 #endif // #if VMA_MEMORY_BUDGET
15233 
15234  return res;
15235 }
15236 
15237 VmaAllocator_T::~VmaAllocator_T()
15238 {
15239 #if VMA_RECORDING_ENABLED
15240  if(m_pRecorder != VMA_NULL)
15241  {
15242  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15243  vma_delete(this, m_pRecorder);
15244  }
15245 #endif
15246 
15247  VMA_ASSERT(m_Pools.empty());
15248 
15249  for(size_t i = GetMemoryTypeCount(); i--; )
15250  {
15251  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15252  {
15253  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15254  }
15255 
15256  vma_delete(this, m_pDedicatedAllocations[i]);
15257  vma_delete(this, m_pBlockVectors[i]);
15258  }
15259 }
15260 
15261 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15262 {
15263  m_VulkanFunctions.vkGetPhysicalDeviceProperties =
15264  (PFN_vkGetPhysicalDeviceProperties)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceProperties");
15265  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties =
15266  (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties");
15267  m_VulkanFunctions.vkAllocateMemory =
15268  (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
15269  m_VulkanFunctions.vkFreeMemory =
15270  (PFN_vkFreeMemory)vkGetDeviceProcAddr(m_hDevice, "vkFreeMemory");
15271  m_VulkanFunctions.vkMapMemory =
15272  (PFN_vkMapMemory)vkGetDeviceProcAddr(m_hDevice, "vkMapMemory");
15273  m_VulkanFunctions.vkUnmapMemory =
15274  (PFN_vkUnmapMemory)vkGetDeviceProcAddr(m_hDevice, "vkUnmapMemory");
15275  m_VulkanFunctions.vkFlushMappedMemoryRanges =
15276  (PFN_vkFlushMappedMemoryRanges)vkGetDeviceProcAddr(m_hDevice, "vkFlushMappedMemoryRanges");
15277  m_VulkanFunctions.vkInvalidateMappedMemoryRanges =
15278  (PFN_vkInvalidateMappedMemoryRanges)vkGetDeviceProcAddr(m_hDevice, "vkInvalidateMappedMemoryRanges");
15279  m_VulkanFunctions.vkBindBufferMemory =
15280  (PFN_vkBindBufferMemory)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory");
15281  m_VulkanFunctions.vkBindImageMemory =
15282  (PFN_vkBindImageMemory)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory");
15283  m_VulkanFunctions.vkGetBufferMemoryRequirements =
15284  (PFN_vkGetBufferMemoryRequirements)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements");
15285  m_VulkanFunctions.vkGetImageMemoryRequirements =
15286  (PFN_vkGetImageMemoryRequirements)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements");
15287  m_VulkanFunctions.vkCreateBuffer =
15288  (PFN_vkCreateBuffer)vkGetDeviceProcAddr(m_hDevice, "vkCreateBuffer");
15289  m_VulkanFunctions.vkDestroyBuffer =
15290  (PFN_vkDestroyBuffer)vkGetDeviceProcAddr(m_hDevice, "vkDestroyBuffer");
15291  m_VulkanFunctions.vkCreateImage =
15292  (PFN_vkCreateImage)vkGetDeviceProcAddr(m_hDevice, "vkCreateImage");
15293  m_VulkanFunctions.vkDestroyImage =
15294  (PFN_vkDestroyImage)vkGetDeviceProcAddr(m_hDevice, "vkDestroyImage");
15295  m_VulkanFunctions.vkCmdCopyBuffer =
15296  (PFN_vkCmdCopyBuffer)vkGetDeviceProcAddr(m_hDevice, "vkCmdCopyBuffer");
15297 #if VMA_VULKAN_VERSION >= 1001000
15298  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15299  {
15300  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
15301  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
15302  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
15303  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
15304  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
15305  m_VulkanFunctions.vkBindBufferMemory2KHR =
15306  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
15307  m_VulkanFunctions.vkBindImageMemory2KHR =
15308  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
15309  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
15310  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
15311  }
15312 #endif
15313 #if VMA_DEDICATED_ALLOCATION
15314  if(m_UseKhrDedicatedAllocation)
15315  {
15316  if(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR == nullptr)
15317  {
15318  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
15319  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
15320  }
15321  if(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR == nullptr)
15322  {
15323  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
15324  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
15325  }
15326  }
15327 #endif
15328 #if VMA_BIND_MEMORY2
15329  if(m_UseKhrBindMemory2)
15330  {
15331  if(m_VulkanFunctions.vkBindBufferMemory2KHR == nullptr)
15332  {
15333  m_VulkanFunctions.vkBindBufferMemory2KHR =
15334  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
15335  }
15336  if(m_VulkanFunctions.vkBindImageMemory2KHR == nullptr)
15337  {
15338  m_VulkanFunctions.vkBindImageMemory2KHR =
15339  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
15340  }
15341  }
15342 #endif // #if VMA_BIND_MEMORY2
15343 #if VMA_MEMORY_BUDGET
15344  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15345  {
15346  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
15347  if(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR == nullptr)
15348  {
15349  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
15350  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
15351  }
15352  }
15353 #endif // #if VMA_MEMORY_BUDGET
15354 
15355 #define VMA_COPY_IF_NOT_NULL(funcName) \
15356  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15357 
15358  if(pVulkanFunctions != VMA_NULL)
15359  {
15360  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15361  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15362  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15363  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15364  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15365  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15366  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15367  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15368  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15369  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15370  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15371  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15372  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15373  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15374  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15375  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15376  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15377 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15378  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15379  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15380 #endif
15381 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15382  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15383  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15384 #endif
15385 #if VMA_MEMORY_BUDGET
15386  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15387 #endif
15388  }
15389 
15390 #undef VMA_COPY_IF_NOT_NULL
15391 
15392  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
15393  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
15394  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15395  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15396  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15397  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15398  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15399  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15400  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15401  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15402  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15403  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15404  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15405  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15406  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15407  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15408  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15409  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15410  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15411 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15412  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15413  {
15414  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15415  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15416  }
15417 #endif
15418 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15419  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15420  {
15421  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15422  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15423  }
15424 #endif
15425 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15426  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15427  {
15428  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15429  }
15430 #endif
15431 }
15432 
15433 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15434 {
15435  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15436  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15437  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15438  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15439 }
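/*
Worked example of the formula above, assuming the default
VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB:
a 256 MiB heap counts as small, so its preferred block size is
256 MiB / 8 = 32 MiB; an 8 GiB heap counts as large and uses the full 256 MiB,
or VmaAllocatorCreateInfo::preferredLargeHeapBlockSize if the user overrode it.
Either result is then aligned up to a multiple of 32 bytes.
*/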
15440 
15441 VkResult VmaAllocator_T::AllocateMemoryOfType(
15442  VkDeviceSize size,
15443  VkDeviceSize alignment,
15444  bool dedicatedAllocation,
15445  VkBuffer dedicatedBuffer,
15446  VkBufferUsageFlags dedicatedBufferUsage,
15447  VkImage dedicatedImage,
15448  const VmaAllocationCreateInfo& createInfo,
15449  uint32_t memTypeIndex,
15450  VmaSuballocationType suballocType,
15451  size_t allocationCount,
15452  VmaAllocation* pAllocations)
15453 {
15454  VMA_ASSERT(pAllocations != VMA_NULL);
15455  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
15456 
15457  VmaAllocationCreateInfo finalCreateInfo = createInfo;
15458 
15459  // If memory type is not HOST_VISIBLE, disable MAPPED.
15460  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15461  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15462  {
15463  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15464  }
15465  // If memory is lazily allocated, it should always be dedicated.
15466  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15467  {
15468  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15469  }
15470 
15471  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
15472  VMA_ASSERT(blockVector);
15473 
15474  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
15475  bool preferDedicatedMemory =
15476  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
15477  dedicatedAllocation ||
15478  // Heuristics: allocate dedicated memory if the requested size is greater than half of the preferred block size.
15479  size > preferredBlockSize / 2;
15480 
15481  if(preferDedicatedMemory &&
15482  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
15483  finalCreateInfo.pool == VK_NULL_HANDLE)
15484  {
15485  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15486  }
15487 
15488  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15489  {
15490  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15491  {
15492  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15493  }
15494  else
15495  {
15496  return AllocateDedicatedMemory(
15497  size,
15498  suballocType,
15499  memTypeIndex,
15500  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15501  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15502  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15503  finalCreateInfo.pUserData,
15504  dedicatedBuffer,
15505  dedicatedBufferUsage,
15506  dedicatedImage,
15507  allocationCount,
15508  pAllocations);
15509  }
15510  }
15511  else
15512  {
15513  VkResult res = blockVector->Allocate(
15514  m_CurrentFrameIndex.load(),
15515  size,
15516  alignment,
15517  finalCreateInfo,
15518  suballocType,
15519  allocationCount,
15520  pAllocations);
15521  if(res == VK_SUCCESS)
15522  {
15523  return res;
15524  }
15525 
15526  // Allocation from the block vector failed: try dedicated memory.
15527  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15528  {
15529  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15530  }
15531  else
15532  {
15533  res = AllocateDedicatedMemory(
15534  size,
15535  suballocType,
15536  memTypeIndex,
15537  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15538  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15539  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15540  finalCreateInfo.pUserData,
15541  dedicatedBuffer,
15542  dedicatedBufferUsage,
15543  dedicatedImage,
15544  allocationCount,
15545  pAllocations);
15546  if(res == VK_SUCCESS)
15547  {
15548  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
15549  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
15550  return VK_SUCCESS;
15551  }
15552  else
15553  {
15554  // Everything failed: Return error code.
15555  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15556  return res;
15557  }
15558  }
15559  }
15560 }
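/*
Summary of the strategy above, with a concrete example assuming the default
256 MiB preferred block size on a large heap: a request larger than 128 MiB
(more than preferredBlockSize / 2) goes straight to dedicated memory, unless
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is set or a custom pool is used.
Otherwise the block vector is tried first, and dedicated memory serves only as
a fallback when suballocation fails.
*/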
15561 
15562 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15563  VkDeviceSize size,
15564  VmaSuballocationType suballocType,
15565  uint32_t memTypeIndex,
15566  bool withinBudget,
15567  bool map,
15568  bool isUserDataString,
15569  void* pUserData,
15570  VkBuffer dedicatedBuffer,
15571  VkBufferUsageFlags dedicatedBufferUsage,
15572  VkImage dedicatedImage,
15573  size_t allocationCount,
15574  VmaAllocation* pAllocations)
15575 {
15576  VMA_ASSERT(allocationCount > 0 && pAllocations);
15577 
15578  if(withinBudget)
15579  {
15580  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15581  VmaBudget heapBudget = {};
15582  GetBudget(&heapBudget, heapIndex, 1);
15583  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
15584  {
15585  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15586  }
15587  }
15588 
15589  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
15590  allocInfo.memoryTypeIndex = memTypeIndex;
15591  allocInfo.allocationSize = size;
15592 
15593 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15594  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
15595  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15596  {
15597  if(dedicatedBuffer != VK_NULL_HANDLE)
15598  {
15599  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
15600  dedicatedAllocInfo.buffer = dedicatedBuffer;
15601  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
15602  }
15603  else if(dedicatedImage != VK_NULL_HANDLE)
15604  {
15605  dedicatedAllocInfo.image = dedicatedImage;
15606  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
15607  }
15608  }
15609 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15610 
15611 #if VMA_BUFFER_DEVICE_ADDRESS
15612  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
15613  if(m_UseKhrBufferDeviceAddress)
15614  {
15615  bool canContainBufferWithDeviceAddress = true;
15616  if(dedicatedBuffer != VK_NULL_HANDLE)
15617  {
15618  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
15619  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
15620  }
15621  else if(dedicatedImage != VK_NULL_HANDLE)
15622  {
15623  canContainBufferWithDeviceAddress = false;
15624  }
15625  if(canContainBufferWithDeviceAddress)
15626  {
15627  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
15628  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
15629  }
15630  }
15631 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
15632 
15633  size_t allocIndex;
15634  VkResult res = VK_SUCCESS;
15635  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15636  {
15637  res = AllocateDedicatedMemoryPage(
15638  size,
15639  suballocType,
15640  memTypeIndex,
15641  allocInfo,
15642  map,
15643  isUserDataString,
15644  pUserData,
15645  pAllocations + allocIndex);
15646  if(res != VK_SUCCESS)
15647  {
15648  break;
15649  }
15650  }
15651 
15652  if(res == VK_SUCCESS)
15653  {
15654  // Register them in m_pDedicatedAllocations.
15655  {
15656  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15657  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15658  VMA_ASSERT(pDedicatedAllocations);
15659  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15660  {
15661  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
15662  }
15663  }
15664 
15665  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
15666  }
15667  else
15668  {
15669  // Free all already created allocations.
15670  while(allocIndex--)
15671  {
15672  VmaAllocation currAlloc = pAllocations[allocIndex];
15673  VkDeviceMemory hMemory = currAlloc->GetMemory();
15674 
15675  /*
15676  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15677  before vkFreeMemory.
15678 
15679  if(currAlloc->GetMappedData() != VMA_NULL)
15680  {
15681  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15682  }
15683  */
15684 
15685  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
15686  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
15687  currAlloc->SetUserData(this, VMA_NULL);
15688  m_AllocationObjectAllocator.Free(currAlloc);
15689  }
15690 
15691  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15692  }
15693 
15694  return res;
15695 }
15696 
15697 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15698  VkDeviceSize size,
15699  VmaSuballocationType suballocType,
15700  uint32_t memTypeIndex,
15701  const VkMemoryAllocateInfo& allocInfo,
15702  bool map,
15703  bool isUserDataString,
15704  void* pUserData,
15705  VmaAllocation* pAllocation)
15706 {
15707  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15708  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15709  if(res < 0)
15710  {
15711  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15712  return res;
15713  }
15714 
15715  void* pMappedData = VMA_NULL;
15716  if(map)
15717  {
15718  res = (*m_VulkanFunctions.vkMapMemory)(
15719  m_hDevice,
15720  hMemory,
15721  0,
15722  VK_WHOLE_SIZE,
15723  0,
15724  &pMappedData);
15725  if(res < 0)
15726  {
15727  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15728  FreeVulkanMemory(memTypeIndex, size, hMemory);
15729  return res;
15730  }
15731  }
15732 
15733  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
15734  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15735  (*pAllocation)->SetUserData(this, pUserData);
15736  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15737  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15738  {
15739  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15740  }
15741 
15742  return VK_SUCCESS;
15743 }
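/*
Illustrative sketch (not part of the library): requesting a persistently mapped
allocation from application code, which exercises the `map == true` path above.
`allocator` is assumed to exist and `myData`/`myDataSize` stand for application
data; error handling is omitted.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    memcpy(allocInfo.pMappedData, myData, myDataSize); // Stays mapped until the allocation is destroyed.
*/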
15744 
15745 void VmaAllocator_T::GetBufferMemoryRequirements(
15746  VkBuffer hBuffer,
15747  VkMemoryRequirements& memReq,
15748  bool& requiresDedicatedAllocation,
15749  bool& prefersDedicatedAllocation) const
15750 {
15751 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15752  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15753  {
15754  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15755  memReqInfo.buffer = hBuffer;
15756 
15757  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15758 
15759  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15760  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15761 
15762  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15763 
15764  memReq = memReq2.memoryRequirements;
15765  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15766  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15767  }
15768  else
15769 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15770  {
15771  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15772  requiresDedicatedAllocation = false;
15773  prefersDedicatedAllocation = false;
15774  }
15775 }
15776 
15777 void VmaAllocator_T::GetImageMemoryRequirements(
15778  VkImage hImage,
15779  VkMemoryRequirements& memReq,
15780  bool& requiresDedicatedAllocation,
15781  bool& prefersDedicatedAllocation) const
15782 {
15783 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15784  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15785  {
15786  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15787  memReqInfo.image = hImage;
15788 
15789  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15790 
15791  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15792  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
15793 
15794  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15795 
15796  memReq = memReq2.memoryRequirements;
15797  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15798  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15799  }
15800  else
15801 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15802  {
15803  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15804  requiresDedicatedAllocation = false;
15805  prefersDedicatedAllocation = false;
15806  }
15807 }
15808 
15809 VkResult VmaAllocator_T::AllocateMemory(
15810  const VkMemoryRequirements& vkMemReq,
15811  bool requiresDedicatedAllocation,
15812  bool prefersDedicatedAllocation,
15813  VkBuffer dedicatedBuffer,
15814  VkBufferUsageFlags dedicatedBufferUsage,
15815  VkImage dedicatedImage,
15816  const VmaAllocationCreateInfo& createInfo,
15817  VmaSuballocationType suballocType,
15818  size_t allocationCount,
15819  VmaAllocation* pAllocations)
15820 {
15821  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15822 
15823  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15824 
15825  if(vkMemReq.size == 0)
15826  {
15827  return VK_ERROR_VALIDATION_FAILED_EXT;
15828  }
15829  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15830  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15831  {
15832  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15833  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15834  }
15835  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15836  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
15837  {
15838  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15839  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15840  }
15841  if(requiresDedicatedAllocation)
15842  {
15843  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15844  {
15845  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15846  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15847  }
15848  if(createInfo.pool != VK_NULL_HANDLE)
15849  {
15850  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15851  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15852  }
15853  }
15854  if((createInfo.pool != VK_NULL_HANDLE) &&
15855  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15856  {
15857  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15858  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15859  }
15860 
15861  if(createInfo.pool != VK_NULL_HANDLE)
15862  {
15863  const VkDeviceSize alignmentForPool = VMA_MAX(
15864  vkMemReq.alignment,
15865  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15866 
15867  VmaAllocationCreateInfo createInfoForPool = createInfo;
15868  // If memory type is not HOST_VISIBLE, disable MAPPED.
15869  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15870  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15871  {
15872  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15873  }
15874 
15875  return createInfo.pool->m_BlockVector.Allocate(
15876  m_CurrentFrameIndex.load(),
15877  vkMemReq.size,
15878  alignmentForPool,
15879  createInfoForPool,
15880  suballocType,
15881  allocationCount,
15882  pAllocations);
15883  }
15884  else
15885  {
15886  // Bit mask of Vulkan memory types acceptable for this allocation.
15887  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15888  uint32_t memTypeIndex = UINT32_MAX;
15889  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15890  if(res == VK_SUCCESS)
15891  {
15892  VkDeviceSize alignmentForMemType = VMA_MAX(
15893  vkMemReq.alignment,
15894  GetMemoryTypeMinAlignment(memTypeIndex));
15895 
15896  res = AllocateMemoryOfType(
15897  vkMemReq.size,
15898  alignmentForMemType,
15899  requiresDedicatedAllocation || prefersDedicatedAllocation,
15900  dedicatedBuffer,
15901  dedicatedBufferUsage,
15902  dedicatedImage,
15903  createInfo,
15904  memTypeIndex,
15905  suballocType,
15906  allocationCount,
15907  pAllocations);
15908  // Succeeded on first try.
15909  if(res == VK_SUCCESS)
15910  {
15911  return res;
15912  }
15913  // Allocation from this memory type failed. Try other compatible memory types.
15914  else
15915  {
15916  for(;;)
15917  {
15918  // Remove old memTypeIndex from list of possibilities.
15919  memoryTypeBits &= ~(1u << memTypeIndex);
15920  // Find alternative memTypeIndex.
15921  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15922  if(res == VK_SUCCESS)
15923  {
15924  alignmentForMemType = VMA_MAX(
15925  vkMemReq.alignment,
15926  GetMemoryTypeMinAlignment(memTypeIndex));
15927 
15928  res = AllocateMemoryOfType(
15929  vkMemReq.size,
15930  alignmentForMemType,
15931  requiresDedicatedAllocation || prefersDedicatedAllocation,
15932  dedicatedBuffer,
15933  dedicatedBufferUsage,
15934  dedicatedImage,
15935  createInfo,
15936  memTypeIndex,
15937  suballocType,
15938  allocationCount,
15939  pAllocations);
15940  // Allocation from this alternative memory type succeeded.
15941  if(res == VK_SUCCESS)
15942  {
15943  return res;
15944  }
15945  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15946  }
15947  // No other matching memory type index could be found.
15948  else
15949  {
15950  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15951  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15952  }
15953  }
15954  }
15955  }
15956  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15957  else
15958  return res;
15959  }
15960 }
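/*
Illustrative sketch (not part of the library): the same find-then-allocate
pattern as above, driven manually from application code to inspect which memory
type would be chosen. `allocator` and `bufCreateInfo` are assumed to exist.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
    // res == VK_ERROR_FEATURE_NOT_PRESENT means no memory type matches, which
    // mirrors the loop above once memoryTypeBits runs out of candidates.
*/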
15961 
15962 void VmaAllocator_T::FreeMemory(
15963  size_t allocationCount,
15964  const VmaAllocation* pAllocations)
15965 {
15966  VMA_ASSERT(pAllocations);
15967 
15968  for(size_t allocIndex = allocationCount; allocIndex--; )
15969  {
15970  VmaAllocation allocation = pAllocations[allocIndex];
15971 
15972  if(allocation != VK_NULL_HANDLE)
15973  {
15974  if(TouchAllocation(allocation))
15975  {
15976  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15977  {
15978  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15979  }
15980 
15981  switch(allocation->GetType())
15982  {
15983  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15984  {
15985  VmaBlockVector* pBlockVector = VMA_NULL;
15986  VmaPool hPool = allocation->GetBlock()->GetParentPool();
15987  if(hPool != VK_NULL_HANDLE)
15988  {
15989  pBlockVector = &hPool->m_BlockVector;
15990  }
15991  else
15992  {
15993  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15994  pBlockVector = m_pBlockVectors[memTypeIndex];
15995  }
15996  pBlockVector->Free(allocation);
15997  }
15998  break;
15999  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16000  FreeDedicatedMemory(allocation);
16001  break;
16002  default:
16003  VMA_ASSERT(0);
16004  }
16005  }
16006 
16007  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
16008  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16009  allocation->SetUserData(this, VMA_NULL);
16010  m_AllocationObjectAllocator.Free(allocation);
16011  }
16012  }
16013 }
16014 
16015 VkResult VmaAllocator_T::ResizeAllocation(
16016  const VmaAllocation alloc,
16017  VkDeviceSize newSize)
16018 {
16019  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16020  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16021  {
16022  return VK_ERROR_VALIDATION_FAILED_EXT;
16023  }
16024  if(newSize == alloc->GetSize())
16025  {
16026  return VK_SUCCESS;
16027  }
16028  return VK_ERROR_OUT_OF_POOL_MEMORY;
16029 }
16030 
16031 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16032 {
16033  // Initialize.
16034  InitStatInfo(pStats->total);
16035  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16036  InitStatInfo(pStats->memoryType[i]);
16037  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16038  InitStatInfo(pStats->memoryHeap[i]);
16039 
16040  // Process default pools.
16041  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16042  {
16043  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16044  VMA_ASSERT(pBlockVector);
16045  pBlockVector->AddStats(pStats);
16046  }
16047 
16048  // Process custom pools.
16049  {
16050  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16051  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16052  {
16053  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16054  }
16055  }
16056 
16057  // Process dedicated allocations.
16058  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16059  {
16060  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16061  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16062  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16063  VMA_ASSERT(pDedicatedAllocVector);
16064  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16065  {
16066  VmaStatInfo allocationStatInfo;
16067  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16068  VmaAddStatInfo(pStats->total, allocationStatInfo);
16069  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16070  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16071  }
16072  }
16073 
16074  // Postprocess.
16075  VmaPostprocessCalcStatInfo(pStats->total);
16076  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16077  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16078  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16079  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16080 }
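/*
Illustrative sketch (not part of the library): fetching the statistics computed
above through the public API. `allocator` is assumed to exist.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used bytes: %llu in %u allocations\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount);
*/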
16081 
16082 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16083 {
16084 #if VMA_MEMORY_BUDGET
16085  if(m_UseExtMemoryBudget)
16086  {
16087  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16088  {
16089  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16090  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16091  {
16092  const uint32_t heapIndex = firstHeap + i;
16093 
16094  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16095  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16096 
16097  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16098  {
16099  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16100  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16101  }
16102  else
16103  {
16104  outBudget->usage = 0;
16105  }
16106 
16107  // Take MIN with the heap size, because an explicit HeapSizeLimit is already applied to it.
16108  outBudget->budget = VMA_MIN(
16109  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16110  }
16111  }
16112  else
16113  {
16114  UpdateVulkanBudget(); // Outside of mutex lock
16115  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16116  }
16117  }
16118  else
16119 #endif
16120  {
16121  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16122  {
16123  const uint32_t heapIndex = firstHeap + i;
16124 
16125  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16126  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16127 
16128  outBudget->usage = outBudget->blockBytes;
16129  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
16130  }
16131  }
16132 }
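/*
Illustrative sketch (not part of the library): querying the budget through the
public API. vmaGetBudget fills one VmaBudget struct per memory heap.

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    // Comparing budgets[heapIndex].usage against budgets[heapIndex].budget
    // tells how close heap heapIndex is to the limit estimated above.
*/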
16133 
16134 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002, the PCI vendor ID of AMD.
16135 
16136 VkResult VmaAllocator_T::DefragmentationBegin(
16137  const VmaDefragmentationInfo2& info,
16138  VmaDefragmentationStats* pStats,
16139  VmaDefragmentationContext* pContext)
16140 {
16141  if(info.pAllocationsChanged != VMA_NULL)
16142  {
16143  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16144  }
16145 
16146  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16147  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16148 
16149  (*pContext)->AddPools(info.poolCount, info.pPools);
16150  (*pContext)->AddAllocations(
16151  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16152 
16153  VkResult res = (*pContext)->Defragment(
16154  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16155  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16156  info.commandBuffer, pStats, info.flags);
16157 
16158  if(res != VK_NOT_READY)
16159  {
16160  vma_delete(this, *pContext);
16161  *pContext = VMA_NULL;
16162  }
16163 
16164  return res;
16165 }
16166 
16167 VkResult VmaAllocator_T::DefragmentationEnd(
16168  VmaDefragmentationContext context)
16169 {
16170  vma_delete(this, context);
16171  return VK_SUCCESS;
16172 }
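/*
Illustrative sketch (not part of the library): the begin/end pair above as seen
from application code, defragmenting a set of allocations on the CPU.
`allocator`, `allocations`, and `allocCount` are assumed; error handling and
the required recreation/rebinding of buffers afterwards are omitted.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // With no command buffer given, CPU defragmentation completes inside
    // Begin; End just releases the context.
    vmaDefragmentationEnd(allocator, defragCtx);
*/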
16173 
16174 VkResult VmaAllocator_T::DefragmentationPassBegin(
16175  VmaDefragmentationPassInfo* pInfo,
16176  VmaDefragmentationContext context)
16177 {
16178  return context->DefragmentPassBegin(pInfo);
16179 }
16180 VkResult VmaAllocator_T::DefragmentationPassEnd(
16181  VmaDefragmentationContext context)
16182 {
16183  return context->DefragmentPassEnd();
16184 
16185 }
16186 
16187 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16188 {
16189  if(hAllocation->CanBecomeLost())
16190  {
16191  /*
16192  Warning: This is a carefully designed algorithm.
16193  Do not modify unless you really know what you're doing :)
16194  */
16195  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16196  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16197  for(;;)
16198  {
16199  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16200  {
16201  pAllocationInfo->memoryType = UINT32_MAX;
16202  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16203  pAllocationInfo->offset = 0;
16204  pAllocationInfo->size = hAllocation->GetSize();
16205  pAllocationInfo->pMappedData = VMA_NULL;
16206  pAllocationInfo->pUserData = hAllocation->GetUserData();
16207  return;
16208  }
16209  else if(localLastUseFrameIndex == localCurrFrameIndex)
16210  {
16211  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16212  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16213  pAllocationInfo->offset = hAllocation->GetOffset();
16214  pAllocationInfo->size = hAllocation->GetSize();
16215  pAllocationInfo->pMappedData = VMA_NULL;
16216  pAllocationInfo->pUserData = hAllocation->GetUserData();
16217  return;
16218  }
16219  else // Last use time earlier than current time.
16220  {
16221  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16222  {
16223  localLastUseFrameIndex = localCurrFrameIndex;
16224  }
16225  }
16226  }
16227  }
16228  else
16229  {
16230 #if VMA_STATS_STRING_ENABLED
16231  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16232  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16233  for(;;)
16234  {
16235  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16236  if(localLastUseFrameIndex == localCurrFrameIndex)
16237  {
16238  break;
16239  }
16240  else // Last use time earlier than current time.
16241  {
16242  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16243  {
16244  localLastUseFrameIndex = localCurrFrameIndex;
16245  }
16246  }
16247  }
16248 #endif
16249 
16250  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16251  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16252  pAllocationInfo->offset = hAllocation->GetOffset();
16253  pAllocationInfo->size = hAllocation->GetSize();
16254  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16255  pAllocationInfo->pUserData = hAllocation->GetUserData();
16256  }
16257 }
16258 
16259 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16260 {
16261  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16262  if(hAllocation->CanBecomeLost())
16263  {
16264  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16265  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16266  for(;;)
16267  {
16268  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16269  {
16270  return false;
16271  }
16272  else if(localLastUseFrameIndex == localCurrFrameIndex)
16273  {
16274  return true;
16275  }
16276  else // Last use time earlier than current time.
16277  {
16278  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16279  {
16280  localLastUseFrameIndex = localCurrFrameIndex;
16281  }
16282  }
16283  }
16284  }
16285  else
16286  {
16287 #if VMA_STATS_STRING_ENABLED
16288  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16289  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16290  for(;;)
16291  {
16292  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16293  if(localLastUseFrameIndex == localCurrFrameIndex)
16294  {
16295  break;
16296  }
16297  else // Last use time earlier than current time.
16298  {
16299  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16300  {
16301  localLastUseFrameIndex = localCurrFrameIndex;
16302  }
16303  }
16304  }
16305 #endif
16306 
16307  return true;
16308  }
16309 }
16310 
16311 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16312 {
16313  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16314 
16315  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16316 
16317  if(newCreateInfo.maxBlockCount == 0)
16318  {
16319  newCreateInfo.maxBlockCount = SIZE_MAX;
16320  }
16321  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16322  {
16323  return VK_ERROR_INITIALIZATION_FAILED;
16324  }
16325  // Memory type index out of range or forbidden.
16326  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16327  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16328  {
16329  return VK_ERROR_FEATURE_NOT_PRESENT;
16330  }
16331 
16332  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16333 
16334  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16335 
16336  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16337  if(res != VK_SUCCESS)
16338  {
16339  vma_delete(this, *pPool);
16340  *pPool = VMA_NULL;
16341  return res;
16342  }
16343 
16344  // Add to m_Pools.
16345  {
16346  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16347  (*pPool)->SetId(m_NextPoolId++);
16348  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16349  }
16350 
16351  return VK_SUCCESS;
16352 }
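/*
Illustrative sketch (not part of the library): creating a custom pool through
the path above. `allocator` is assumed to exist, and `memTypeIndex` would
normally come from one of the vmaFindMemoryTypeIndex* functions; error handling
is omitted.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 0 would mean: use the preferred size.
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/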
16353 
16354 void VmaAllocator_T::DestroyPool(VmaPool pool)
16355 {
16356  // Remove from m_Pools.
16357  {
16358  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16359  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16360  VMA_ASSERT(success && "Pool not found in Allocator.");
16361  }
16362 
16363  vma_delete(this, pool);
16364 }
16365 
16366 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16367 {
16368  pool->m_BlockVector.GetPoolStats(pPoolStats);
16369 }
16370 
16371 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16372 {
16373  m_CurrentFrameIndex.store(frameIndex);
16374 
16375 #if VMA_MEMORY_BUDGET
16376  if(m_UseExtMemoryBudget)
16377  {
16378  UpdateVulkanBudget();
16379  }
16380 #endif // #if VMA_MEMORY_BUDGET
16381 }
16382 
16383 void VmaAllocator_T::MakePoolAllocationsLost(
16384  VmaPool hPool,
16385  size_t* pLostAllocationCount)
16386 {
16387  hPool->m_BlockVector.MakePoolAllocationsLost(
16388  m_CurrentFrameIndex.load(),
16389  pLostAllocationCount);
16390 }
16391 
16392 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
16393 {
16394  return hPool->m_BlockVector.CheckCorruption();
16395 }
16396 
16397 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16398 {
16399  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16400 
16401  // Process default pools.
16402  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16403  {
16404  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16405  {
16406  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16407  VMA_ASSERT(pBlockVector);
16408  VkResult localRes = pBlockVector->CheckCorruption();
16409  switch(localRes)
16410  {
16411  case VK_ERROR_FEATURE_NOT_PRESENT:
16412  break;
16413  case VK_SUCCESS:
16414  finalRes = VK_SUCCESS;
16415  break;
16416  default:
16417  return localRes;
16418  }
16419  }
16420  }
16421 
16422  // Process custom pools.
16423  {
16424  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16425  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16426  {
16427  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16428  {
16429  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16430  switch(localRes)
16431  {
16432  case VK_ERROR_FEATURE_NOT_PRESENT:
16433  break;
16434  case VK_SUCCESS:
16435  finalRes = VK_SUCCESS;
16436  break;
16437  default:
16438  return localRes;
16439  }
16440  }
16441  }
16442  }
16443 
16444  return finalRes;
16445 }
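/*
Illustrative sketch (not part of the library): triggering the corruption check
above from application code. It is only meaningful when margins and corruption
detection are compiled in (VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION).

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // All memory types.
    // VK_SUCCESS: memory was checked and no corruption was found.
    // VK_ERROR_FEATURE_NOT_PRESENT: no memory type could be checked.
    // VK_ERROR_VALIDATION_FAILED_EXT: corruption was detected.
*/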
16446 
16447 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
16448 {
16449  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
16450  (*pAllocation)->InitLost();
16451 }
16452 
16453 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
16454 {
16455  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
16456 
16457  // HeapSizeLimit is in effect for this heap.
16458  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16459  {
16460  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16461  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
16462  for(;;)
16463  {
16464  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
16465  if(blockBytesAfterAllocation > heapSize)
16466  {
16467  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16468  }
16469  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
16470  {
16471  break;
16472  }
16473  }
16474  }
16475  else
16476  {
16477  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
16478  }
16479 
16480  // VULKAN CALL vkAllocateMemory.
16481  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
16482 
16483  if(res == VK_SUCCESS)
16484  {
16485 #if VMA_MEMORY_BUDGET
16486  ++m_Budget.m_OperationsSinceBudgetFetch;
16487 #endif
16488 
16489  // Informative callback.
16490  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
16491  {
16492  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
16493  }
16494  }
16495  else
16496  {
16497  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
16498  }
16499 
16500  return res;
16501 }
16502 
16503 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
16504 {
16505  // Informative callback.
16506  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
16507  {
16508  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
16509  }
16510 
16511  // VULKAN CALL vkFreeMemory.
16512  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
16513 
16514  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
16515 }
16516 
16517 VkResult VmaAllocator_T::BindVulkanBuffer(
16518  VkDeviceMemory memory,
16519  VkDeviceSize memoryOffset,
16520  VkBuffer buffer,
16521  const void* pNext)
16522 {
16523  if(pNext != VMA_NULL)
16524  {
16525 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16526  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16527  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
16528  {
16529  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
16530  bindBufferMemoryInfo.pNext = pNext;
16531  bindBufferMemoryInfo.buffer = buffer;
16532  bindBufferMemoryInfo.memory = memory;
16533  bindBufferMemoryInfo.memoryOffset = memoryOffset;
16534  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
16535  }
16536  else
16537 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16538  {
16539  return VK_ERROR_EXTENSION_NOT_PRESENT;
16540  }
16541  }
16542  else
16543  {
16544  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
16545  }
16546 }
16547 
16548 VkResult VmaAllocator_T::BindVulkanImage(
16549  VkDeviceMemory memory,
16550  VkDeviceSize memoryOffset,
16551  VkImage image,
16552  const void* pNext)
16553 {
16554  if(pNext != VMA_NULL)
16555  {
16556 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16557  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16558  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
16559  {
16560  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
16561  bindImageMemoryInfo.pNext = pNext;
16562  bindImageMemoryInfo.image = image;
16563  bindImageMemoryInfo.memory = memory;
16564  bindImageMemoryInfo.memoryOffset = memoryOffset;
16565  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
16566  }
16567  else
16568 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16569  {
16570  return VK_ERROR_EXTENSION_NOT_PRESENT;
16571  }
16572  }
16573  else
16574  {
16575  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
16576  }
16577 }
16578 
16579 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
16580 {
16581  if(hAllocation->CanBecomeLost())
16582  {
16583  return VK_ERROR_MEMORY_MAP_FAILED;
16584  }
16585 
16586  switch(hAllocation->GetType())
16587  {
16588  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16589  {
16590  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16591  char *pBytes = VMA_NULL;
16592  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
16593  if(res == VK_SUCCESS)
16594  {
16595  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
16596  hAllocation->BlockAllocMap();
16597  }
16598  return res;
16599  }
16600  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16601  return hAllocation->DedicatedAllocMap(this, ppData);
16602  default:
16603  VMA_ASSERT(0);
16604  return VK_ERROR_MEMORY_MAP_FAILED;
16605  }
16606 }
16607 
16608 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16609 {
16610  switch(hAllocation->GetType())
16611  {
16612  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16613  {
16614  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16615  hAllocation->BlockAllocUnmap();
16616  pBlock->Unmap(this, 1);
16617  }
16618  break;
16619  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16620  hAllocation->DedicatedAllocUnmap(this);
16621  break;
16622  default:
16623  VMA_ASSERT(0);
16624  }
16625 }
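/*
Illustrative sketch (not part of the library): the Map/Unmap pair above as used
through the public API. `allocator` and a HOST_VISIBLE `alloc` are assumed;
`myData`/`myDataSize` stand for application data.

    void* mappedData;
    if(vmaMapMemory(allocator, alloc, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, myData, myDataSize);
        vmaUnmapMemory(allocator, alloc);
    }
    // Mapping is reference-counted per allocation, so nested Map/Unmap pairs
    // and combining with VMA_ALLOCATION_CREATE_MAPPED_BIT are both valid.
*/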
16626 
16627 VkResult VmaAllocator_T::BindBufferMemory(
16628  VmaAllocation hAllocation,
16629  VkDeviceSize allocationLocalOffset,
16630  VkBuffer hBuffer,
16631  const void* pNext)
16632 {
16633  VkResult res = VK_SUCCESS;
16634  switch(hAllocation->GetType())
16635  {
16636  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16637  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16638  break;
16639  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16640  {
16641  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16642  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16643  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16644  break;
16645  }
16646  default:
16647  VMA_ASSERT(0);
16648  }
16649  return res;
16650 }
16651 
16652 VkResult VmaAllocator_T::BindImageMemory(
16653  VmaAllocation hAllocation,
16654  VkDeviceSize allocationLocalOffset,
16655  VkImage hImage,
16656  const void* pNext)
16657 {
16658  VkResult res = VK_SUCCESS;
16659  switch(hAllocation->GetType())
16660  {
16661  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16662  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16663  break;
16664  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16665  {
16666  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16667  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16668  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16669  break;
16670  }
16671  default:
16672  VMA_ASSERT(0);
16673  }
16674  return res;
16675 }
16676 
16677 void VmaAllocator_T::FlushOrInvalidateAllocation(
16678  VmaAllocation hAllocation,
16679  VkDeviceSize offset, VkDeviceSize size,
16680  VMA_CACHE_OPERATION op)
16681 {
16682  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
16683  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
16684  {
16685  const VkDeviceSize allocationSize = hAllocation->GetSize();
16686  VMA_ASSERT(offset <= allocationSize);
16687 
16688  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
16689 
16690  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
16691  memRange.memory = hAllocation->GetMemory();
16692 
16693  switch(hAllocation->GetType())
16694  {
16695  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16696  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16697  if(size == VK_WHOLE_SIZE)
16698  {
16699  memRange.size = allocationSize - memRange.offset;
16700  }
16701  else
16702  {
16703  VMA_ASSERT(offset + size <= allocationSize);
16704  memRange.size = VMA_MIN(
16705  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
16706  allocationSize - memRange.offset);
16707  }
16708  break;
16709 
16710  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16711  {
16712  // 1. Still within this allocation.
16713  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16714  if(size == VK_WHOLE_SIZE)
16715  {
16716  size = allocationSize - offset;
16717  }
16718  else
16719  {
16720  VMA_ASSERT(offset + size <= allocationSize);
16721  }
16722  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
16723 
16724  // 2. Adjust to whole block.
16725  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
16726  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
16727  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
16728  memRange.offset += allocationOffset;
16729  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
16730 
16731  break;
16732  }
16733 
16734  default:
16735  VMA_ASSERT(0);
16736  }
16737 
16738  switch(op)
16739  {
16740  case VMA_CACHE_FLUSH:
16741  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
16742  break;
16743  case VMA_CACHE_INVALIDATE:
16744  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
16745  break;
16746  default:
16747  VMA_ASSERT(0);
16748  }
16749  }
16750  // else: Just ignore this call.
16751 }
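/*
Worked example of the alignment above, assuming nonCoherentAtomSize = 64 and a
dedicated allocation of size 1000: a request with offset = 100 and size = 200
yields memRange.offset = 64 (100 aligned down) and memRange.size =
min(alignUp(200 + 36, 64), 1000 - 64) = min(256, 936) = 256, so the flushed
range is [64, 320), the smallest atom-aligned range covering [100, 300).
*/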
16752 
16753 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16754 {
16755  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16756 
16757  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16758  {
16759  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16760  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16761  VMA_ASSERT(pDedicatedAllocations);
16762  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16763  VMA_ASSERT(success);
16764  }
16765 
16766  VkDeviceMemory hMemory = allocation->GetMemory();
16767 
16768  /*
16769  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16770  before vkFreeMemory.
16771 
16772  if(allocation->GetMappedData() != VMA_NULL)
16773  {
16774  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16775  }
16776  */
16777 
16778  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16779 
16780  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16781 }
16782 
16783 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16784 {
16785  VkBufferCreateInfo dummyBufCreateInfo;
16786  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16787 
16788  uint32_t memoryTypeBits = 0;
16789 
16790  // Create buffer.
16791  VkBuffer buf = VK_NULL_HANDLE;
16792  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16793  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16794  if(res == VK_SUCCESS)
16795  {
16796  // Query for supported memory types.
16797  VkMemoryRequirements memReq;
16798  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16799  memoryTypeBits = memReq.memoryTypeBits;
16800 
16801  // Destroy buffer.
16802  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16803  }
16804 
16805  return memoryTypeBits;
16806 }
16807 
16808 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
16809 {
16810  // Make sure memory information is already fetched.
16811  VMA_ASSERT(GetMemoryTypeCount() > 0);
16812 
16813  uint32_t memoryTypeBits = UINT32_MAX;
16814 
16815  if(!m_UseAmdDeviceCoherentMemory)
16816  {
16817  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
16818  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16819  {
16820  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
16821  {
16822  memoryTypeBits &= ~(1u << memTypeIndex);
16823  }
16824  }
16825  }
16826 
16827  return memoryTypeBits;
16828 }
16829 
16830 #if VMA_MEMORY_BUDGET
16831 
16832 void VmaAllocator_T::UpdateVulkanBudget()
16833 {
16834  VMA_ASSERT(m_UseExtMemoryBudget);
16835 
16836  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16837 
16838  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16839  VmaPnextChainPushFront(&memProps, &budgetProps);
16840 
16841  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16842 
16843  {
16844  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16845 
16846  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16847  {
16848  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16849  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16850  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16851 
16852  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
16853  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
16854  {
16855  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16856  }
16857  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
16858  {
16859  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
16860  }
16861  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
16862  {
16863  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16864  }
16865  }
16866  m_Budget.m_OperationsSinceBudgetFetch = 0;
16867  }
16868 }
16869 
16870 #endif // #if VMA_MEMORY_BUDGET
16871 
16872 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16873 {
16874  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16875  !hAllocation->CanBecomeLost() &&
16876  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16877  {
16878  void* pData = VMA_NULL;
16879  VkResult res = Map(hAllocation, &pData);
16880  if(res == VK_SUCCESS)
16881  {
16882  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16883  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16884  Unmap(hAllocation);
16885  }
16886  else
16887  {
16888  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16889  }
16890  }
16891 }
16892 
16893 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16894 {
16895  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16896  if(memoryTypeBits == UINT32_MAX)
16897  {
16898  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16899  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16900  }
16901  return memoryTypeBits;
16902 }
16903 
16904 #if VMA_STATS_STRING_ENABLED
16905 
16906 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16907 {
16908  bool dedicatedAllocationsStarted = false;
16909  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16910  {
16911  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16912  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16913  VMA_ASSERT(pDedicatedAllocVector);
16914  if(pDedicatedAllocVector->empty() == false)
16915  {
16916  if(dedicatedAllocationsStarted == false)
16917  {
16918  dedicatedAllocationsStarted = true;
16919  json.WriteString("DedicatedAllocations");
16920  json.BeginObject();
16921  }
16922 
16923  json.BeginString("Type ");
16924  json.ContinueString(memTypeIndex);
16925  json.EndString();
16926 
16927  json.BeginArray();
16928 
16929  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
16930  {
16931  json.BeginObject(true);
16932  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
16933  hAlloc->PrintParameters(json);
16934  json.EndObject();
16935  }
16936 
16937  json.EndArray();
16938  }
16939  }
16940  if(dedicatedAllocationsStarted)
16941  {
16942  json.EndObject();
16943  }
16944 
16945  {
16946  bool allocationsStarted = false;
16947  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16948  {
16949  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
16950  {
16951  if(allocationsStarted == false)
16952  {
16953  allocationsStarted = true;
16954  json.WriteString("DefaultPools");
16955  json.BeginObject();
16956  }
16957 
16958  json.BeginString("Type ");
16959  json.ContinueString(memTypeIndex);
16960  json.EndString();
16961 
16962  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
16963  }
16964  }
16965  if(allocationsStarted)
16966  {
16967  json.EndObject();
16968  }
16969  }
16970 
16971  // Custom pools
16972  {
16973  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16974  const size_t poolCount = m_Pools.size();
16975  if(poolCount > 0)
16976  {
16977  json.WriteString("Pools");
16978  json.BeginObject();
16979  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
16980  {
16981  json.BeginString();
16982  json.ContinueString(m_Pools[poolIndex]->GetId());
16983  json.EndString();
16984 
16985  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
16986  }
16987  json.EndObject();
16988  }
16989  }
16990 }
16991 
16992 #endif // #if VMA_STATS_STRING_ENABLED
16993 
16994 ////////////////////////////////////////////////////////////////////////////////
16995 // Public interface
16996 
16997 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
16998  const VmaAllocatorCreateInfo* pCreateInfo,
16999  VmaAllocator* pAllocator)
17000 {
17001  VMA_ASSERT(pCreateInfo && pAllocator);
17002  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17003  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17004  VMA_DEBUG_LOG("vmaCreateAllocator");
17005  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17006  return (*pAllocator)->Init(pCreateInfo);
17007 }
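
/*
A minimal usage sketch. `instance`, `physicalDevice`, and `device` stand for
valid Vulkan handles created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/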
17008 
17009 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17010  VmaAllocator allocator)
17011 {
17012  if(allocator != VK_NULL_HANDLE)
17013  {
17014  VMA_DEBUG_LOG("vmaDestroyAllocator");
17015  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17016  vma_delete(&allocationCallbacks, allocator);
17017  }
17018 }
17019 
17020 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17021 {
17022  VMA_ASSERT(allocator && pAllocatorInfo);
17023  pAllocatorInfo->instance = allocator->m_hInstance;
17024  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17025  pAllocatorInfo->device = allocator->m_hDevice;
17026 }
17027 
17028 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17029  VmaAllocator allocator,
17030  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17031 {
17032  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17033  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17034 }
17035 
17036 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17037  VmaAllocator allocator,
17038  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17039 {
17040  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17041  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17042 }
17043 
17044 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17045  VmaAllocator allocator,
17046  uint32_t memoryTypeIndex,
17047  VkMemoryPropertyFlags* pFlags)
17048 {
17049  VMA_ASSERT(allocator && pFlags);
17050  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17051  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17052 }
17053 
17054 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17055  VmaAllocator allocator,
17056  uint32_t frameIndex)
17057 {
17058  VMA_ASSERT(allocator);
17059  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17060 
17061  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17062 
17063  allocator->SetCurrentFrameIndex(frameIndex);
17064 }
17065 
17066 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17067  VmaAllocator allocator,
17068  VmaStats* pStats)
17069 {
17070  VMA_ASSERT(allocator && pStats);
17071  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17072  allocator->CalculateStats(pStats);
17073 }
17074 
17075 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17076  VmaAllocator allocator,
17077  VmaBudget* pBudget)
17078 {
17079  VMA_ASSERT(allocator && pBudget);
17080  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17081  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17082 }
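
/*
A usage sketch: fetch the budget (e.g. once per frame) and check whether an
allocation of `allocSize` bytes would likely fit in a given heap. `heapIndex`
and `allocSize` are illustrative application values:

    VmaBudget budget[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budget);
    if(budget[heapIndex].usage + allocSize <= budget[heapIndex].budget)
    {
        // Allocating from this heap is unlikely to exceed the OS-reported budget.
    }
*/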
17083 
17084 #if VMA_STATS_STRING_ENABLED
17085 
17086 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17087  VmaAllocator allocator,
17088  char** ppStatsString,
17089  VkBool32 detailedMap)
17090 {
17091  VMA_ASSERT(allocator && ppStatsString);
17092  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17093 
17094  VmaStringBuilder sb(allocator);
17095  {
17096  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17097  json.BeginObject();
17098 
17099  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17100  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17101 
17102  VmaStats stats;
17103  allocator->CalculateStats(&stats);
17104 
17105  json.WriteString("Total");
17106  VmaPrintStatInfo(json, stats.total);
17107 
17108  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17109  {
17110  json.BeginString("Heap ");
17111  json.ContinueString(heapIndex);
17112  json.EndString();
17113  json.BeginObject();
17114 
17115  json.WriteString("Size");
17116  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17117 
17118  json.WriteString("Flags");
17119  json.BeginArray(true);
17120  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17121  {
17122  json.WriteString("DEVICE_LOCAL");
17123  }
17124  json.EndArray();
17125 
17126  json.WriteString("Budget");
17127  json.BeginObject();
17128  {
17129  json.WriteString("BlockBytes");
17130  json.WriteNumber(budget[heapIndex].blockBytes);
17131  json.WriteString("AllocationBytes");
17132  json.WriteNumber(budget[heapIndex].allocationBytes);
17133  json.WriteString("Usage");
17134  json.WriteNumber(budget[heapIndex].usage);
17135  json.WriteString("Budget");
17136  json.WriteNumber(budget[heapIndex].budget);
17137  }
17138  json.EndObject();
17139 
17140  if(stats.memoryHeap[heapIndex].blockCount > 0)
17141  {
17142  json.WriteString("Stats");
17143  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17144  }
17145 
17146  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17147  {
17148  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17149  {
17150  json.BeginString("Type ");
17151  json.ContinueString(typeIndex);
17152  json.EndString();
17153 
17154  json.BeginObject();
17155 
17156  json.WriteString("Flags");
17157  json.BeginArray(true);
17158  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17159  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17160  {
17161  json.WriteString("DEVICE_LOCAL");
17162  }
17163  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17164  {
17165  json.WriteString("HOST_VISIBLE");
17166  }
17167  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17168  {
17169  json.WriteString("HOST_COHERENT");
17170  }
17171  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17172  {
17173  json.WriteString("HOST_CACHED");
17174  }
17175  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17176  {
17177  json.WriteString("LAZILY_ALLOCATED");
17178  }
17179  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17180  {
17181  json.WriteString("PROTECTED");
17182  }
17183  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17184  {
17185  json.WriteString("DEVICE_COHERENT");
17186  }
17187  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17188  {
17189  json.WriteString("DEVICE_UNCACHED");
17190  }
17191  json.EndArray();
17192 
17193  if(stats.memoryType[typeIndex].blockCount > 0)
17194  {
17195  json.WriteString("Stats");
17196  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17197  }
17198 
17199  json.EndObject();
17200  }
17201  }
17202 
17203  json.EndObject();
17204  }
17205  if(detailedMap == VK_TRUE)
17206  {
17207  allocator->PrintDetailedMap(json);
17208  }
17209 
17210  json.EndObject();
17211  }
17212 
17213  const size_t len = sb.GetLength();
17214  char* const pChars = vma_new_array(allocator, char, len + 1);
17215  if(len > 0)
17216  {
17217  memcpy(pChars, sb.GetData(), len);
17218  }
17219  pChars[len] = '\0';
17220  *ppStatsString = pChars;
17221 }
17222 
17223 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17224  VmaAllocator allocator,
17225  char* pStatsString)
17226 {
17227  if(pStatsString != VMA_NULL)
17228  {
17229  VMA_ASSERT(allocator);
17230  size_t len = strlen(pStatsString);
17231  vma_delete_array(allocator, pStatsString, len + 1);
17232  }
17233 }
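
/*
A usage sketch pairing the two functions above:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
    // Write statsString to a log or a .json file for offline inspection...
    vmaFreeStatsString(allocator, statsString);
*/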
17234 
17235 #endif // #if VMA_STATS_STRING_ENABLED
17236 
17237 /*
17238 This function is not protected by any mutex because it just reads immutable data.
17239 */
17240 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17241  VmaAllocator allocator,
17242  uint32_t memoryTypeBits,
17243  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17244  uint32_t* pMemoryTypeIndex)
17245 {
17246  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17247  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17248  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17249 
17250  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17251 
17252  if(pAllocationCreateInfo->memoryTypeBits != 0)
17253  {
17254  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17255  }
17256 
17257  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17258  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17259  uint32_t notPreferredFlags = 0;
17260 
17261  // Convert usage to requiredFlags and preferredFlags.
17262  switch(pAllocationCreateInfo->usage)
17263  {
17264  case VMA_MEMORY_USAGE_UNKNOWN:
17265  break;
17266  case VMA_MEMORY_USAGE_GPU_ONLY:
17267  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17268  {
17269  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17270  }
17271  break;
17272  case VMA_MEMORY_USAGE_CPU_ONLY:
17273  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17274  break;
17275  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17276  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17277  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17278  {
17279  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17280  }
17281  break;
17282  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17283  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17284  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17285  break;
17286  case VMA_MEMORY_USAGE_CPU_COPY:
17287  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17288  break;
17289  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17290  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17291  break;
17292  default:
17293  VMA_ASSERT(0);
17294  break;
17295  }
17296 
17297  // Avoid DEVICE_COHERENT unless explicitly requested.
17298  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17299  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17300  {
17301  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17302  }
17303 
17304  *pMemoryTypeIndex = UINT32_MAX;
17305  uint32_t minCost = UINT32_MAX;
17306  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17307  memTypeIndex < allocator->GetMemoryTypeCount();
17308  ++memTypeIndex, memTypeBit <<= 1)
17309  {
17310  // This memory type is acceptable according to memoryTypeBits bitmask.
17311  if((memTypeBit & memoryTypeBits) != 0)
17312  {
17313  const VkMemoryPropertyFlags currFlags =
17314  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17315  // This memory type contains requiredFlags.
17316  if((requiredFlags & ~currFlags) == 0)
17317  {
17318  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17319  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17320  VmaCountBitsSet(currFlags & notPreferredFlags);
17321  // Remember memory type with lowest cost.
17322  if(currCost < minCost)
17323  {
17324  *pMemoryTypeIndex = memTypeIndex;
17325  if(currCost == 0)
17326  {
17327  return VK_SUCCESS;
17328  }
17329  minCost = currCost;
17330  }
17331  }
17332  }
17333  }
17334  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17335 }
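
/*
A usage sketch: find a memory type for readback (host-visible required,
host-cached preferred). Passing UINT32_MAX as memoryTypeBits means "no
restriction from a specific resource":

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_TO_CPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex contains all required flags and has the lowest
    // cost: fewest preferred flags missing plus fewest not-preferred flags present.
*/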
17336 
17337 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17338  VmaAllocator allocator,
17339  const VkBufferCreateInfo* pBufferCreateInfo,
17340  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17341  uint32_t* pMemoryTypeIndex)
17342 {
17343  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17344  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17345  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17346  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17347 
17348  const VkDevice hDev = allocator->m_hDevice;
17349  VkBuffer hBuffer = VK_NULL_HANDLE;
17350  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17351  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17352  if(res == VK_SUCCESS)
17353  {
17354  VkMemoryRequirements memReq = {};
17355  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17356  hDev, hBuffer, &memReq);
17357 
17358  res = vmaFindMemoryTypeIndex(
17359  allocator,
17360  memReq.memoryTypeBits,
17361  pAllocationCreateInfo,
17362  pMemoryTypeIndex);
17363 
17364  allocator->GetVulkanFunctions().vkDestroyBuffer(
17365  hDev, hBuffer, allocator->GetAllocationCallbacks());
17366  }
17367  return res;
17368 }
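
/*
A usage sketch: because memory requirements depend on buffer parameters (mainly
usage flags), this helper creates and immediately destroys a temporary buffer
internally. The create-info values below are illustrative:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/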
17369 
17370 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17371  VmaAllocator allocator,
17372  const VkImageCreateInfo* pImageCreateInfo,
17373  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17374  uint32_t* pMemoryTypeIndex)
17375 {
17376  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17377  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17378  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17379  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17380 
17381  const VkDevice hDev = allocator->m_hDevice;
17382  VkImage hImage = VK_NULL_HANDLE;
17383  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17384  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17385  if(res == VK_SUCCESS)
17386  {
17387  VkMemoryRequirements memReq = {};
17388  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17389  hDev, hImage, &memReq);
17390 
17391  res = vmaFindMemoryTypeIndex(
17392  allocator,
17393  memReq.memoryTypeBits,
17394  pAllocationCreateInfo,
17395  pMemoryTypeIndex);
17396 
17397  allocator->GetVulkanFunctions().vkDestroyImage(
17398  hDev, hImage, allocator->GetAllocationCallbacks());
17399  }
17400  return res;
17401 }
17402 
17403 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
17404  VmaAllocator allocator,
17405  const VmaPoolCreateInfo* pCreateInfo,
17406  VmaPool* pPool)
17407 {
17408  VMA_ASSERT(allocator && pCreateInfo && pPool);
17409 
17410  VMA_DEBUG_LOG("vmaCreatePool");
17411 
17412  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17413 
17414  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17415 
17416 #if VMA_RECORDING_ENABLED
17417  if(allocator->GetRecorder() != VMA_NULL)
17418  {
17419  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17420  }
17421 #endif
17422 
17423  return res;
17424 }
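
/*
A usage sketch continuing the vmaFindMemoryTypeIndexForBufferInfo example
above; block size and count are illustrative:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // Optional: fixed 16 MiB blocks.
    poolCreateInfo.maxBlockCount = 4;               // Optional: cap total pool size.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/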
17425 
17426 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17427  VmaAllocator allocator,
17428  VmaPool pool)
17429 {
17430  VMA_ASSERT(allocator);
17431 
17432  if(pool == VK_NULL_HANDLE)
17433  {
17434  return;
17435  }
17436 
17437  VMA_DEBUG_LOG("vmaDestroyPool");
17438 
17439  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17440 
17441 #if VMA_RECORDING_ENABLED
17442  if(allocator->GetRecorder() != VMA_NULL)
17443  {
17444  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
17445  }
17446 #endif
17447 
17448  allocator->DestroyPool(pool);
17449 }
17450 
17451 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
17452  VmaAllocator allocator,
17453  VmaPool pool,
17454  VmaPoolStats* pPoolStats)
17455 {
17456  VMA_ASSERT(allocator && pool && pPoolStats);
17457 
17458  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17459 
17460  allocator->GetPoolStats(pool, pPoolStats);
17461 }
17462 
17463 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
17464  VmaAllocator allocator,
17465  VmaPool pool,
17466  size_t* pLostAllocationCount)
17467 {
17468  VMA_ASSERT(allocator && pool);
17469 
17470  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17471 
17472 #if VMA_RECORDING_ENABLED
17473  if(allocator->GetRecorder() != VMA_NULL)
17474  {
17475  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
17476  }
17477 #endif
17478 
17479  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
17480 }
17481 
17482 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
17483 {
17484  VMA_ASSERT(allocator && pool);
17485 
17486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17487 
17488  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
17489 
17490  return allocator->CheckPoolCorruption(pool);
17491 }
17492 
17493 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
17494  VmaAllocator allocator,
17495  VmaPool pool,
17496  const char** ppName)
17497 {
17498  VMA_ASSERT(allocator && pool);
17499 
17500  VMA_DEBUG_LOG("vmaGetPoolName");
17501 
17502  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17503 
17504  *ppName = pool->GetName();
17505 }
17506 
17507 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
17508  VmaAllocator allocator,
17509  VmaPool pool,
17510  const char* pName)
17511 {
17512  VMA_ASSERT(allocator && pool);
17513 
17514  VMA_DEBUG_LOG("vmaSetPoolName");
17515 
17516  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17517 
17518  pool->SetName(pName);
17519 
17520 #if VMA_RECORDING_ENABLED
17521  if(allocator->GetRecorder() != VMA_NULL)
17522  {
17523  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
17524  }
17525 #endif
17526 }
17527 
17528 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
17529  VmaAllocator allocator,
17530  const VkMemoryRequirements* pVkMemoryRequirements,
17531  const VmaAllocationCreateInfo* pCreateInfo,
17532  VmaAllocation* pAllocation,
17533  VmaAllocationInfo* pAllocationInfo)
17534 {
17535  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
17536 
17537  VMA_DEBUG_LOG("vmaAllocateMemory");
17538 
17539  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17540 
17541  VkResult result = allocator->AllocateMemory(
17542  *pVkMemoryRequirements,
17543  false, // requiresDedicatedAllocation
17544  false, // prefersDedicatedAllocation
17545  VK_NULL_HANDLE, // dedicatedBuffer
17546  UINT32_MAX, // dedicatedBufferUsage
17547  VK_NULL_HANDLE, // dedicatedImage
17548  *pCreateInfo,
17549  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17550  1, // allocationCount
17551  pAllocation);
17552 
17553 #if VMA_RECORDING_ENABLED
17554  if(allocator->GetRecorder() != VMA_NULL)
17555  {
17556  allocator->GetRecorder()->RecordAllocateMemory(
17557  allocator->GetCurrentFrameIndex(),
17558  *pVkMemoryRequirements,
17559  *pCreateInfo,
17560  *pAllocation);
17561  }
17562 #endif
17563 
17564  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17565  {
17566  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17567  }
17568 
17569  return result;
17570 }
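
/*
A usage sketch: allocate raw device memory, e.g. for a resource created and
bound manually. The VkMemoryRequirements values are illustrative:

    VkMemoryRequirements memReq = {};
    memReq.size = 65536;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
    // allocInfo.deviceMemory and allocInfo.offset identify the returned range.
*/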
17571 
17572 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
17573  VmaAllocator allocator,
17574  const VkMemoryRequirements* pVkMemoryRequirements,
17575  const VmaAllocationCreateInfo* pCreateInfo,
17576  size_t allocationCount,
17577  VmaAllocation* pAllocations,
17578  VmaAllocationInfo* pAllocationInfo)
17579 {
17580  if(allocationCount == 0)
17581  {
17582  return VK_SUCCESS;
17583  }
17584 
17585  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
17586 
17587  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
17588 
17589  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17590 
17591  VkResult result = allocator->AllocateMemory(
17592  *pVkMemoryRequirements,
17593  false, // requiresDedicatedAllocation
17594  false, // prefersDedicatedAllocation
17595  VK_NULL_HANDLE, // dedicatedBuffer
17596  UINT32_MAX, // dedicatedBufferUsage
17597  VK_NULL_HANDLE, // dedicatedImage
17598  *pCreateInfo,
17599  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17600  allocationCount,
17601  pAllocations);
17602 
17603 #if VMA_RECORDING_ENABLED
17604  if(allocator->GetRecorder() != VMA_NULL)
17605  {
17606  allocator->GetRecorder()->RecordAllocateMemoryPages(
17607  allocator->GetCurrentFrameIndex(),
17608  *pVkMemoryRequirements,
17609  *pCreateInfo,
17610  (uint64_t)allocationCount,
17611  pAllocations);
17612  }
17613 #endif
17614 
17615  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17616  {
17617  for(size_t i = 0; i < allocationCount; ++i)
17618  {
17619  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
17620  }
17621  }
17622 
17623  return result;
17624 }
17625 
17626 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
17627  VmaAllocator allocator,
17628  VkBuffer buffer,
17629  const VmaAllocationCreateInfo* pCreateInfo,
17630  VmaAllocation* pAllocation,
17631  VmaAllocationInfo* pAllocationInfo)
17632 {
17633  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17634 
17635  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
17636 
17637  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17638 
17639  VkMemoryRequirements vkMemReq = {};
17640  bool requiresDedicatedAllocation = false;
17641  bool prefersDedicatedAllocation = false;
17642  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
17643  requiresDedicatedAllocation,
17644  prefersDedicatedAllocation);
17645 
17646  VkResult result = allocator->AllocateMemory(
17647  vkMemReq,
17648  requiresDedicatedAllocation,
17649  prefersDedicatedAllocation,
17650  buffer, // dedicatedBuffer
17651  UINT32_MAX, // dedicatedBufferUsage
17652  VK_NULL_HANDLE, // dedicatedImage
17653  *pCreateInfo,
17654  VMA_SUBALLOCATION_TYPE_BUFFER,
17655  1, // allocationCount
17656  pAllocation);
17657 
17658 #if VMA_RECORDING_ENABLED
17659  if(allocator->GetRecorder() != VMA_NULL)
17660  {
17661  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
17662  allocator->GetCurrentFrameIndex(),
17663  vkMemReq,
17664  requiresDedicatedAllocation,
17665  prefersDedicatedAllocation,
17666  *pCreateInfo,
17667  *pAllocation);
17668  }
17669 #endif
17670 
17671  if(pAllocationInfo && result == VK_SUCCESS)
17672  {
17673  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17674  }
17675 
17676  return result;
17677 }
17678 
17679 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17680  VmaAllocator allocator,
17681  VkImage image,
17682  const VmaAllocationCreateInfo* pCreateInfo,
17683  VmaAllocation* pAllocation,
17684  VmaAllocationInfo* pAllocationInfo)
17685 {
17686  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17687 
17688  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17689 
17690  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17691 
17692  VkMemoryRequirements vkMemReq = {};
17693  bool requiresDedicatedAllocation = false;
17694  bool prefersDedicatedAllocation = false;
17695  allocator->GetImageMemoryRequirements(image, vkMemReq,
17696  requiresDedicatedAllocation, prefersDedicatedAllocation);
17697 
17698  VkResult result = allocator->AllocateMemory(
17699  vkMemReq,
17700  requiresDedicatedAllocation,
17701  prefersDedicatedAllocation,
17702  VK_NULL_HANDLE, // dedicatedBuffer
17703  UINT32_MAX, // dedicatedBufferUsage
17704  image, // dedicatedImage
17705  *pCreateInfo,
17706  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17707  1, // allocationCount
17708  pAllocation);
17709 
17710 #if VMA_RECORDING_ENABLED
17711  if(allocator->GetRecorder() != VMA_NULL)
17712  {
17713  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17714  allocator->GetCurrentFrameIndex(),
17715  vkMemReq,
17716  requiresDedicatedAllocation,
17717  prefersDedicatedAllocation,
17718  *pCreateInfo,
17719  *pAllocation);
17720  }
17721 #endif
17722 
17723  if(pAllocationInfo && result == VK_SUCCESS)
17724  {
17725  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17726  }
17727 
17728  return result;
17729 }
17730 
17731 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17732  VmaAllocator allocator,
17733  VmaAllocation allocation)
17734 {
17735  VMA_ASSERT(allocator);
17736 
17737  if(allocation == VK_NULL_HANDLE)
17738  {
17739  return;
17740  }
17741 
17742  VMA_DEBUG_LOG("vmaFreeMemory");
17743 
17744  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17745 
17746 #if VMA_RECORDING_ENABLED
17747  if(allocator->GetRecorder() != VMA_NULL)
17748  {
17749  allocator->GetRecorder()->RecordFreeMemory(
17750  allocator->GetCurrentFrameIndex(),
17751  allocation);
17752  }
17753 #endif
17754 
17755  allocator->FreeMemory(
17756  1, // allocationCount
17757  &allocation);
17758 }
17759 
17760 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17761  VmaAllocator allocator,
17762  size_t allocationCount,
17763  VmaAllocation* pAllocations)
17764 {
17765  if(allocationCount == 0)
17766  {
17767  return;
17768  }
17769 
17770  VMA_ASSERT(allocator);
17771 
17772  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17773 
17774  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17775 
17776 #if VMA_RECORDING_ENABLED
17777  if(allocator->GetRecorder() != VMA_NULL)
17778  {
17779  allocator->GetRecorder()->RecordFreeMemoryPages(
17780  allocator->GetCurrentFrameIndex(),
17781  (uint64_t)allocationCount,
17782  pAllocations);
17783  }
17784 #endif
17785 
17786  allocator->FreeMemory(allocationCount, pAllocations);
17787 }
17788 
17789 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17790  VmaAllocator allocator,
17791  VmaAllocation allocation,
17792  VkDeviceSize newSize)
17793 {
17794  VMA_ASSERT(allocator && allocation);
17795 
17796  VMA_DEBUG_LOG("vmaResizeAllocation");
17797 
17798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17799 
17800  return allocator->ResizeAllocation(allocation, newSize);
17801 }
17802 
17803 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17804  VmaAllocator allocator,
17805  VmaAllocation allocation,
17806  VmaAllocationInfo* pAllocationInfo)
17807 {
17808  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17809 
17810  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17811 
17812 #if VMA_RECORDING_ENABLED
17813  if(allocator->GetRecorder() != VMA_NULL)
17814  {
17815  allocator->GetRecorder()->RecordGetAllocationInfo(
17816  allocator->GetCurrentFrameIndex(),
17817  allocation);
17818  }
17819 #endif
17820 
17821  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17822 }
17823 
17824 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17825  VmaAllocator allocator,
17826  VmaAllocation allocation)
17827 {
17828  VMA_ASSERT(allocator && allocation);
17829 
17830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17831 
17832 #if VMA_RECORDING_ENABLED
17833  if(allocator->GetRecorder() != VMA_NULL)
17834  {
17835  allocator->GetRecorder()->RecordTouchAllocation(
17836  allocator->GetCurrentFrameIndex(),
17837  allocation);
17838  }
17839 #endif
17840 
17841  return allocator->TouchAllocation(allocation);
17842 }
17843 
17844 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17845  VmaAllocator allocator,
17846  VmaAllocation allocation,
17847  void* pUserData)
17848 {
17849  VMA_ASSERT(allocator && allocation);
17850 
17851  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17852 
17853  allocation->SetUserData(allocator, pUserData);
17854 
17855 #if VMA_RECORDING_ENABLED
17856  if(allocator->GetRecorder() != VMA_NULL)
17857  {
17858  allocator->GetRecorder()->RecordSetAllocationUserData(
17859  allocator->GetCurrentFrameIndex(),
17860  allocation,
17861  pUserData);
17862  }
17863 #endif
17864 }
17865 
17866 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17867  VmaAllocator allocator,
17868  VmaAllocation* pAllocation)
17869 {
17870  VMA_ASSERT(allocator && pAllocation);
17871 
17872  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17873 
17874  allocator->CreateLostAllocation(pAllocation);
17875 
17876 #if VMA_RECORDING_ENABLED
17877  if(allocator->GetRecorder() != VMA_NULL)
17878  {
17879  allocator->GetRecorder()->RecordCreateLostAllocation(
17880  allocator->GetCurrentFrameIndex(),
17881  *pAllocation);
17882  }
17883 #endif
17884 }
17885 
17886 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17887  VmaAllocator allocator,
17888  VmaAllocation allocation,
17889  void** ppData)
17890 {
17891  VMA_ASSERT(allocator && allocation && ppData);
17892 
17893  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17894 
17895  VkResult res = allocator->Map(allocation, ppData);
17896 
17897 #if VMA_RECORDING_ENABLED
17898  if(allocator->GetRecorder() != VMA_NULL)
17899  {
17900  allocator->GetRecorder()->RecordMapMemory(
17901  allocator->GetCurrentFrameIndex(),
17902  allocation);
17903  }
17904 #endif
17905 
17906  return res;
17907 }
17908 
17909 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17910  VmaAllocator allocator,
17911  VmaAllocation allocation)
17912 {
17913  VMA_ASSERT(allocator && allocation);
17914 
17915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17916 
17917 #if VMA_RECORDING_ENABLED
17918  if(allocator->GetRecorder() != VMA_NULL)
17919  {
17920  allocator->GetRecorder()->RecordUnmapMemory(
17921  allocator->GetCurrentFrameIndex(),
17922  allocation);
17923  }
17924 #endif
17925 
17926  allocator->Unmap(allocation);
17927 }
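
/*
A usage sketch: map, write, flush, unmap. The flush is required only for
memory types without HOST_COHERENT but is always correct; `srcData` and
`srcDataSize` stand for application data:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
*/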
17928 
17929 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17930 {
17931  VMA_ASSERT(allocator && allocation);
17932 
17933  VMA_DEBUG_LOG("vmaFlushAllocation");
17934 
17935  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17936 
17937  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17938 
17939 #if VMA_RECORDING_ENABLED
17940  if(allocator->GetRecorder() != VMA_NULL)
17941  {
17942  allocator->GetRecorder()->RecordFlushAllocation(
17943  allocator->GetCurrentFrameIndex(),
17944  allocation, offset, size);
17945  }
17946 #endif
17947 }
17948 
17949 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17950 {
17951  VMA_ASSERT(allocator && allocation);
17952 
17953  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17954 
17955  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17956 
17957  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17958 
17959 #if VMA_RECORDING_ENABLED
17960  if(allocator->GetRecorder() != VMA_NULL)
17961  {
17962  allocator->GetRecorder()->RecordInvalidateAllocation(
17963  allocator->GetCurrentFrameIndex(),
17964  allocation, offset, size);
17965  }
17966 #endif
17967 }
17968 
17969 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17970 {
17971  VMA_ASSERT(allocator);
17972 
17973  VMA_DEBUG_LOG("vmaCheckCorruption");
17974 
17975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17976 
17977  return allocator->CheckCorruption(memoryTypeBits);
17978 }
17979 
17980 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17981  VmaAllocator allocator,
17982  VmaAllocation* pAllocations,
17983  size_t allocationCount,
17984  VkBool32* pAllocationsChanged,
17985  const VmaDefragmentationInfo *pDefragmentationInfo,
17986  VmaDefragmentationStats* pDefragmentationStats)
17987 {
17988  // Deprecated interface, reimplemented using the new one.
17989 
17990  VmaDefragmentationInfo2 info2 = {};
17991  info2.allocationCount = (uint32_t)allocationCount;
17992  info2.pAllocations = pAllocations;
17993  info2.pAllocationsChanged = pAllocationsChanged;
17994  if(pDefragmentationInfo != VMA_NULL)
17995  {
17996  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17997  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17998  }
17999  else
18000  {
18001  info2.maxCpuAllocationsToMove = UINT32_MAX;
18002  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18003  }
18004  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18005 
18005 
18006  VmaDefragmentationContext ctx;
18007  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18008  if(res == VK_NOT_READY)
18009  {
18010  res = vmaDefragmentationEnd(allocator, ctx);
18011  }
18012  return res;
18013 }
18014 
18015 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18016  VmaAllocator allocator,
18017  const VmaDefragmentationInfo2* pInfo,
18018  VmaDefragmentationStats* pStats,
18019  VmaDefragmentationContext *pContext)
18020 {
18021  VMA_ASSERT(allocator && pInfo && pContext);
18022 
18023  // Degenerate case: Nothing to defragment.
18024  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18025  {
18026  return VK_SUCCESS;
18027  }
18028 
18029  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18030  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18031  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18032  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18033 
18034  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18035 
18036  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18037 
18038  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18039 
18040 #if VMA_RECORDING_ENABLED
18041  if(allocator->GetRecorder() != VMA_NULL)
18042  {
18043  allocator->GetRecorder()->RecordDefragmentationBegin(
18044  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18045  }
18046 #endif
18047 
18048  return res;
18049 }
18050 
18051 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18052  VmaAllocator allocator,
18053  VmaDefragmentationContext context)
18054 {
18055  VMA_ASSERT(allocator);
18056 
18057  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18058 
18059  if(context != VK_NULL_HANDLE)
18060  {
18061  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18062 
18063 #if VMA_RECORDING_ENABLED
18064  if(allocator->GetRecorder() != VMA_NULL)
18065  {
18066  allocator->GetRecorder()->RecordDefragmentationEnd(
18067  allocator->GetCurrentFrameIndex(), context);
18068  }
18069 #endif
18070 
18071  return allocator->DefragmentationEnd(context);
18072  }
18073  else
18074  {
18075  return VK_SUCCESS;
18076  }
18077 }
18078 
18079 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18080  VmaAllocator allocator,
18081  VmaDefragmentationContext context,
18082  VmaDefragmentationPassInfo* pInfo
18083  )
18084 {
18085  VMA_ASSERT(allocator);
18086  VMA_ASSERT(pInfo);
18087  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
18088 
18089  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18090 
18091  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18092 
18093  if(context == VK_NULL_HANDLE)
18094  {
18095  pInfo->moveCount = 0;
18096  return VK_SUCCESS;
18097  }
18098 
18099  return allocator->DefragmentationPassBegin(pInfo, context);
18100 }
18101 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18102  VmaAllocator allocator,
18103  VmaDefragmentationContext context)
18104 {
18105  VMA_ASSERT(allocator);
18106 
18107  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18108  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18109 
18110  if(context == VK_NULL_HANDLE)
18111  return VK_SUCCESS;
18112 
18113  return allocator->DefragmentationPassEnd(context);
18114 }
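
/*
A usage sketch of the CPU-side defragmentation flow built from the functions
above; `pAllocations`/`allocCount` come from the application:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = pAllocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, defragCtx);
    }
    // Afterwards, buffers/images bound to moved allocations must be recreated
    // and rebound by the application.
*/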
18115 
18116 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18117  VmaAllocator allocator,
18118  VmaAllocation allocation,
18119  VkBuffer buffer)
18120 {
18121  VMA_ASSERT(allocator && allocation && buffer);
18122 
18123  VMA_DEBUG_LOG("vmaBindBufferMemory");
18124 
18125  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18126 
18127  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18128 }
18129 
18130 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18131  VmaAllocator allocator,
18132  VmaAllocation allocation,
18133  VkDeviceSize allocationLocalOffset,
18134  VkBuffer buffer,
18135  const void* pNext)
18136 {
18137  VMA_ASSERT(allocator && allocation && buffer);
18138 
18139  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18140 
18141  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18142 
18143  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18144 }
18145 
18146 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18147  VmaAllocator allocator,
18148  VmaAllocation allocation,
18149  VkImage image)
18150 {
18151  VMA_ASSERT(allocator && allocation && image);
18152 
18153  VMA_DEBUG_LOG("vmaBindImageMemory");
18154 
18155  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18156 
18157  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18158 }
18159 
18160 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18161  VmaAllocator allocator,
18162  VmaAllocation allocation,
18163  VkDeviceSize allocationLocalOffset,
18164  VkImage image,
18165  const void* pNext)
18166 {
18167  VMA_ASSERT(allocator && allocation && image);
18168 
18169  VMA_DEBUG_LOG("vmaBindImageMemory2");
18170 
18171  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18172 
18173  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18174 }
18175 
18176 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18177  VmaAllocator allocator,
18178  const VkBufferCreateInfo* pBufferCreateInfo,
18179  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18180  VkBuffer* pBuffer,
18181  VmaAllocation* pAllocation,
18182  VmaAllocationInfo* pAllocationInfo)
18183 {
18184  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18185 
18186  if(pBufferCreateInfo->size == 0)
18187  {
18188  return VK_ERROR_VALIDATION_FAILED_EXT;
18189  }
18190  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18191  !allocator->m_UseKhrBufferDeviceAddress)
18192  {
18193  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18194  return VK_ERROR_VALIDATION_FAILED_EXT;
18195  }
18196 
18197  VMA_DEBUG_LOG("vmaCreateBuffer");
18198 
18199  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18200 
18201  *pBuffer = VK_NULL_HANDLE;
18202  *pAllocation = VK_NULL_HANDLE;
18203 
18204  // 1. Create VkBuffer.
18205  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18206  allocator->m_hDevice,
18207  pBufferCreateInfo,
18208  allocator->GetAllocationCallbacks(),
18209  pBuffer);
18210  if(res >= 0)
18211  {
18212  // 2. vkGetBufferMemoryRequirements.
18213  VkMemoryRequirements vkMemReq = {};
18214  bool requiresDedicatedAllocation = false;
18215  bool prefersDedicatedAllocation = false;
18216  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18217  requiresDedicatedAllocation, prefersDedicatedAllocation);
18218 
18219  // 3. Allocate memory using allocator.
18220  res = allocator->AllocateMemory(
18221  vkMemReq,
18222  requiresDedicatedAllocation,
18223  prefersDedicatedAllocation,
18224  *pBuffer, // dedicatedBuffer
18225  pBufferCreateInfo->usage, // dedicatedBufferUsage
18226  VK_NULL_HANDLE, // dedicatedImage
18227  *pAllocationCreateInfo,
18228  VMA_SUBALLOCATION_TYPE_BUFFER,
18229  1, // allocationCount
18230  pAllocation);
18231 
18232 #if VMA_RECORDING_ENABLED
18233  if(allocator->GetRecorder() != VMA_NULL)
18234  {
18235  allocator->GetRecorder()->RecordCreateBuffer(
18236  allocator->GetCurrentFrameIndex(),
18237  *pBufferCreateInfo,
18238  *pAllocationCreateInfo,
18239  *pAllocation);
18240  }
18241 #endif
18242 
18243  if(res >= 0)
18244  {
18245  // 4. Bind buffer with memory.
18246  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18247  {
18248  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18249  }
18250  if(res >= 0)
18251  {
18252  // All steps succeeded.
18253  #if VMA_STATS_STRING_ENABLED
18254  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
18255  #endif
18256  if(pAllocationInfo != VMA_NULL)
18257  {
18258  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18259  }
18260 
18261  return VK_SUCCESS;
18262  }
18263  allocator->FreeMemory(
18264  1, // allocationCount
18265  pAllocation);
18266  *pAllocation = VK_NULL_HANDLE;
18267  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18268  *pBuffer = VK_NULL_HANDLE;
18269  return res;
18270  }
18271  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18272  *pBuffer = VK_NULL_HANDLE;
18273  return res;
18274  }
18275  return res;
18276 }
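
/*
A usage sketch of the create-allocate-bind sequence implemented above; buffer
parameters are illustrative:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/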
18277 
18278 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18279  VmaAllocator allocator,
18280  VkBuffer buffer,
18281  VmaAllocation allocation)
18282 {
18283  VMA_ASSERT(allocator);
18284 
18285  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18286  {
18287  return;
18288  }
18289 
18290  VMA_DEBUG_LOG("vmaDestroyBuffer");
18291 
18292  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18293 
18294 #if VMA_RECORDING_ENABLED
18295  if(allocator->GetRecorder() != VMA_NULL)
18296  {
18297  allocator->GetRecorder()->RecordDestroyBuffer(
18298  allocator->GetCurrentFrameIndex(),
18299  allocation);
18300  }
18301 #endif
18302 
18303  if(buffer != VK_NULL_HANDLE)
18304  {
18305  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18306  }
18307 
18308  if(allocation != VK_NULL_HANDLE)
18309  {
18310  allocator->FreeMemory(
18311  1, // allocationCount
18312  &allocation);
18313  }
18314 }
18315 
18316 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
18317  VmaAllocator allocator,
18318  const VkImageCreateInfo* pImageCreateInfo,
18319  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18320  VkImage* pImage,
18321  VmaAllocation* pAllocation,
18322  VmaAllocationInfo* pAllocationInfo)
18323 {
18324  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
18325 
18326  if(pImageCreateInfo->extent.width == 0 ||
18327  pImageCreateInfo->extent.height == 0 ||
18328  pImageCreateInfo->extent.depth == 0 ||
18329  pImageCreateInfo->mipLevels == 0 ||
18330  pImageCreateInfo->arrayLayers == 0)
18331  {
18332  return VK_ERROR_VALIDATION_FAILED_EXT;
18333  }
18334 
18335  VMA_DEBUG_LOG("vmaCreateImage");
18336 
18337  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18338 
18339  *pImage = VK_NULL_HANDLE;
18340  *pAllocation = VK_NULL_HANDLE;
18341 
18342  // 1. Create VkImage.
18343  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
18344  allocator->m_hDevice,
18345  pImageCreateInfo,
18346  allocator->GetAllocationCallbacks(),
18347  pImage);
18348  if(res >= 0)
18349  {
18350  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
18351  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
18352  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
18353 
18354  // 2. Allocate memory using allocator.
18355  VkMemoryRequirements vkMemReq = {};
18356  bool requiresDedicatedAllocation = false;
18357  bool prefersDedicatedAllocation = false;
18358  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
18359  requiresDedicatedAllocation, prefersDedicatedAllocation);
18360 
18361  res = allocator->AllocateMemory(
18362  vkMemReq,
18363  requiresDedicatedAllocation,
18364  prefersDedicatedAllocation,
18365  VK_NULL_HANDLE, // dedicatedBuffer
18366  UINT32_MAX, // dedicatedBufferUsage
18367  *pImage, // dedicatedImage
18368  *pAllocationCreateInfo,
18369  suballocType,
18370  1, // allocationCount
18371  pAllocation);
18372 
18373 #if VMA_RECORDING_ENABLED
18374  if(allocator->GetRecorder() != VMA_NULL)
18375  {
18376  allocator->GetRecorder()->RecordCreateImage(
18377  allocator->GetCurrentFrameIndex(),
18378  *pImageCreateInfo,
18379  *pAllocationCreateInfo,
18380  *pAllocation);
18381  }
18382 #endif
18383 
18384  if(res >= 0)
18385  {
18386  // 3. Bind image with memory.
18387  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18388  {
18389  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
18390  }
18391  if(res >= 0)
18392  {
18393  // All steps succeeded.
18394  #if VMA_STATS_STRING_ENABLED
18395  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
18396  #endif
18397  if(pAllocationInfo != VMA_NULL)
18398  {
18399  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18400  }
18401 
18402  return VK_SUCCESS;
18403  }
18404  allocator->FreeMemory(
18405  1, // allocationCount
18406  pAllocation);
18407  *pAllocation = VK_NULL_HANDLE;
18408  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18409  *pImage = VK_NULL_HANDLE;
18410  return res;
18411  }
18412  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18413  *pImage = VK_NULL_HANDLE;
18414  return res;
18415  }
18416  return res;
18417 }
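
/*
A usage sketch, analogous to vmaCreateBuffer; image parameters are illustrative:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/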
18418 
18419 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
18420  VmaAllocator allocator,
18421  VkImage image,
18422  VmaAllocation allocation)
18423 {
18424  VMA_ASSERT(allocator);
18425 
18426  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18427  {
18428  return;
18429  }
18430 
18431  VMA_DEBUG_LOG("vmaDestroyImage");
18432 
18433  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18434 
18435 #if VMA_RECORDING_ENABLED
18436  if(allocator->GetRecorder() != VMA_NULL)
18437  {
18438  allocator->GetRecorder()->RecordDestroyImage(
18439  allocator->GetCurrentFrameIndex(),
18440  allocation);
18441  }
18442 #endif
18443 
18444  if(image != VK_NULL_HANDLE)
18445  {
18446  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
18447  }
18448  if(allocation != VK_NULL_HANDLE)
18449  {
18450  allocator->FreeMemory(
18451  1, // allocationCount
18452  &allocation);
18453  }
18454 }
18455 
18456 #endif // #ifdef VMA_IMPLEMENTATION
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2129
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2087
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1975
VMA_RECORD_FLAG_BITS_MAX_ENUM
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2124
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2085
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2150
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2750
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3301
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2176
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2038
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2351
VmaDefragmentationPassMoveInfo::memory
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3369
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2496
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2822
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2579
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1986
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2828
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2559
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2116
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1971
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2546
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2156
VMA_RECORD_FLUSH_AFTER_CALL_BIT
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2122
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2090
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2965
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2319
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2653
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2325
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2089
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1865
VmaDefragmentationPassMoveInfo::offset
VkDeviceSize offset
Definition: vk_mem_alloc.h:3370
VmaDefragmentationPassInfo::pMoves
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3379
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2590
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaAllocatorInfo::instance
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2251
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2376
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2101
VmaAllocatorCreateInfo
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:2144
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2520
VmaAllocatorInfo::device
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2261
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3287
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2841
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2583
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2011
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to a different place.
Definition: vk_mem_alloc.h:3396
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2574
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2434
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2315
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2778
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2790
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Number of pools in the pPools array.
Definition: vk_mem_alloc.h:3319
VmaDefragmentationPassMoveInfo
Definition: vk_mem_alloc.h:3367
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about the specified allocation and atomically marks it as used in the current fra...
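A short sketch of reading an allocation's current parameters back, assuming allocator and alloc are valid handles created earlier:
VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, alloc, &allocInfo);
// allocInfo.deviceMemory and allocInfo.offset identify where the allocation
// lives; allocInfo.size is its size in bytes.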
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2831
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2078
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
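A sketch of the build/free pairing for the JSON statistics string; allocator is assumed to be a valid VmaAllocator:
char* statsString = NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailedMap = VK_TRUE
// ... write statsString (JSON) to a log or file for offline inspection ...
vmaFreeStatsString(allocator, statsString); // must be freed through the library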
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2076
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3285
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2970
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2502
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2086
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2771
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in lost state from the beginning.
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2646
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2333
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2509
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2026
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3298
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2094
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3350
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3400
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys a VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2825
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2088
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2126
VMA_MEMORY_USAGE_CPU_ONLY
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2466
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or a pointer to an array of pools to be defragmented.
Definition: vk_mem_alloc.h:3335
VmaAllocation
Represents a single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2488
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in the given allocation to a new value.
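A sketch of attaching a debug string to an allocation; the string is copied only if the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, otherwise only the pointer is stored (allocator and alloc assumed valid):
vmaSetAllocationUserData(allocator, alloc, (void*)"Terrain vertex buffer");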
VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3286
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2216
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2093
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2095
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3364
VmaStats
General statistics from the current state of the Allocator.
Definition: vk_mem_alloc.h:2329
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2795
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2231
VmaStatInfo
Calculated statistics of memory usage in the entire allocator.
Definition: vk_mem_alloc.h:2312
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3404
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationPassInfo::moveCount
uint32_t moveCount
Definition: vk_mem_alloc.h:3378
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2456
vmaBeginDefragmentationPass
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
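A sketch of querying a memory type index up front, assuming memoryTypeBits came from vkGetBufferMemoryRequirements() or similar:
uint32_t memoryTypeIndex;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits,
    &allocCreateInfo, &memoryTypeIndex);
// memoryTypeIndex can now seed VmaPoolCreateInfo::memoryTypeIndex.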
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates a VmaPool object.
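A sketch of creating and destroying a custom pool; memoryTypeIndex is assumed to come from one of the vmaFindMemoryTypeIndex* helpers:
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per VkDeviceMemory block
poolCreateInfo.maxBlockCount = 2;                // cap the pool at two blocks

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);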
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2323
VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2074
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
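A sketch of a batched allocation, assuming memReq was obtained from the driver (e.g. vkGetBufferMemoryRequirements()) and allocCreateInfo is filled as for vmaAllocateMemory():
// One call creates several independent allocations sharing the same
// requirements; cheaper than calling vmaAllocateMemory() in a loop.
VmaAllocation allocs[8];
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo,
    8, allocs, NULL);
if(res == VK_SUCCESS)
{
    // ... use the allocations ...
    vmaFreeMemoryPages(allocator, 8, allocs);
}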
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2321
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2159
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for the created VmaAllocator.
Definition: vk_mem_alloc.h:1981
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2803
VmaPoolCreateInfo
Describes parameters of a VmaPool to be created.
Definition: vk_mem_alloc.h:2775
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1973
VmaPool
Represents a custom memory pool.
VMA_MEMORY_USAGE_GPU_TO_CPU
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2482
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2553
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use a combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2781
VMA_MEMORY_USAGE_MAX_ENUM
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2498
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:2317
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2092
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3295
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3391
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from a particular heap, in bytes.
Definition: vk_mem_alloc.h:2355
VmaAllocatorInfo
Information about an existing VmaAllocator object.
Definition: vk_mem_alloc.h:2245
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2769
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2627
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2600
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in the entire allocator.
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2324
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2162
VMA_ALLOCATION_CREATE_STRATEGY_MASK
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2604
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2153
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by the given allocation and returns a pointer to it.
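A sketch of a map/write/unmap round trip; alloc must live in HOST_VISIBLE memory, and srcData/srcSize are placeholders (memcpy requires <string.h>):
void* mappedData = NULL;
VkResult res = vmaMapMemory(allocator, alloc, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcSize);
    vmaUnmapMemory(allocator, alloc); // every map must be paired with an unmap
}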
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2201
VmaDefragmentationPassMoveInfo::allocation
VmaAllocation allocation
Definition: vk_mem_alloc.h:3368
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2387
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VmaDefragmentationPassInfo
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2084
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2984
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for the created allocator. Use the VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2147
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3289
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of an existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2099
VmaDeviceMemoryCallbacks::pUserData
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:1977
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2325
VMA_MEMORY_USAGE_CPU_TO_GPU
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2473
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2597
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2594
VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2056
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2622
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2324
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2975
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2132
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2091
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2989
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in the given pool as lost if they are not used in the current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2733
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
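A sketch of the usual create/destroy pairing for a buffer plus its memory, closely following the pattern the library documents elsewhere; allocator is assumed valid:
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &alloc, NULL);
// ...
vmaDestroyBuffer(allocator, buffer, alloc); // frees the buffer and its memory together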
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2332
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2209
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2844
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
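A sketch of allocator setup and teardown; physicalDevice, device and instance are assumed to be valid Vulkan handles created by the application:
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator for the lifetime of the device ...
vmaDestroyAllocator(allocator);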
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in the given memory types (in both default and cust...
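A sketch of a whole-allocator corruption check; it assumes corruption detection (allocation margins) was enabled at compile time, otherwise the call reports the feature as absent:
// UINT32_MAX checks every memory type.
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_SUCCESS: no corruption found; VK_ERROR_FEATURE_NOT_PRESENT:
// margins/corruption detection not enabled for any of these types.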
VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3377
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2611
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2331
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2222
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of the given allocation.
VMA_MEMORY_USAGE_UNKNOWN
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2439
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3355
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2098
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2817
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2100
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3340
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a VmaPool to be created.
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves the name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2956
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:2533
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from the current state of the Allocator.
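A sketch of pulling aggregate numbers out of the calculated statistics; printf requires <stdio.h>, and allocator is assumed valid:
VmaStats stats;
vmaCalculateStats(allocator, &stats);
// stats.total aggregates everything; per-heap and per-type breakdowns are
// in stats.memoryHeap[] and stats.memoryType[].
printf("used: %llu B, unused: %llu B\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes);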
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2097
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1950
vmaGetAllocatorInfo
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about an existing VmaAllocator object - handle to Vulkan device etc.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2834
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2715
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3402
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2325
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2564
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in the given memory pool, searching for corruption.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1957
VmaDefragmentationPassMoveInfo
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use the VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2616
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2096
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about the current memory budget for all memory heaps.
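A sketch of a budget query; the output array needs one element per memory heap, so sizing it to VK_MAX_MEMORY_HEAPS is always sufficient:
VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetBudget(allocator, budgets);
// Comparing budgets[heapIndex].usage against budgets[heapIndex].budget
// shows how close the program is to that heap's current limit.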
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2613
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2632
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
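A sketch of a CPU-side defragmentation round, assuming allocations/allocationCount describe previously created allocations; resources bound to moved allocations must be recreated and rebound afterwards:
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocationCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // no CPU-side byte limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
// For GPU-side moves, also set defragInfo.commandBuffer and submit it.
vmaDefragmentationEnd(allocator, defragCtx);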
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for a specific memory heap.
vmaEndDefragmentationPass
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3316
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3406
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2640
VmaAllocatorInfo::physicalDevice
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2256
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3408
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2140
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2324
VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2951
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables the alternative buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2761
VmaAllocatorInfo
struct VmaAllocatorInfo VmaAllocatorInfo
Information about an existing VmaAllocator object.
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in a particular heap, in bytes.
Definition: vk_mem_alloc.h:2366
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2609
VmaDefragmentationContext
Opaque object that represents a started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to an array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3310
VMA_POOL_CREATE_ALGORITHM_MASK
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2765
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3345
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3386
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2570
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.