//
// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

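// Illustrative sketch (not part of the original header): functions matching the
// callback signatures above can log every vkAllocateMemory/vkFreeMemory that the
// allocator performs. The function names below are hypothetical.
/*
static void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    printf("Allocated block of %llu bytes from memory type %u\n", (unsigned long long)size, memoryType);
}
static void VKAPI_PTR MyFreeCallback(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    printf("Freed block of %llu bytes from memory type %u\n", (unsigned long long)size, memoryType);
}
*/
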
typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
    const VmaRecordSettings* pRecordSettings;
    VkInstance instance;
    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator);

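// Typical usage, as a sketch: `physicalDevice`, `device`, and `instance` are
// assumed to be valid handles created by the application beforehand.
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);
*/
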
typedef struct VmaAllocatorInfo
{
    VkInstance instance;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;

    VkDeviceSize allocationBytes;

    VkDeviceSize usage;

    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

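// Example (sketch): dump detailed statistics as a JSON string and release it.
// `allocator` is assumed to be a valid VmaAllocator.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/
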
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

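// Sketch of finding a memory type index for a staging buffer; `allocator` is
// assumed to be a valid VmaAllocator, and the buffer parameters are illustrative.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/
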
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char** ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName);

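// Sketch of creating a custom pool. `exampleBufCreateInfo` and
// `exampleAllocCreateInfo` are hypothetical structures describing the kind of
// resource the pool will hold; the block size and count are illustrative.
/*
VmaPoolCreateInfo poolCreateInfo = {};
vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &exampleBufCreateInfo, &exampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex);
poolCreateInfo.blockSize = 128ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 2;

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from the pool by setting VmaAllocationCreateInfo::pool ...
vmaDestroyPool(allocator, pool);
*/
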
VK_DEFINE_HANDLE(VmaAllocation)


typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

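// Sketch of filling a host-visible allocation through a temporary mapping;
// `allocator`, `allocation`, and `myData` are assumed to exist.
/*
void* mappedData;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, &myData, sizeof(myData));
    vmaUnmapMemory(allocator, allocation);
}
*/
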
VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    VmaAllocation* pAllocations;
    VkBool32* pAllocationsChanged;
    uint32_t poolCount;
    VmaPool* pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation allocation;
    VkDeviceMemory memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* pMoves;
} VmaDefragmentationPassInfo;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator allocator,
    VmaDefragmentationContext context,
    VmaDefragmentationPassInfo* pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator allocator,
    VmaDefragmentationContext context
);

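// Sketch of CPU-side defragmentation of a set of allocations; `allocations` and
// `allocationCount` are assumed to be provided by the application, and the
// "no limit" values are illustrative.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocationCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to the moved allocations must then be recreated and rebound.
*/
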
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

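// The canonical buffer-creation sketch: one call creates the buffer, allocates
// suitable memory, and binds them together. Sizes and usage flags are illustrative.
/*
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
*/
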
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

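// Sketch, assuming VMA_STATIC_VULKAN_FUNCTIONS is 0: function pointers fetched
// by the application (e.g. via vkGetDeviceProcAddr or a loader such as volk)
// are passed in explicitly. The `myLoadedVk*` names are hypothetical.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
vulkanFunctions.vkFreeMemory = myLoadedVkFreeMemory;
// ... fill the remaining members the same way ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/
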
// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
Library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures' operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Copy of some Vulkan definitions so we don't need to check for their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

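// For example, VmaCountBitsSet(0x0000000B) == 3, since binary 1011 has three bits set.
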
// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

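// For example, with an integer T, VmaRoundDiv(7, 2) == 4 and VmaRoundDiv(5, 2) == 3.
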
/*
Returns true if the given number is a power of two.
T must be an unsigned integer type, or a signed integer that is always nonnegative.
For 0 this returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

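// For example, VmaNextPow2(17u) == 32, and VmaNextPow2(32u) == 32 (already a power of 2).
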
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

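// Worked example for VmaBlocksOnSamePage: with pageSize = 4096, a resource at
// offset 0 with size 4000 ends on page 0. A second resource starting at offset
// 4096 begins on page 1, so VmaBlocksOnSamePage(0, 4000, 4096, 4096) == false;
// with the second resource at offset 4000 instead, it would return true.
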
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

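// Usage sketch: the mutex is released automatically when the scope ends, even on
// early return. `m_Mutex` and `m_UseMutex` are hypothetical members of the caller.
/*
{
    VmaMutexLock lock(m_Mutex, m_UseMutex);
    // ... access state guarded by m_Mutex ...
} // unlocked here
*/
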
// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value points to the found element, if present in the collection, or to
the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

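// Usage sketch with a plain array and a less-than functor (values illustrative):
/*
const uint32_t arr[] = { 1, 3, 3, 7 };
const uint32_t* it = VmaBinaryFindFirstNotLess(
    arr, arr + 4, 3u, [](uint32_t a, uint32_t b) { return a < b; });
// it points to the first 3 (index 1); for key 4 it would point to 7 (index 3).
*/
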
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator)
        : VmaVector(count, allocator) {}

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    template<typename... Types> T* Alloc(Types... args);
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has a free item: Create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Set up singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

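// Usage sketch: a pool allocator for a hypothetical Node type. Alloc() constructs
// in place in a free slot, forwarding arguments to the constructor; Free() returns
// the slot to its block's free list.
/*
VmaPoolAllocator<Node> nodeAllocator(pAllocationCallbacks, 32);
Node* n = nodeAllocator.Alloc(arg1, arg2); // forwards args to Node's constructor
nodeAllocator.Free(n);
*/
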
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

4959 template<typename T>
4960 VmaRawList<T>::~VmaRawList()
4961 {
4962  // Intentionally not calling Clear, because that would spend unnecessary
4963  // computation on returning all items to m_ItemAllocator as free.
4964 }
4965 
4966 template<typename T>
4967 void VmaRawList<T>::Clear()
4968 {
4969  if(IsEmpty() == false)
4970  {
4971  ItemType* pItem = m_pBack;
4972  while(pItem != VMA_NULL)
4973  {
4974  ItemType* const pPrevItem = pItem->pPrev;
4975  m_ItemAllocator.Free(pItem);
4976  pItem = pPrevItem;
4977  }
4978  m_pFront = VMA_NULL;
4979  m_pBack = VMA_NULL;
4980  m_Count = 0;
4981  }
4982 }
4983 
4984 template<typename T>
4985 VmaListItem<T>* VmaRawList<T>::PushBack()
4986 {
4987  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4988  pNewItem->pNext = VMA_NULL;
4989  if(IsEmpty())
4990  {
4991  pNewItem->pPrev = VMA_NULL;
4992  m_pFront = pNewItem;
4993  m_pBack = pNewItem;
4994  m_Count = 1;
4995  }
4996  else
4997  {
4998  pNewItem->pPrev = m_pBack;
4999  m_pBack->pNext = pNewItem;
5000  m_pBack = pNewItem;
5001  ++m_Count;
5002  }
5003  return pNewItem;
5004 }
5005 
5006 template<typename T>
5007 VmaListItem<T>* VmaRawList<T>::PushFront()
5008 {
5009  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5010  pNewItem->pPrev = VMA_NULL;
5011  if(IsEmpty())
5012  {
5013  pNewItem->pNext = VMA_NULL;
5014  m_pFront = pNewItem;
5015  m_pBack = pNewItem;
5016  m_Count = 1;
5017  }
5018  else
5019  {
5020  pNewItem->pNext = m_pFront;
5021  m_pFront->pPrev = pNewItem;
5022  m_pFront = pNewItem;
5023  ++m_Count;
5024  }
5025  return pNewItem;
5026 }
5027 
5028 template<typename T>
5029 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5030 {
5031  ItemType* const pNewItem = PushBack();
5032  pNewItem->Value = value;
5033  return pNewItem;
5034 }
5035 
5036 template<typename T>
5037 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5038 {
5039  ItemType* const pNewItem = PushFront();
5040  pNewItem->Value = value;
5041  return pNewItem;
5042 }
5043 
5044 template<typename T>
5045 void VmaRawList<T>::PopBack()
5046 {
5047  VMA_HEAVY_ASSERT(m_Count > 0);
5048  ItemType* const pBackItem = m_pBack;
5049  ItemType* const pPrevItem = pBackItem->pPrev;
5050  if(pPrevItem != VMA_NULL)
5051  {
5052  pPrevItem->pNext = VMA_NULL;
5053  }
5054  m_pBack = pPrevItem;
5055  m_ItemAllocator.Free(pBackItem);
5056  --m_Count;
5057 }
5058 
5059 template<typename T>
5060 void VmaRawList<T>::PopFront()
5061 {
5062  VMA_HEAVY_ASSERT(m_Count > 0);
5063  ItemType* const pFrontItem = m_pFront;
5064  ItemType* const pNextItem = pFrontItem->pNext;
5065  if(pNextItem != VMA_NULL)
5066  {
5067  pNextItem->pPrev = VMA_NULL;
5068  }
5069  m_pFront = pNextItem;
5070  m_ItemAllocator.Free(pFrontItem);
5071  --m_Count;
5072 }
5073 
5074 template<typename T>
5075 void VmaRawList<T>::Remove(ItemType* pItem)
5076 {
5077  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5078  VMA_HEAVY_ASSERT(m_Count > 0);
5079 
5080  if(pItem->pPrev != VMA_NULL)
5081  {
5082  pItem->pPrev->pNext = pItem->pNext;
5083  }
5084  else
5085  {
5086  VMA_HEAVY_ASSERT(m_pFront == pItem);
5087  m_pFront = pItem->pNext;
5088  }
5089 
5090  if(pItem->pNext != VMA_NULL)
5091  {
5092  pItem->pNext->pPrev = pItem->pPrev;
5093  }
5094  else
5095  {
5096  VMA_HEAVY_ASSERT(m_pBack == pItem);
5097  m_pBack = pItem->pPrev;
5098  }
5099 
5100  m_ItemAllocator.Free(pItem);
5101  --m_Count;
5102 }
5103 
5104 template<typename T>
5105 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5106 {
5107  if(pItem != VMA_NULL)
5108  {
5109  ItemType* const prevItem = pItem->pPrev;
5110  ItemType* const newItem = m_ItemAllocator.Alloc();
5111  newItem->pPrev = prevItem;
5112  newItem->pNext = pItem;
5113  pItem->pPrev = newItem;
5114  if(prevItem != VMA_NULL)
5115  {
5116  prevItem->pNext = newItem;
5117  }
5118  else
5119  {
5120  VMA_HEAVY_ASSERT(m_pFront == pItem);
5121  m_pFront = newItem;
5122  }
5123  ++m_Count;
5124  return newItem;
5125  }
5126  else
5127  return PushBack();
5128 }
5129 
5130 template<typename T>
5131 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5132 {
5133  if(pItem != VMA_NULL)
5134  {
5135  ItemType* const nextItem = pItem->pNext;
5136  ItemType* const newItem = m_ItemAllocator.Alloc();
5137  newItem->pNext = nextItem;
5138  newItem->pPrev = pItem;
5139  pItem->pNext = newItem;
5140  if(nextItem != VMA_NULL)
5141  {
5142  nextItem->pPrev = newItem;
5143  }
5144  else
5145  {
5146  VMA_HEAVY_ASSERT(m_pBack == pItem);
5147  m_pBack = newItem;
5148  }
5149  ++m_Count;
5150  return newItem;
5151  }
5152  else
5153  return PushFront();
5154 }
5155 
5156 template<typename T>
5157 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5158 {
5159  ItemType* const newItem = InsertBefore(pItem);
5160  newItem->Value = value;
5161  return newItem;
5162 }
5163 
5164 template<typename T>
5165 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5166 {
5167  ItemType* const newItem = InsertAfter(pItem);
5168  newItem->Value = value;
5169  return newItem;
5170 }
5171 
5172 template<typename T, typename AllocatorT>
5173 class VmaList
5174 {
5175  VMA_CLASS_NO_COPY(VmaList)
5176 public:
5177  class iterator
5178  {
5179  public:
5180  iterator() :
5181  m_pList(VMA_NULL),
5182  m_pItem(VMA_NULL)
5183  {
5184  }
5185 
5186  T& operator*() const
5187  {
5188  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5189  return m_pItem->Value;
5190  }
5191  T* operator->() const
5192  {
5193  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5194  return &m_pItem->Value;
5195  }
5196 
5197  iterator& operator++()
5198  {
5199  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5200  m_pItem = m_pItem->pNext;
5201  return *this;
5202  }
5203  iterator& operator--()
5204  {
5205  if(m_pItem != VMA_NULL)
5206  {
5207  m_pItem = m_pItem->pPrev;
5208  }
5209  else
5210  {
5211  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5212  m_pItem = m_pList->Back();
5213  }
5214  return *this;
5215  }
5216 
5217  iterator operator++(int)
5218  {
5219  iterator result = *this;
5220  ++*this;
5221  return result;
5222  }
5223  iterator operator--(int)
5224  {
5225  iterator result = *this;
5226  --*this;
5227  return result;
5228  }
5229 
5230  bool operator==(const iterator& rhs) const
5231  {
5232  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5233  return m_pItem == rhs.m_pItem;
5234  }
5235  bool operator!=(const iterator& rhs) const
5236  {
5237  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5238  return m_pItem != rhs.m_pItem;
5239  }
5240 
5241  private:
5242  VmaRawList<T>* m_pList;
5243  VmaListItem<T>* m_pItem;
5244 
5245  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5246  m_pList(pList),
5247  m_pItem(pItem)
5248  {
5249  }
5250 
5251  friend class VmaList<T, AllocatorT>;
5252  };
5253 
5254  class const_iterator
5255  {
5256  public:
5257  const_iterator() :
5258  m_pList(VMA_NULL),
5259  m_pItem(VMA_NULL)
5260  {
5261  }
5262 
5263  const_iterator(const iterator& src) :
5264  m_pList(src.m_pList),
5265  m_pItem(src.m_pItem)
5266  {
5267  }
5268 
5269  const T& operator*() const
5270  {
5271  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5272  return m_pItem->Value;
5273  }
5274  const T* operator->() const
5275  {
5276  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5277  return &m_pItem->Value;
5278  }
5279 
5280  const_iterator& operator++()
5281  {
5282  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5283  m_pItem = m_pItem->pNext;
5284  return *this;
5285  }
5286  const_iterator& operator--()
5287  {
5288  if(m_pItem != VMA_NULL)
5289  {
5290  m_pItem = m_pItem->pPrev;
5291  }
5292  else
5293  {
5294  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5295  m_pItem = m_pList->Back();
5296  }
5297  return *this;
5298  }
5299 
5300  const_iterator operator++(int)
5301  {
5302  const_iterator result = *this;
5303  ++*this;
5304  return result;
5305  }
5306  const_iterator operator--(int)
5307  {
5308  const_iterator result = *this;
5309  --*this;
5310  return result;
5311  }
5312 
5313  bool operator==(const const_iterator& rhs) const
5314  {
5315  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5316  return m_pItem == rhs.m_pItem;
5317  }
5318  bool operator!=(const const_iterator& rhs) const
5319  {
5320  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5321  return m_pItem != rhs.m_pItem;
5322  }
5323 
5324  private:
5325  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5326  m_pList(pList),
5327  m_pItem(pItem)
5328  {
5329  }
5330 
5331  const VmaRawList<T>* m_pList;
5332  const VmaListItem<T>* m_pItem;
5333 
5334  friend class VmaList<T, AllocatorT>;
5335  };
5336 
5337  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5338 
5339  bool empty() const { return m_RawList.IsEmpty(); }
5340  size_t size() const { return m_RawList.GetCount(); }
5341 
5342  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5343  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5344 
5345  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5346  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5347 
5348  void clear() { m_RawList.Clear(); }
5349  void push_back(const T& value) { m_RawList.PushBack(value); }
5350  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5351  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5352 
5353 private:
5354  VmaRawList<T> m_RawList;
5355 };
5356 
5357 #endif // #if VMA_USE_STL_LIST
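
/*
Note on the iterator design above: end() is represented by a null m_pItem
while m_pList stays set, which is why operator--() treats null specially and
steps back to m_pList->Back(). A minimal usage sketch, assuming a valid
`const VkAllocationCallbacks* pCallbacks` is in scope (VmaStlAllocator is
defined earlier in this file):

\code
typedef VmaList< int, VmaStlAllocator<int> > IntList;

VmaStlAllocator<int> alloc(pCallbacks);
IntList list(alloc);
list.push_back(10);
list.push_back(20);
int sum = 0;
for(IntList::iterator it = list.begin(); it != list.end(); ++it)
    sum += *it; // sum == 30 afterwards.
\endcode
*/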
5358 
5359 ////////////////////////////////////////////////////////////////////////////////
5360 // class VmaMap
5361 
5362 // Unused in this version.
5363 #if 0
5364 
5365 #if VMA_USE_STL_UNORDERED_MAP
5366 
5367 #define VmaPair std::pair
5368 
5369 #define VMA_MAP_TYPE(KeyT, ValueT) \
5370  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5371 
5372 #else // #if VMA_USE_STL_UNORDERED_MAP
5373 
5374 template<typename T1, typename T2>
5375 struct VmaPair
5376 {
5377  T1 first;
5378  T2 second;
5379 
5380  VmaPair() : first(), second() { }
5381  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5382 };
5383 
5384 /* Class compatible with a subset of the interface of std::unordered_map.
5385 KeyT and ValueT must be POD because they are stored in VmaVector.
5386 */
5387 template<typename KeyT, typename ValueT>
5388 class VmaMap
5389 {
5390 public:
5391  typedef VmaPair<KeyT, ValueT> PairType;
5392  typedef PairType* iterator;
5393 
5394  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5395 
5396  iterator begin() { return m_Vector.begin(); }
5397  iterator end() { return m_Vector.end(); }
5398 
5399  void insert(const PairType& pair);
5400  iterator find(const KeyT& key);
5401  void erase(iterator it);
5402 
5403 private:
5404  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5405 };
5406 
5407 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5408 
5409 template<typename FirstT, typename SecondT>
5410 struct VmaPairFirstLess
5411 {
5412  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5413  {
5414  return lhs.first < rhs.first;
5415  }
5416  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5417  {
5418  return lhs.first < rhsFirst;
5419  }
5420 };
5421 
5422 template<typename KeyT, typename ValueT>
5423 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5424 {
5425  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5426  m_Vector.data(),
5427  m_Vector.data() + m_Vector.size(),
5428  pair,
5429  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5430  VmaVectorInsert(m_Vector, indexToInsert, pair);
5431 }
5432 
5433 template<typename KeyT, typename ValueT>
5434 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5435 {
5436  PairType* it = VmaBinaryFindFirstNotLess(
5437  m_Vector.data(),
5438  m_Vector.data() + m_Vector.size(),
5439  key,
5440  VmaPairFirstLess<KeyT, ValueT>());
5441  if((it != m_Vector.end()) && (it->first == key))
5442  {
5443  return it;
5444  }
5445  else
5446  {
5447  return m_Vector.end();
5448  }
5449 }
5450 
5451 template<typename KeyT, typename ValueT>
5452 void VmaMap<KeyT, ValueT>::erase(iterator it)
5453 {
5454  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5455 }
5456 
5457 #endif // #if VMA_USE_STL_UNORDERED_MAP
5458 
5459 #endif // #if 0
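
/*
VmaMap above is a flat map: a vector of pairs kept sorted by key, with
VmaBinaryFindFirstNotLess playing the role of std::lower_bound. The same
idea with standard containers, as a sketch (FlatMap is hypothetical, not
part of this library):

\code
#include <algorithm>
#include <utility>
#include <vector>

template<typename K, typename V>
struct FlatMap
{
    std::vector< std::pair<K, V> > v; // Invariant: sorted by .first.

    void insert(const std::pair<K, V>& p)
    {
        // Binary search for the insertion point keeps the vector sorted.
        auto it = std::lower_bound(v.begin(), v.end(), p,
            [](const std::pair<K, V>& a, const std::pair<K, V>& b)
                { return a.first < b.first; });
        v.insert(it, p);
    }
    V* find(const K& key)
    {
        auto it = std::lower_bound(v.begin(), v.end(), key,
            [](const std::pair<K, V>& a, const K& k) { return a.first < k; });
        return (it != v.end() && it->first == key) ? &it->second : nullptr;
    }
};
\endcode

Insertion is O(n) due to element shifting, but lookup is a cache-friendly
O(log n), a good trade-off for the small maps used here.
*/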
5460 
5461 ////////////////////////////////////////////////////////////////////////////////
5462 
5463 class VmaDeviceMemoryBlock;
5464 
5465 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5466 
5467 struct VmaAllocation_T
5468 {
5469 private:
5470  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5471 
5472  enum FLAGS
5473  {
5474  FLAG_USER_DATA_STRING = 0x01,
5475  };
5476 
5477 public:
5478  enum ALLOCATION_TYPE
5479  {
5480  ALLOCATION_TYPE_NONE,
5481  ALLOCATION_TYPE_BLOCK,
5482  ALLOCATION_TYPE_DEDICATED,
5483  };
5484 
5485  /*
5486  This struct is allocated using VmaPoolAllocator.
5487  */
5488 
5489  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
5490  m_Alignment{1},
5491  m_Size{0},
5492  m_pUserData{VMA_NULL},
5493  m_LastUseFrameIndex{currentFrameIndex},
5494  m_MemoryTypeIndex{0},
5495  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
5496  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
5497  m_MapCount{0},
5498  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
5499  {
5500 #if VMA_STATS_STRING_ENABLED
5501  m_CreationFrameIndex = currentFrameIndex;
5502  m_BufferImageUsage = 0;
5503 #endif
5504  }
5505 
5506  ~VmaAllocation_T()
5507  {
5508  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5509 
5510  // Check if owned string was freed.
5511  VMA_ASSERT(m_pUserData == VMA_NULL);
5512  }
5513 
5514  void InitBlockAllocation(
5515  VmaDeviceMemoryBlock* block,
5516  VkDeviceSize offset,
5517  VkDeviceSize alignment,
5518  VkDeviceSize size,
5519  uint32_t memoryTypeIndex,
5520  VmaSuballocationType suballocationType,
5521  bool mapped,
5522  bool canBecomeLost)
5523  {
5524  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5525  VMA_ASSERT(block != VMA_NULL);
5526  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5527  m_Alignment = alignment;
5528  m_Size = size;
5529  m_MemoryTypeIndex = memoryTypeIndex;
5530  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5531  m_SuballocationType = (uint8_t)suballocationType;
5532  m_BlockAllocation.m_Block = block;
5533  m_BlockAllocation.m_Offset = offset;
5534  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5535  }
5536 
5537  void InitLost()
5538  {
5539  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5540  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5541  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5542  m_MemoryTypeIndex = 0;
5543  m_BlockAllocation.m_Block = VMA_NULL;
5544  m_BlockAllocation.m_Offset = 0;
5545  m_BlockAllocation.m_CanBecomeLost = true;
5546  }
5547 
5548  void ChangeBlockAllocation(
5549  VmaAllocator hAllocator,
5550  VmaDeviceMemoryBlock* block,
5551  VkDeviceSize offset);
5552 
5553  void ChangeOffset(VkDeviceSize newOffset);
5554 
5555  // pMappedData not null means allocation is created with MAPPED flag.
5556  void InitDedicatedAllocation(
5557  uint32_t memoryTypeIndex,
5558  VkDeviceMemory hMemory,
5559  VmaSuballocationType suballocationType,
5560  void* pMappedData,
5561  VkDeviceSize size)
5562  {
5563  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5564  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5565  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5566  m_Alignment = 0;
5567  m_Size = size;
5568  m_MemoryTypeIndex = memoryTypeIndex;
5569  m_SuballocationType = (uint8_t)suballocationType;
5570  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5571  m_DedicatedAllocation.m_hMemory = hMemory;
5572  m_DedicatedAllocation.m_pMappedData = pMappedData;
5573  }
5574 
5575  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5576  VkDeviceSize GetAlignment() const { return m_Alignment; }
5577  VkDeviceSize GetSize() const { return m_Size; }
5578  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5579  void* GetUserData() const { return m_pUserData; }
5580  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5581  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5582 
5583  VmaDeviceMemoryBlock* GetBlock() const
5584  {
5585  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5586  return m_BlockAllocation.m_Block;
5587  }
5588  VkDeviceSize GetOffset() const;
5589  VkDeviceMemory GetMemory() const;
5590  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5591  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5592  void* GetMappedData() const;
5593  bool CanBecomeLost() const;
5594 
5595  uint32_t GetLastUseFrameIndex() const
5596  {
5597  return m_LastUseFrameIndex.load();
5598  }
5599  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5600  {
5601  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5602  }
5603  /*
5604  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5605  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5606  - Else, returns false.
5607 
5608  If hAllocation is already lost, assert - you should not call it then.
5609  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5610  */
5611  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5612 
5613  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5614  {
5615  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5616  outInfo.blockCount = 1;
5617  outInfo.allocationCount = 1;
5618  outInfo.unusedRangeCount = 0;
5619  outInfo.usedBytes = m_Size;
5620  outInfo.unusedBytes = 0;
5621  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5622  outInfo.unusedRangeSizeMin = UINT64_MAX;
5623  outInfo.unusedRangeSizeMax = 0;
5624  }
5625 
5626  void BlockAllocMap();
5627  void BlockAllocUnmap();
5628  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5629  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5630 
5631 #if VMA_STATS_STRING_ENABLED
5632  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5633  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5634 
5635  void InitBufferImageUsage(uint32_t bufferImageUsage)
5636  {
5637  VMA_ASSERT(m_BufferImageUsage == 0);
5638  m_BufferImageUsage = bufferImageUsage;
5639  }
5640 
5641  void PrintParameters(class VmaJsonWriter& json) const;
5642 #endif
5643 
5644 private:
5645  VkDeviceSize m_Alignment;
5646  VkDeviceSize m_Size;
5647  void* m_pUserData;
5648  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5649  uint32_t m_MemoryTypeIndex;
5650  uint8_t m_Type; // ALLOCATION_TYPE
5651  uint8_t m_SuballocationType; // VmaSuballocationType
5652  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5653  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5654  uint8_t m_MapCount;
5655  uint8_t m_Flags; // enum FLAGS
5656 
5657  // Allocation out of VmaDeviceMemoryBlock.
5658  struct BlockAllocation
5659  {
5660  VmaDeviceMemoryBlock* m_Block;
5661  VkDeviceSize m_Offset;
5662  bool m_CanBecomeLost;
5663  };
5664 
5665  // Allocation for an object that has its own private VkDeviceMemory.
5666  struct DedicatedAllocation
5667  {
5668  VkDeviceMemory m_hMemory;
5669  void* m_pMappedData; // Not null means memory is mapped.
5670  };
5671 
5672  union
5673  {
5674  // Allocation out of VmaDeviceMemoryBlock.
5675  BlockAllocation m_BlockAllocation;
5676  // Allocation for an object that has its own private VkDeviceMemory.
5677  DedicatedAllocation m_DedicatedAllocation;
5678  };
5679 
5680 #if VMA_STATS_STRING_ENABLED
5681  uint32_t m_CreationFrameIndex;
5682  uint32_t m_BufferImageUsage; // 0 if unknown.
5683 #endif
5684 
5685  void FreeUserDataString(VmaAllocator hAllocator);
5686 };
5687 
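
/*
Note how m_MapCount above packs two things into one byte: bit 0x80 is the
"persistently mapped" flag and the low 7 bits count outstanding
vmaMapMemory() calls. A sketch of how such a byte decodes:

\code
uint8_t mapCount = 0x81; // Example: persistent flag + one vmaMapMemory() reference.
bool persistentlyMapped = (mapCount & 0x80) != 0; // MAP_COUNT_FLAG_PERSISTENT_MAP
uint32_t userMapRefs = mapCount & 0x7F;           // == 1 in this example.
// The destructor asserts userMapRefs == 0: every vmaMapMemory() must be
// balanced by vmaUnmapMemory() before the allocation is destroyed.
\endcode
*/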
5688 /*
5689 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
5690 returned as an allocated memory block, or free.
5691 */
5692 struct VmaSuballocation
5693 {
5694  VkDeviceSize offset;
5695  VkDeviceSize size;
5696  VmaAllocation hAllocation;
5697  VmaSuballocationType type;
5698 };
5699 
5700 // Comparator for offsets.
5701 struct VmaSuballocationOffsetLess
5702 {
5703  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5704  {
5705  return lhs.offset < rhs.offset;
5706  }
5707 };
5708 struct VmaSuballocationOffsetGreater
5709 {
5710  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5711  {
5712  return lhs.offset > rhs.offset;
5713  }
5714 };
5715 
5716 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5717 
5718 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5719 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5720 
5721 enum class VmaAllocationRequestType
5722 {
5723  Normal,
5724  // Used by "Linear" algorithm.
5725  UpperAddress,
5726  EndOf1st,
5727  EndOf2nd,
5728 };
5729 
5730 /*
5731 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5732 
5733 If canMakeOtherLost was false:
5734 - item points to a FREE suballocation.
5735 - itemsToMakeLostCount is 0.
5736 
5737 If canMakeOtherLost was true:
5738 - item points to first of sequence of suballocations, which are either FREE,
5739  or point to VmaAllocations that can become lost.
5740 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5741  the requested allocation to succeed.
5742 */
5743 struct VmaAllocationRequest
5744 {
5745  VkDeviceSize offset;
5746  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5747  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5748  VmaSuballocationList::iterator item;
5749  size_t itemsToMakeLostCount;
5750  void* customData;
5751  VmaAllocationRequestType type;
5752 
5753  VkDeviceSize CalcCost() const
5754  {
5755  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5756  }
5757 };
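
/*
CalcCost() lets allocation strategies compare candidate requests: overwriting
live (lost-able) allocations is charged their byte size plus a fixed 1 MiB
penalty per allocation. A worked example:

    sumItemSize = 262144 (256 KiB), itemsToMakeLostCount = 2
    CalcCost()  = 262144 + 2 * 1048576 = 2359296 byte-equivalents

A competing request that fits entirely in free space has cost 0 and is
therefore always preferred.
*/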
5758 
5759 /*
5760 Data structure used for bookkeeping of allocations and unused ranges of memory
5761 in a single VkDeviceMemory block.
5762 */
5763 class VmaBlockMetadata
5764 {
5765 public:
5766  VmaBlockMetadata(VmaAllocator hAllocator);
5767  virtual ~VmaBlockMetadata() { }
5768  virtual void Init(VkDeviceSize size) { m_Size = size; }
5769 
5770  // Validates all data structures inside this object. If not valid, returns false.
5771  virtual bool Validate() const = 0;
5772  VkDeviceSize GetSize() const { return m_Size; }
5773  virtual size_t GetAllocationCount() const = 0;
5774  virtual VkDeviceSize GetSumFreeSize() const = 0;
5775  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5776  // Returns true if this block is empty - contains only a single free suballocation.
5777  virtual bool IsEmpty() const = 0;
5778 
5779  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5780  // Shouldn't modify blockCount.
5781  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5782 
5783 #if VMA_STATS_STRING_ENABLED
5784  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5785 #endif
5786 
5787  // Tries to find a place for suballocation with given parameters inside this block.
5788  // If succeeded, fills pAllocationRequest and returns true.
5789  // If failed, returns false.
5790  virtual bool CreateAllocationRequest(
5791  uint32_t currentFrameIndex,
5792  uint32_t frameInUseCount,
5793  VkDeviceSize bufferImageGranularity,
5794  VkDeviceSize allocSize,
5795  VkDeviceSize allocAlignment,
5796  bool upperAddress,
5797  VmaSuballocationType allocType,
5798  bool canMakeOtherLost,
5799  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5800  uint32_t strategy,
5801  VmaAllocationRequest* pAllocationRequest) = 0;
5802 
5803  virtual bool MakeRequestedAllocationsLost(
5804  uint32_t currentFrameIndex,
5805  uint32_t frameInUseCount,
5806  VmaAllocationRequest* pAllocationRequest) = 0;
5807 
5808  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5809 
5810  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5811 
5812  // Makes actual allocation based on request. Request must already be checked and valid.
5813  virtual void Alloc(
5814  const VmaAllocationRequest& request,
5815  VmaSuballocationType type,
5816  VkDeviceSize allocSize,
5817  VmaAllocation hAllocation) = 0;
5818 
5819  // Frees suballocation assigned to given memory region.
5820  virtual void Free(const VmaAllocation allocation) = 0;
5821  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5822 
5823 protected:
5824  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5825 
5826 #if VMA_STATS_STRING_ENABLED
5827  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5828  VkDeviceSize unusedBytes,
5829  size_t allocationCount,
5830  size_t unusedRangeCount) const;
5831  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5832  VkDeviceSize offset,
5833  VmaAllocation hAllocation) const;
5834  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5835  VkDeviceSize offset,
5836  VkDeviceSize size) const;
5837  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5838 #endif
5839 
5840 private:
5841  VkDeviceSize m_Size;
5842  const VkAllocationCallbacks* m_pAllocationCallbacks;
5843 };
5844 
5845 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5846  VMA_ASSERT(0 && "Validation failed: " #cond); \
5847  return false; \
5848  } } while(false)
5849 
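
/*
The do { ... } while(false) wrapper makes VMA_VALIDATE expand to a single
statement, so it composes safely with if/else and requires the usual
trailing semicolon. A sketch of why that matters (Validate and its members
are hypothetical here):

\code
bool Validate() const
{
    if(m_Size != 0)
        VMA_VALIDATE(m_SumFreeSize <= m_Size); // One statement; the else
    else                                       // below still binds to the
        return true;                           // outer if, as intended.
    return true;
}
\endcode

Without the wrapper, the macro's closing brace before the semicolon would
make the else a syntax error or silently pair it with the wrong if.
*/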
5850 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5851 {
5852  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5853 public:
5854  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5855  virtual ~VmaBlockMetadata_Generic();
5856  virtual void Init(VkDeviceSize size);
5857 
5858  virtual bool Validate() const;
5859  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5860  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5861  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5862  virtual bool IsEmpty() const;
5863 
5864  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5865  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5866 
5867 #if VMA_STATS_STRING_ENABLED
5868  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5869 #endif
5870 
5871  virtual bool CreateAllocationRequest(
5872  uint32_t currentFrameIndex,
5873  uint32_t frameInUseCount,
5874  VkDeviceSize bufferImageGranularity,
5875  VkDeviceSize allocSize,
5876  VkDeviceSize allocAlignment,
5877  bool upperAddress,
5878  VmaSuballocationType allocType,
5879  bool canMakeOtherLost,
5880  uint32_t strategy,
5881  VmaAllocationRequest* pAllocationRequest);
5882 
5883  virtual bool MakeRequestedAllocationsLost(
5884  uint32_t currentFrameIndex,
5885  uint32_t frameInUseCount,
5886  VmaAllocationRequest* pAllocationRequest);
5887 
5888  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5889 
5890  virtual VkResult CheckCorruption(const void* pBlockData);
5891 
5892  virtual void Alloc(
5893  const VmaAllocationRequest& request,
5894  VmaSuballocationType type,
5895  VkDeviceSize allocSize,
5896  VmaAllocation hAllocation);
5897 
5898  virtual void Free(const VmaAllocation allocation);
5899  virtual void FreeAtOffset(VkDeviceSize offset);
5900 
5901  ////////////////////////////////////////////////////////////////////////////////
5902  // For defragmentation
5903 
5904  bool IsBufferImageGranularityConflictPossible(
5905  VkDeviceSize bufferImageGranularity,
5906  VmaSuballocationType& inOutPrevSuballocType) const;
5907 
5908 private:
5909  friend class VmaDefragmentationAlgorithm_Generic;
5910  friend class VmaDefragmentationAlgorithm_Fast;
5911 
5912  uint32_t m_FreeCount;
5913  VkDeviceSize m_SumFreeSize;
5914  VmaSuballocationList m_Suballocations;
5915  // Suballocations that are free and have size greater than a certain threshold.
5916  // Sorted by size, ascending.
5917  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5918 
5919  bool ValidateFreeSuballocationList() const;
5920 
5921  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5922  // If yes, fills pOffset and returns true. If no, returns false.
5923  bool CheckAllocation(
5924  uint32_t currentFrameIndex,
5925  uint32_t frameInUseCount,
5926  VkDeviceSize bufferImageGranularity,
5927  VkDeviceSize allocSize,
5928  VkDeviceSize allocAlignment,
5929  VmaSuballocationType allocType,
5930  VmaSuballocationList::const_iterator suballocItem,
5931  bool canMakeOtherLost,
5932  VkDeviceSize* pOffset,
5933  size_t* itemsToMakeLostCount,
5934  VkDeviceSize* pSumFreeSize,
5935  VkDeviceSize* pSumItemSize) const;
5936  // Merges the given free suballocation with the following one, which must also be free.
5937  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5938  // Releases given suballocation, making it free.
5939  // Merges it with adjacent free suballocations if applicable.
5940  // Returns iterator to new free suballocation at this place.
5941  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5942  // Inserts the given free suballocation into the sorted list
5943  // m_FreeSuballocationsBySize, if it qualifies.
5944  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5945  // Removes the given free suballocation from the sorted list
5946  // m_FreeSuballocationsBySize, if present there.
5947  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5948 };
5949 
5950 /*
5951 Allocations and their references in internal data structure look like this:
5952 
5953 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5954 
5955  0 +-------+
5956  | |
5957  | |
5958  | |
5959  +-------+
5960  | Alloc | 1st[m_1stNullItemsBeginCount]
5961  +-------+
5962  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5963  +-------+
5964  | ... |
5965  +-------+
5966  | Alloc | 1st[1st.size() - 1]
5967  +-------+
5968  | |
5969  | |
5970  | |
5971 GetSize() +-------+
5972 
5973 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5974 
5975  0 +-------+
5976  | Alloc | 2nd[0]
5977  +-------+
5978  | Alloc | 2nd[1]
5979  +-------+
5980  | ... |
5981  +-------+
5982  | Alloc | 2nd[2nd.size() - 1]
5983  +-------+
5984  | |
5985  | |
5986  | |
5987  +-------+
5988  | Alloc | 1st[m_1stNullItemsBeginCount]
5989  +-------+
5990  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5991  +-------+
5992  | ... |
5993  +-------+
5994  | Alloc | 1st[1st.size() - 1]
5995  +-------+
5996  | |
5997 GetSize() +-------+
5998 
5999 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6000 
6001  0 +-------+
6002  | |
6003  | |
6004  | |
6005  +-------+
6006  | Alloc | 1st[m_1stNullItemsBeginCount]
6007  +-------+
6008  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6009  +-------+
6010  | ... |
6011  +-------+
6012  | Alloc | 1st[1st.size() - 1]
6013  +-------+
6014  | |
6015  | |
6016  | |
6017  +-------+
6018  | Alloc | 2nd[2nd.size() - 1]
6019  +-------+
6020  | ... |
6021  +-------+
6022  | Alloc | 2nd[1]
6023  +-------+
6024  | Alloc | 2nd[0]
6025 GetSize() +-------+
6026 
6027 */
6028 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6029 {
6030  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6031 public:
6032  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6033  virtual ~VmaBlockMetadata_Linear();
6034  virtual void Init(VkDeviceSize size);
6035 
6036  virtual bool Validate() const;
6037  virtual size_t GetAllocationCount() const;
6038  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6039  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6040  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6041 
6042  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6043  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6044 
6045 #if VMA_STATS_STRING_ENABLED
6046  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6047 #endif
6048 
6049  virtual bool CreateAllocationRequest(
6050  uint32_t currentFrameIndex,
6051  uint32_t frameInUseCount,
6052  VkDeviceSize bufferImageGranularity,
6053  VkDeviceSize allocSize,
6054  VkDeviceSize allocAlignment,
6055  bool upperAddress,
6056  VmaSuballocationType allocType,
6057  bool canMakeOtherLost,
6058  uint32_t strategy,
6059  VmaAllocationRequest* pAllocationRequest);
6060 
6061  virtual bool MakeRequestedAllocationsLost(
6062  uint32_t currentFrameIndex,
6063  uint32_t frameInUseCount,
6064  VmaAllocationRequest* pAllocationRequest);
6065 
6066  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6067 
6068  virtual VkResult CheckCorruption(const void* pBlockData);
6069 
6070  virtual void Alloc(
6071  const VmaAllocationRequest& request,
6072  VmaSuballocationType type,
6073  VkDeviceSize allocSize,
6074  VmaAllocation hAllocation);
6075 
6076  virtual void Free(const VmaAllocation allocation);
6077  virtual void FreeAtOffset(VkDeviceSize offset);
6078 
6079 private:
6080  /*
6081  There are two suballocation vectors, used in a ping-pong fashion.
6082  The one with index m_1stVectorIndex is called 1st.
6083  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6084  2nd can be non-empty only when 1st is not empty.
6085  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6086  */
6087  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6088 
6089  enum SECOND_VECTOR_MODE
6090  {
6091  SECOND_VECTOR_EMPTY,
6092  /*
6093  Suballocations in 2nd vector are created later than the ones in 1st, but they
6094  all have smaller offsets.
6095  */
6096  SECOND_VECTOR_RING_BUFFER,
6097  /*
6098  Suballocations in 2nd vector are upper side of double stack.
6099  They all have offsets higher than those in 1st vector.
6100  Top of this stack means smaller offsets, but higher indices in this vector.
6101  */
6102  SECOND_VECTOR_DOUBLE_STACK,
6103  };
6104 
6105  VkDeviceSize m_SumFreeSize;
6106  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6107  uint32_t m_1stVectorIndex;
6108  SECOND_VECTOR_MODE m_2ndVectorMode;
6109 
6110  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6111  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6112  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6113  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6114 
6115  // Number of items in 1st vector with hAllocation = null at the beginning.
6116  size_t m_1stNullItemsBeginCount;
6117  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6118  size_t m_1stNullItemsMiddleCount;
6119  // Number of items in 2nd vector with hAllocation = null.
6120  size_t m_2ndNullItemsCount;
6121 
6122  bool ShouldCompact1st() const;
6123  void CleanupAfterFree();
6124 
6125  bool CreateAllocationRequest_LowerAddress(
6126  uint32_t currentFrameIndex,
6127  uint32_t frameInUseCount,
6128  VkDeviceSize bufferImageGranularity,
6129  VkDeviceSize allocSize,
6130  VkDeviceSize allocAlignment,
6131  VmaSuballocationType allocType,
6132  bool canMakeOtherLost,
6133  uint32_t strategy,
6134  VmaAllocationRequest* pAllocationRequest);
6135  bool CreateAllocationRequest_UpperAddress(
6136  uint32_t currentFrameIndex,
6137  uint32_t frameInUseCount,
6138  VkDeviceSize bufferImageGranularity,
6139  VkDeviceSize allocSize,
6140  VkDeviceSize allocAlignment,
6141  VmaSuballocationType allocType,
6142  bool canMakeOtherLost,
6143  uint32_t strategy,
6144  VmaAllocationRequest* pAllocationRequest);
6145 };
6146 
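
/*
How the linear metadata hands out space, as a sketch: 1st grows upward from
low offsets; in DOUBLE_STACK mode 2nd grows downward from GetSize(), and in
RING_BUFFER mode new allocations wrap around below the oldest ones.
Hypothetical offset bookkeeping for the double-stack case:

\code
VkDeviceSize blockSize = 1048576;   // GetSize(), example value.
VkDeviceSize firstTop  = 0;         // One past the end of 1st (grows up).
VkDeviceSize secondBot = blockSize; // Start of 2nd (grows down).

// Lower-address request of allocSize: place at firstTop, then advance firstTop.
// Upper-address request of allocSize: secondBot -= allocSize, place there.
// The block is exhausted when firstTop + allocSize > secondBot.
\endcode

The real logic additionally handles alignment, bufferImageGranularity and
null items; see CreateAllocationRequest_LowerAddress/_UpperAddress.
*/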
6147 /*
6148 - GetSize() is the original size of allocated memory block.
6149 - m_UsableSize is this size aligned down to a power of two.
6150  All allocations and calculations happen relative to m_UsableSize.
6151 - GetUnusableSize() is the difference between them.
6152  It is reported as a separate, unused range, not available for allocations.
6153 
6154 Node at level 0 has size = m_UsableSize.
6155 Each subsequent level contains nodes 2 times smaller than those on the previous level.
6156 m_LevelCount is the maximum number of levels to use in the current object.
6157 */
6158 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6159 {
6160  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6161 public:
6162  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6163  virtual ~VmaBlockMetadata_Buddy();
6164  virtual void Init(VkDeviceSize size);
6165 
6166  virtual bool Validate() const;
6167  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6168  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6169  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6170  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6171 
6172  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6173  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6174 
6175 #if VMA_STATS_STRING_ENABLED
6176  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6177 #endif
6178 
6179  virtual bool CreateAllocationRequest(
6180  uint32_t currentFrameIndex,
6181  uint32_t frameInUseCount,
6182  VkDeviceSize bufferImageGranularity,
6183  VkDeviceSize allocSize,
6184  VkDeviceSize allocAlignment,
6185  bool upperAddress,
6186  VmaSuballocationType allocType,
6187  bool canMakeOtherLost,
6188  uint32_t strategy,
6189  VmaAllocationRequest* pAllocationRequest);
6190 
6191  virtual bool MakeRequestedAllocationsLost(
6192  uint32_t currentFrameIndex,
6193  uint32_t frameInUseCount,
6194  VmaAllocationRequest* pAllocationRequest);
6195 
6196  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6197 
6198  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6199 
6200  virtual void Alloc(
6201  const VmaAllocationRequest& request,
6202  VmaSuballocationType type,
6203  VkDeviceSize allocSize,
6204  VmaAllocation hAllocation);
6205 
6206  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6207  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6208 
6209 private:
6210  static const VkDeviceSize MIN_NODE_SIZE = 32;
6211  static const size_t MAX_LEVELS = 30;
6212 
6213  struct ValidationContext
6214  {
6215  size_t calculatedAllocationCount;
6216  size_t calculatedFreeCount;
6217  VkDeviceSize calculatedSumFreeSize;
6218 
6219  ValidationContext() :
6220  calculatedAllocationCount(0),
6221  calculatedFreeCount(0),
6222  calculatedSumFreeSize(0) { }
6223  };
6224 
6225  struct Node
6226  {
6227  VkDeviceSize offset;
6228  enum TYPE
6229  {
6230  TYPE_FREE,
6231  TYPE_ALLOCATION,
6232  TYPE_SPLIT,
6233  TYPE_COUNT
6234  } type;
6235  Node* parent;
6236  Node* buddy;
6237 
6238  union
6239  {
6240  struct
6241  {
6242  Node* prev;
6243  Node* next;
6244  } free;
6245  struct
6246  {
6247  VmaAllocation alloc;
6248  } allocation;
6249  struct
6250  {
6251  Node* leftChild;
6252  } split;
6253  };
6254  };
6255 
6256  // Size of the memory block aligned down to a power of two.
6257  VkDeviceSize m_UsableSize;
6258  uint32_t m_LevelCount;
6259 
6260  Node* m_Root;
6261  struct {
6262  Node* front;
6263  Node* back;
6264  } m_FreeList[MAX_LEVELS];
6265  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6266  size_t m_AllocationCount;
6267  // Number of nodes in the tree with type == TYPE_FREE.
6268  size_t m_FreeCount;
6269  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6270  VkDeviceSize m_SumFreeSize;
6271 
6272  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6273  void DeleteNode(Node* node);
6274  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6275  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6276  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6277  // Alloc passed just for validation. Can be null.
6278  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6279  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6280  // Adds node to the front of FreeList at given level.
6281  // node->type must be FREE.
6282  // node->free.prev, next can be undefined.
6283  void AddToFreeListFront(uint32_t level, Node* node);
6284  // Removes node from FreeList at given level.
6285  // node->type must be FREE.
6286  // node->free.prev, next stay untouched.
6287  void RemoveFromFreeList(uint32_t level, Node* node);
6288 
6289 #if VMA_STATS_STRING_ENABLED
6290  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6291 #endif
6292 };
6293 
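
/*
The buddy metadata's level arithmetic in numbers, assuming
m_UsableSize = 1 MiB (1048576 = 2^20):

    LevelToNodeSize(0) = 1048576
    LevelToNodeSize(1) =  524288
    LevelToNodeSize(4) =   65536
    LevelToNodeSize(5) =   32768

An allocation of 40000 bytes needs the smallest node size >= 40000, which is
65536, so AllocSizeToLevel(40000) == 4. On Free, a node whose buddy is also
free merges back into its parent node, recursively; that merging is what
keeps external fragmentation bounded in this algorithm.
*/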
6294 /*
6295 Represents a single block of device memory (`VkDeviceMemory`) with all the
6296 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6297 
6298 Thread-safety: This class must be externally synchronized.
6299 */
6300 class VmaDeviceMemoryBlock
6301 {
6302  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6303 public:
6304  VmaBlockMetadata* m_pMetadata;
6305 
6306  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6307 
6308  ~VmaDeviceMemoryBlock()
6309  {
6310  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6311  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6312  }
6313 
6314  // Always call after construction.
6315  void Init(
6316  VmaAllocator hAllocator,
6317  VmaPool hParentPool,
6318  uint32_t newMemoryTypeIndex,
6319  VkDeviceMemory newMemory,
6320  VkDeviceSize newSize,
6321  uint32_t id,
6322  uint32_t algorithm);
6323  // Always call before destruction.
6324  void Destroy(VmaAllocator allocator);
6325 
6326  VmaPool GetParentPool() const { return m_hParentPool; }
6327  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6328  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6329  uint32_t GetId() const { return m_Id; }
6330  void* GetMappedData() const { return m_pMappedData; }
6331 
6332  // Validates all data structures inside this object. If not valid, returns false.
6333  bool Validate() const;
6334 
6335  VkResult CheckCorruption(VmaAllocator hAllocator);
6336 
6337  // ppData can be null.
6338  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6339  void Unmap(VmaAllocator hAllocator, uint32_t count);
6340 
6341  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6342  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6343 
6344  VkResult BindBufferMemory(
6345  const VmaAllocator hAllocator,
6346  const VmaAllocation hAllocation,
6347  VkDeviceSize allocationLocalOffset,
6348  VkBuffer hBuffer,
6349  const void* pNext);
6350  VkResult BindImageMemory(
6351  const VmaAllocator hAllocator,
6352  const VmaAllocation hAllocation,
6353  VkDeviceSize allocationLocalOffset,
6354  VkImage hImage,
6355  const void* pNext);
6356 
6357 private:
6358  VmaPool m_hParentPool; // VK_NULL_HANDLE if it does not belong to a custom pool.
6359  uint32_t m_MemoryTypeIndex;
6360  uint32_t m_Id;
6361  VkDeviceMemory m_hMemory;
6362 
6363  /*
6364  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6365  Also protects m_MapCount, m_pMappedData.
6366  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
6367  */
6368  VMA_MUTEX m_Mutex;
6369  uint32_t m_MapCount;
6370  void* m_pMappedData;
6371 };
6372 
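
/*
Mapping of a block is reference-counted: Map/Unmap take a count, vkMapMemory
is called only on the zero-to-nonzero transition and vkUnmapMemory only when
the count returns to zero. A usage sketch with hypothetical `block` and
`hAllocator`:

\code
void* pData = VMA_NULL;
VkResult res = block.Map(hAllocator, 1, &pData); // count = 1: one reference.
if(res == VK_SUCCESS)
{
    // ... read/write through pData ...
    block.Unmap(hAllocator, 1); // Must balance the Map() count exactly.
}
\endcode
*/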
6373 struct VmaPointerLess
6374 {
6375  bool operator()(const void* lhs, const void* rhs) const
6376  {
6377  return lhs < rhs;
6378  }
6379 };
6380 
6381 struct VmaDefragmentationMove
6382 {
6383  size_t srcBlockIndex;
6384  size_t dstBlockIndex;
6385  VkDeviceSize srcOffset;
6386  VkDeviceSize dstOffset;
6387  VkDeviceSize size;
6388  VmaAllocation hAllocation;
6389  VmaDeviceMemoryBlock* pSrcBlock;
6390  VmaDeviceMemoryBlock* pDstBlock;
6391 };
6392 
6393 class VmaDefragmentationAlgorithm;
6394 
6395 /*
6396 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6397 Vulkan memory type.
6398 
6399 Synchronized internally with a mutex.
6400 */
6401 struct VmaBlockVector
6402 {
6403  VMA_CLASS_NO_COPY(VmaBlockVector)
6404 public:
6405  VmaBlockVector(
6406  VmaAllocator hAllocator,
6407  VmaPool hParentPool,
6408  uint32_t memoryTypeIndex,
6409  VkDeviceSize preferredBlockSize,
6410  size_t minBlockCount,
6411  size_t maxBlockCount,
6412  VkDeviceSize bufferImageGranularity,
6413  uint32_t frameInUseCount,
6414  bool explicitBlockSize,
6415  uint32_t algorithm);
6416  ~VmaBlockVector();
6417 
6418  VkResult CreateMinBlocks();
6419 
6420  VmaAllocator GetAllocator() const { return m_hAllocator; }
6421  VmaPool GetParentPool() const { return m_hParentPool; }
6422  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6423  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6424  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6425  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6426  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6427  uint32_t GetAlgorithm() const { return m_Algorithm; }
6428 
6429  void GetPoolStats(VmaPoolStats* pStats);
6430 
6431  bool IsEmpty();
6432  bool IsCorruptionDetectionEnabled() const;
6433 
6434  VkResult Allocate(
6435  uint32_t currentFrameIndex,
6436  VkDeviceSize size,
6437  VkDeviceSize alignment,
6438  const VmaAllocationCreateInfo& createInfo,
6439  VmaSuballocationType suballocType,
6440  size_t allocationCount,
6441  VmaAllocation* pAllocations);
6442 
6443  void Free(const VmaAllocation hAllocation);
6444 
6445  // Adds statistics of this BlockVector to pStats.
6446  void AddStats(VmaStats* pStats);
6447 
6448 #if VMA_STATS_STRING_ENABLED
6449  void PrintDetailedMap(class VmaJsonWriter& json);
6450 #endif
6451 
6452  void MakePoolAllocationsLost(
6453  uint32_t currentFrameIndex,
6454  size_t* pLostAllocationCount);
6455  VkResult CheckCorruption();
6456 
6457  // Saves results in pCtx->res.
6458  void Defragment(
6459  class VmaBlockVectorDefragmentationContext* pCtx,
6460  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
6461  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6462  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6463  VkCommandBuffer commandBuffer);
6464  void DefragmentationEnd(
6465  class VmaBlockVectorDefragmentationContext* pCtx,
6466  VmaDefragmentationStats* pStats);
6467 
6468  uint32_t ProcessDefragmentations(
6469  class VmaBlockVectorDefragmentationContext *pCtx,
6470  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
6471 
6472  void CommitDefragmentations(
6473  class VmaBlockVectorDefragmentationContext *pCtx,
6474  VmaDefragmentationStats* pStats);
6475 
6476  ////////////////////////////////////////////////////////////////////////////////
6477  // To be used only while the m_Mutex is locked. Used during defragmentation.
6478 
6479  size_t GetBlockCount() const { return m_Blocks.size(); }
6480  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6481  size_t CalcAllocationCount() const;
6482  bool IsBufferImageGranularityConflictPossible() const;
6483 
6484 private:
6485  friend class VmaDefragmentationAlgorithm_Generic;
6486 
6487  const VmaAllocator m_hAllocator;
6488  const VmaPool m_hParentPool;
6489  const uint32_t m_MemoryTypeIndex;
6490  const VkDeviceSize m_PreferredBlockSize;
6491  const size_t m_MinBlockCount;
6492  const size_t m_MaxBlockCount;
6493  const VkDeviceSize m_BufferImageGranularity;
6494  const uint32_t m_FrameInUseCount;
6495  const bool m_ExplicitBlockSize;
6496  const uint32_t m_Algorithm;
6497  VMA_RW_MUTEX m_Mutex;
6498 
6499  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6500  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6501  bool m_HasEmptyBlock;
6502  // Incrementally sorted by sumFreeSize, ascending.
6503  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6504  uint32_t m_NextBlockId;
6505 
6506  VkDeviceSize CalcMaxBlockSize() const;
6507 
6508  // Finds and removes given block from vector.
6509  void Remove(VmaDeviceMemoryBlock* pBlock);
6510 
6511  // Performs a single step in sorting m_Blocks. They may not be fully sorted
6512  // after this call.
6513  void IncrementallySortBlocks();
6514 
6515  VkResult AllocatePage(
6516  uint32_t currentFrameIndex,
6517  VkDeviceSize size,
6518  VkDeviceSize alignment,
6519  const VmaAllocationCreateInfo& createInfo,
6520  VmaSuballocationType suballocType,
6521  VmaAllocation* pAllocation);
6522 
6523  // To be used only without CAN_MAKE_OTHER_LOST flag.
6524  VkResult AllocateFromBlock(
6525  VmaDeviceMemoryBlock* pBlock,
6526  uint32_t currentFrameIndex,
6527  VkDeviceSize size,
6528  VkDeviceSize alignment,
6529  VmaAllocationCreateFlags allocFlags,
6530  void* pUserData,
6531  VmaSuballocationType suballocType,
6532  uint32_t strategy,
6533  VmaAllocation* pAllocation);
6534 
6535  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6536 
6537  // Saves result to pCtx->res.
6538  void ApplyDefragmentationMovesCpu(
6539  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6540  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6541  // Saves result to pCtx->res.
6542  void ApplyDefragmentationMovesGpu(
6543  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6544  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6545  VkCommandBuffer commandBuffer);
6546 
6547  /*
6548  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6549  - updated with new data.
6550  */
6551  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6552 
6553  void UpdateHasEmptyBlock();
6554 };
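
/*
A sketch of the "single step" sorting mentioned above: one adjacent swap per
call keeps m_Blocks approximately sorted by sumFreeSize without paying for a
full sort on every allocation (hypothetical code; the real version is
IncrementallySortBlocks):

\code
for(size_t i = 1; i < m_Blocks.size(); ++i)
{
    if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
        m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    {
        VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]); // Fix the first out-of-order
        return;                                 // pair, then stop: one step.
    }
}
\endcode
*/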
6555 
6556 struct VmaPool_T
6557 {
6558  VMA_CLASS_NO_COPY(VmaPool_T)
6559 public:
6560  VmaBlockVector m_BlockVector;
6561 
6562  VmaPool_T(
6563  VmaAllocator hAllocator,
6564  const VmaPoolCreateInfo& createInfo,
6565  VkDeviceSize preferredBlockSize);
6566  ~VmaPool_T();
6567 
6568  uint32_t GetId() const { return m_Id; }
6569  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6570 
6571  const char* GetName() const { return m_Name; }
6572  void SetName(const char* pName);
6573 
6574 #if VMA_STATS_STRING_ENABLED
6575  //void PrintDetailedMap(class VmaStringBuilder& sb);
6576 #endif
6577 
6578 private:
6579  uint32_t m_Id;
6580  char* m_Name;
6581 };
6582 
6583 /*
6584 Performs defragmentation:
6585 
6586 - Updates `pBlockVector->m_pMetadata`.
6587 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6588 - Does not move actual data, only returns requested moves as `moves`.
6589 */
6590 class VmaDefragmentationAlgorithm
6591 {
6592  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6593 public:
6594  VmaDefragmentationAlgorithm(
6595  VmaAllocator hAllocator,
6596  VmaBlockVector* pBlockVector,
6597  uint32_t currentFrameIndex) :
6598  m_hAllocator(hAllocator),
6599  m_pBlockVector(pBlockVector),
6600  m_CurrentFrameIndex(currentFrameIndex)
6601  {
6602  }
6603  virtual ~VmaDefragmentationAlgorithm()
6604  {
6605  }
6606 
6607  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6608  virtual void AddAll() = 0;
6609 
6610  virtual VkResult Defragment(
6611  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6612  VkDeviceSize maxBytesToMove,
6613  uint32_t maxAllocationsToMove,
6614  VmaDefragmentationFlags flags) = 0;
6615 
6616  virtual VkDeviceSize GetBytesMoved() const = 0;
6617  virtual uint32_t GetAllocationsMoved() const = 0;
6618 
6619 protected:
6620  VmaAllocator const m_hAllocator;
6621  VmaBlockVector* const m_pBlockVector;
6622  const uint32_t m_CurrentFrameIndex;
6623 
6624  struct AllocationInfo
6625  {
6626  VmaAllocation m_hAllocation;
6627  VkBool32* m_pChanged;
6628 
6629  AllocationInfo() :
6630  m_hAllocation(VK_NULL_HANDLE),
6631  m_pChanged(VMA_NULL)
6632  {
6633  }
6634  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6635  m_hAllocation(hAlloc),
6636  m_pChanged(pChanged)
6637  {
6638  }
6639  };
6640 };
6641 
6642 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6643 {
6644  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6645 public:
6646  VmaDefragmentationAlgorithm_Generic(
6647  VmaAllocator hAllocator,
6648  VmaBlockVector* pBlockVector,
6649  uint32_t currentFrameIndex,
6650  bool overlappingMoveSupported);
6651  virtual ~VmaDefragmentationAlgorithm_Generic();
6652 
6653  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6654  virtual void AddAll() { m_AllAllocations = true; }
6655 
6656  virtual VkResult Defragment(
6657  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6658  VkDeviceSize maxBytesToMove,
6659  uint32_t maxAllocationsToMove,
6660  VmaDefragmentationFlags flags);
6661 
6662  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6663  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6664 
6665 private:
6666  uint32_t m_AllocationCount;
6667  bool m_AllAllocations;
6668 
6669  VkDeviceSize m_BytesMoved;
6670  uint32_t m_AllocationsMoved;
6671 
6672  struct AllocationInfoSizeGreater
6673  {
6674  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6675  {
6676  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6677  }
6678  };
6679 
6680  struct AllocationInfoOffsetGreater
6681  {
6682  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6683  {
6684  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6685  }
6686  };
6687 
6688  struct BlockInfo
6689  {
6690  size_t m_OriginalBlockIndex;
6691  VmaDeviceMemoryBlock* m_pBlock;
6692  bool m_HasNonMovableAllocations;
6693  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6694 
6695  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6696  m_OriginalBlockIndex(SIZE_MAX),
6697  m_pBlock(VMA_NULL),
6698  m_HasNonMovableAllocations(true),
6699  m_Allocations(pAllocationCallbacks)
6700  {
6701  }
6702 
6703  void CalcHasNonMovableAllocations()
6704  {
6705  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6706  const size_t defragmentAllocCount = m_Allocations.size();
6707  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6708  }
6709 
6710  void SortAllocationsBySizeDescending()
6711  {
6712  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6713  }
6714 
6715  void SortAllocationsByOffsetDescending()
6716  {
6717  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6718  }
6719  };
6720 
6721  struct BlockPointerLess
6722  {
6723  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6724  {
6725  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6726  }
6727  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6728  {
6729  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6730  }
6731  };
6732 
6733  // 1. Blocks with some non-movable allocations go first.
6734  // 2. Blocks with smaller sumFreeSize go first.
6735  struct BlockInfoCompareMoveDestination
6736  {
6737  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6738  {
6739  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6740  {
6741  return true;
6742  }
6743  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6744  {
6745  return false;
6746  }
6747  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6748  {
6749  return true;
6750  }
6751  return false;
6752  }
6753  };
6754 
6755  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6756  BlockInfoVector m_Blocks;
6757 
6758  VkResult DefragmentRound(
6759  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6760  VkDeviceSize maxBytesToMove,
6761  uint32_t maxAllocationsToMove,
6762  bool freeOldAllocations);
6763 
6764  size_t CalcBlocksWithNonMovableCount() const;
6765 
6766  static bool MoveMakesSense(
6767  size_t dstBlockIndex, VkDeviceSize dstOffset,
6768  size_t srcBlockIndex, VkDeviceSize srcOffset);
6769 };
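#if 0 // Illustrative sketch, not part of VMA: the two-key ordering that
// BlockInfoCompareMoveDestination implements, restated with plain std:: types.
// Blocks holding non-movable allocations sort first, then blocks with a
// smaller sum of free space, so defragmentation fills the most constrained
// destinations before touching freer blocks.
#include <algorithm>
#include <vector>

struct BlockSketch
{
    bool hasNonMovableAllocations;
    unsigned long long sumFreeSize;
};

static bool MoveDestinationLess(const BlockSketch& lhs, const BlockSketch& rhs)
{
    if(lhs.hasNonMovableAllocations != rhs.hasNonMovableAllocations)
    {
        return lhs.hasNonMovableAllocations; // 1. Non-movable blocks go first.
    }
    return lhs.sumFreeSize < rhs.sumFreeSize; // 2. Smaller sumFreeSize goes first.
}

// Usage: std::sort(blocks.begin(), blocks.end(), MoveDestinationLess);
#endif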
6770 
6771 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6772 {
6773  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6774 public:
6775  VmaDefragmentationAlgorithm_Fast(
6776  VmaAllocator hAllocator,
6777  VmaBlockVector* pBlockVector,
6778  uint32_t currentFrameIndex,
6779  bool overlappingMoveSupported);
6780  virtual ~VmaDefragmentationAlgorithm_Fast();
6781 
6782  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6783  virtual void AddAll() { m_AllAllocations = true; }
6784 
6785  virtual VkResult Defragment(
6786  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6787  VkDeviceSize maxBytesToMove,
6788  uint32_t maxAllocationsToMove,
6789  VmaDefragmentationFlags flags);
6790 
6791  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6792  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6793 
6794 private:
6795  struct BlockInfo
6796  {
6797  size_t origBlockIndex;
6798  };
6799 
6800  class FreeSpaceDatabase
6801  {
6802  public:
6803  FreeSpaceDatabase()
6804  {
6805  FreeSpace s = {};
6806  s.blockInfoIndex = SIZE_MAX;
6807  for(size_t i = 0; i < MAX_COUNT; ++i)
6808  {
6809  m_FreeSpaces[i] = s;
6810  }
6811  }
6812 
6813  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6814  {
6815  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6816  {
6817  return;
6818  }
6819 
6820  // Find the first unused slot, or else the smallest registered free space smaller than this one (which it will replace).
6821  size_t bestIndex = SIZE_MAX;
6822  for(size_t i = 0; i < MAX_COUNT; ++i)
6823  {
6824  // Empty structure.
6825  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6826  {
6827  bestIndex = i;
6828  break;
6829  }
6830  if(m_FreeSpaces[i].size < size &&
6831  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6832  {
6833  bestIndex = i;
6834  }
6835  }
6836 
6837  if(bestIndex != SIZE_MAX)
6838  {
6839  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6840  m_FreeSpaces[bestIndex].offset = offset;
6841  m_FreeSpaces[bestIndex].size = size;
6842  }
6843  }
6844 
6845  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6846  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6847  {
6848  size_t bestIndex = SIZE_MAX;
6849  VkDeviceSize bestFreeSpaceAfter = 0;
6850  for(size_t i = 0; i < MAX_COUNT; ++i)
6851  {
6852  // Structure is valid.
6853  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6854  {
6855  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6856  // Allocation fits into this structure.
6857  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6858  {
6859  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6860  (dstOffset + size);
6861  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6862  {
6863  bestIndex = i;
6864  bestFreeSpaceAfter = freeSpaceAfter;
6865  }
6866  }
6867  }
6868  }
6869 
6870  if(bestIndex != SIZE_MAX)
6871  {
6872  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6873  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6874 
6875  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6876  {
6877  // Leave this structure for remaining empty space.
6878  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6879  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6880  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6881  }
6882  else
6883  {
6884  // This structure becomes invalid.
6885  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6886  }
6887 
6888  return true;
6889  }
6890 
6891  return false;
6892  }
6893 
6894  private:
6895  static const size_t MAX_COUNT = 4;
6896 
6897  struct FreeSpace
6898  {
6899  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6900  VkDeviceSize offset;
6901  VkDeviceSize size;
6902  } m_FreeSpaces[MAX_COUNT];
6903  };
6904 
6905  const bool m_OverlappingMoveSupported;
6906 
6907  uint32_t m_AllocationCount;
6908  bool m_AllAllocations;
6909 
6910  VkDeviceSize m_BytesMoved;
6911  uint32_t m_AllocationsMoved;
6912 
6913  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6914 
6915  void PreprocessMetadata();
6916  void PostprocessMetadata();
6917  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6918 };
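#if 0 // Illustrative sketch, not part of VMA: the align-up arithmetic behind
// FreeSpaceDatabase::Fetch. An allocation of `size` bytes fits a free range
// [offset, offset + rangeSize) only if rounding the range start up to
// `alignment` still leaves `size` bytes before the end of the range.
#include <cstdint>

static uint64_t AlignUpSketch(uint64_t value, uint64_t alignment)
{
    // Assumes alignment is a power of two, as Vulkan alignments are.
    return (value + alignment - 1) & ~(alignment - 1);
}

static bool FitsSketch(uint64_t offset, uint64_t rangeSize,
    uint64_t alignment, uint64_t size)
{
    const uint64_t dstOffset = AlignUpSketch(offset, alignment);
    return dstOffset + size <= offset + rangeSize;
}

// Example: FitsSketch(40, 100, 32, 64) is true: the aligned start is 64 and
// the allocation ends at 128, within the range end of 140.
#endif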
6919 
6920 struct VmaBlockDefragmentationContext
6921 {
6922  enum BLOCK_FLAG
6923  {
6924  BLOCK_FLAG_USED = 0x00000001,
6925  };
6926  uint32_t flags;
6927  VkBuffer hBuffer;
6928 };
6929 
6930 class VmaBlockVectorDefragmentationContext
6931 {
6932  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6933 public:
6934  VkResult res;
6935  bool mutexLocked;
6936  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6937  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
6938  uint32_t defragmentationMovesProcessed;
6939  uint32_t defragmentationMovesCommitted;
6940  bool hasDefragmentationPlan;
6941 
6942  VmaBlockVectorDefragmentationContext(
6943  VmaAllocator hAllocator,
6944  VmaPool hCustomPool, // Optional.
6945  VmaBlockVector* pBlockVector,
6946  uint32_t currFrameIndex);
6947  ~VmaBlockVectorDefragmentationContext();
6948 
6949  VmaPool GetCustomPool() const { return m_hCustomPool; }
6950  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6951  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6952 
6953  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6954  void AddAll() { m_AllAllocations = true; }
6955 
6956  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
6957 
6958 private:
6959  const VmaAllocator m_hAllocator;
6960  // Null if not from custom pool.
6961  const VmaPool m_hCustomPool;
6962  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6963  VmaBlockVector* const m_pBlockVector;
6964  const uint32_t m_CurrFrameIndex;
6965  // Owner of this object.
6966  VmaDefragmentationAlgorithm* m_pAlgorithm;
6967 
6968  struct AllocInfo
6969  {
6970  VmaAllocation hAlloc;
6971  VkBool32* pChanged;
6972  };
6973  // Used between constructor and Begin.
6974  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6975  bool m_AllAllocations;
6976 };
6977 
6978 struct VmaDefragmentationContext_T
6979 {
6980 private:
6981  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6982 public:
6983  VmaDefragmentationContext_T(
6984  VmaAllocator hAllocator,
6985  uint32_t currFrameIndex,
6986  uint32_t flags,
6987  VmaDefragmentationStats* pStats);
6988  ~VmaDefragmentationContext_T();
6989 
6990  void AddPools(uint32_t poolCount, VmaPool* pPools);
6991  void AddAllocations(
6992  uint32_t allocationCount,
6993  VmaAllocation* pAllocations,
6994  VkBool32* pAllocationsChanged);
6995 
6996  /*
6997  Returns:
6998  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
6999  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7000  - Negative value if an error occurred and the object can be destroyed immediately.
7001  */
7002  VkResult Defragment(
7003  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7004  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7005  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7006 
7007  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7008  VkResult DefragmentPassEnd();
7009 
7010 private:
7011  const VmaAllocator m_hAllocator;
7012  const uint32_t m_CurrFrameIndex;
7013  const uint32_t m_Flags;
7014  VmaDefragmentationStats* const m_pStats;
7015 
7016  VkDeviceSize m_MaxCpuBytesToMove;
7017  uint32_t m_MaxCpuAllocationsToMove;
7018  VkDeviceSize m_MaxGpuBytesToMove;
7019  uint32_t m_MaxGpuAllocationsToMove;
7020 
7021  // Owner of these objects.
7022  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7023  // Owner of these objects.
7024  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7025 };
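#if 0 // Illustrative sketch, not part of VMA internals: how the public API
// drives this context. A CPU-only defragmentation pass via
// vmaDefragmentationBegin()/vmaDefragmentationEnd(); `allocator`, `allocs` and
// `allocCount` are assumed to exist in the caller. Per the comment on
// Defragment() above, VK_NOT_READY means the context must stay alive until
// vmaDefragmentationEnd() is called.
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // No byte limit.
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No allocation limit.
// commandBuffer is left VK_NULL_HANDLE: no GPU-side moves in this sketch.

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// res is VK_SUCCESS or VK_NOT_READY on success; end the context either way.
// ... recreate buffers/images whose allocations were moved ...
vmaDefragmentationEnd(allocator, defragCtx);
#endif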
7026 
7027 #if VMA_RECORDING_ENABLED
7028 
7029 class VmaRecorder
7030 {
7031 public:
7032  VmaRecorder();
7033  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7034  void WriteConfiguration(
7035  const VkPhysicalDeviceProperties& devProps,
7036  const VkPhysicalDeviceMemoryProperties& memProps,
7037  uint32_t vulkanApiVersion,
7038  bool dedicatedAllocationExtensionEnabled,
7039  bool bindMemory2ExtensionEnabled,
7040  bool memoryBudgetExtensionEnabled,
7041  bool deviceCoherentMemoryExtensionEnabled);
7042  ~VmaRecorder();
7043 
7044  void RecordCreateAllocator(uint32_t frameIndex);
7045  void RecordDestroyAllocator(uint32_t frameIndex);
7046  void RecordCreatePool(uint32_t frameIndex,
7047  const VmaPoolCreateInfo& createInfo,
7048  VmaPool pool);
7049  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7050  void RecordAllocateMemory(uint32_t frameIndex,
7051  const VkMemoryRequirements& vkMemReq,
7052  const VmaAllocationCreateInfo& createInfo,
7053  VmaAllocation allocation);
7054  void RecordAllocateMemoryPages(uint32_t frameIndex,
7055  const VkMemoryRequirements& vkMemReq,
7056  const VmaAllocationCreateInfo& createInfo,
7057  uint64_t allocationCount,
7058  const VmaAllocation* pAllocations);
7059  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7060  const VkMemoryRequirements& vkMemReq,
7061  bool requiresDedicatedAllocation,
7062  bool prefersDedicatedAllocation,
7063  const VmaAllocationCreateInfo& createInfo,
7064  VmaAllocation allocation);
7065  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7066  const VkMemoryRequirements& vkMemReq,
7067  bool requiresDedicatedAllocation,
7068  bool prefersDedicatedAllocation,
7069  const VmaAllocationCreateInfo& createInfo,
7070  VmaAllocation allocation);
7071  void RecordFreeMemory(uint32_t frameIndex,
7072  VmaAllocation allocation);
7073  void RecordFreeMemoryPages(uint32_t frameIndex,
7074  uint64_t allocationCount,
7075  const VmaAllocation* pAllocations);
7076  void RecordSetAllocationUserData(uint32_t frameIndex,
7077  VmaAllocation allocation,
7078  const void* pUserData);
7079  void RecordCreateLostAllocation(uint32_t frameIndex,
7080  VmaAllocation allocation);
7081  void RecordMapMemory(uint32_t frameIndex,
7082  VmaAllocation allocation);
7083  void RecordUnmapMemory(uint32_t frameIndex,
7084  VmaAllocation allocation);
7085  void RecordFlushAllocation(uint32_t frameIndex,
7086  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7087  void RecordInvalidateAllocation(uint32_t frameIndex,
7088  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7089  void RecordCreateBuffer(uint32_t frameIndex,
7090  const VkBufferCreateInfo& bufCreateInfo,
7091  const VmaAllocationCreateInfo& allocCreateInfo,
7092  VmaAllocation allocation);
7093  void RecordCreateImage(uint32_t frameIndex,
7094  const VkImageCreateInfo& imageCreateInfo,
7095  const VmaAllocationCreateInfo& allocCreateInfo,
7096  VmaAllocation allocation);
7097  void RecordDestroyBuffer(uint32_t frameIndex,
7098  VmaAllocation allocation);
7099  void RecordDestroyImage(uint32_t frameIndex,
7100  VmaAllocation allocation);
7101  void RecordTouchAllocation(uint32_t frameIndex,
7102  VmaAllocation allocation);
7103  void RecordGetAllocationInfo(uint32_t frameIndex,
7104  VmaAllocation allocation);
7105  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7106  VmaPool pool);
7107  void RecordDefragmentationBegin(uint32_t frameIndex,
7108  const VmaDefragmentationInfo2& info,
7109  VmaDefragmentationContext ctx);
7110  void RecordDefragmentationEnd(uint32_t frameIndex,
7111  VmaDefragmentationContext ctx);
7112  void RecordSetPoolName(uint32_t frameIndex,
7113  VmaPool pool,
7114  const char* name);
7115 
7116 private:
7117  struct CallParams
7118  {
7119  uint32_t threadId;
7120  double time;
7121  };
7122 
7123  class UserDataString
7124  {
7125  public:
7126  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7127  const char* GetString() const { return m_Str; }
7128 
7129  private:
7130  char m_PtrStr[17];
7131  const char* m_Str;
7132  };
7133 
7134  bool m_UseMutex;
7135  VmaRecordFlags m_Flags;
7136  FILE* m_File;
7137  VMA_MUTEX m_FileMutex;
7138  int64_t m_Freq;
7139  int64_t m_StartCounter;
7140 
7141  void GetBasicParams(CallParams& outParams);
7142 
7143  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7144  template<typename T>
7145  void PrintPointerList(uint64_t count, const T* pItems)
7146  {
7147  if(count)
7148  {
7149  fprintf(m_File, "%p", pItems[0]);
7150  for(uint64_t i = 1; i < count; ++i)
7151  {
7152  fprintf(m_File, " %p", pItems[i]);
7153  }
7154  }
7155  }
7156 
7157  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7158  void Flush();
7159 };
7160 
7161 #endif // #if VMA_RECORDING_ENABLED
7162 
7163 /*
7164 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7165 */
7166 class VmaAllocationObjectAllocator
7167 {
7168  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7169 public:
7170  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7171 
7172  template<typename... Types> VmaAllocation Allocate(Types... args);
7173  void Free(VmaAllocation hAlloc);
7174 
7175 private:
7176  VMA_MUTEX m_Mutex;
7177  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7178 };
7179 
7180 struct VmaCurrentBudgetData
7181 {
7182  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7183  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7184 
7185 #if VMA_MEMORY_BUDGET
7186  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7187  VMA_RW_MUTEX m_BudgetMutex;
7188  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7189  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7190  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7191 #endif // #if VMA_MEMORY_BUDGET
7192 
7193  VmaCurrentBudgetData()
7194  {
7195  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7196  {
7197  m_BlockBytes[heapIndex] = 0;
7198  m_AllocationBytes[heapIndex] = 0;
7199 #if VMA_MEMORY_BUDGET
7200  m_VulkanUsage[heapIndex] = 0;
7201  m_VulkanBudget[heapIndex] = 0;
7202  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7203 #endif
7204  }
7205 
7206 #if VMA_MEMORY_BUDGET
7207  m_OperationsSinceBudgetFetch = 0;
7208 #endif
7209  }
7210 
7211  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7212  {
7213  m_AllocationBytes[heapIndex] += allocationSize;
7214 #if VMA_MEMORY_BUDGET
7215  ++m_OperationsSinceBudgetFetch;
7216 #endif
7217  }
7218 
7219  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7220  {
7221  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
7222  m_AllocationBytes[heapIndex] -= allocationSize;
7223 #if VMA_MEMORY_BUDGET
7224  ++m_OperationsSinceBudgetFetch;
7225 #endif
7226  }
7227 };
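#if 0 // Illustrative sketch, not part of VMA: the lock-free accounting pattern
// VmaCurrentBudgetData uses. Per-heap byte counters are plain atomics, so
// allocation and free paths on different threads can update them without a
// mutex; only the cached OS budget (under VMA_MEMORY_BUDGET) needs m_BudgetMutex.
#include <atomic>
#include <cstdint>

struct BudgetSketch
{
    std::atomic<uint64_t> allocationBytes[VK_MAX_MEMORY_HEAPS];

    BudgetSketch()
    {
        for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
            allocationBytes[i] = 0;
    }
    void Add(uint32_t heapIndex, uint64_t size) { allocationBytes[heapIndex] += size; }
    void Remove(uint32_t heapIndex, uint64_t size) { allocationBytes[heapIndex] -= size; }
};
#endif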
7228 
7229 // Main allocator object.
7230 struct VmaAllocator_T
7231 {
7232  VMA_CLASS_NO_COPY(VmaAllocator_T)
7233 public:
7234  bool m_UseMutex;
7235  uint32_t m_VulkanApiVersion;
7236  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7237  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7238  bool m_UseExtMemoryBudget;
7239  bool m_UseAmdDeviceCoherentMemory;
7240  VkDevice m_hDevice;
7241  VkInstance m_hInstance;
7242  bool m_AllocationCallbacksSpecified;
7243  VkAllocationCallbacks m_AllocationCallbacks;
7244  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7245  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7246 
7247  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
7248  uint32_t m_HeapSizeLimitMask;
7249 
7250  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7251  VkPhysicalDeviceMemoryProperties m_MemProps;
7252 
7253  // Default pools.
7254  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7255 
7256  // Each vector is sorted by memory (handle value).
7257  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7258  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7259  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7260 
7261  VmaCurrentBudgetData m_Budget;
7262 
7263  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7264  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7265  ~VmaAllocator_T();
7266 
7267  const VkAllocationCallbacks* GetAllocationCallbacks() const
7268  {
7269  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7270  }
7271  const VmaVulkanFunctions& GetVulkanFunctions() const
7272  {
7273  return m_VulkanFunctions;
7274  }
7275 
7276  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7277 
7278  VkDeviceSize GetBufferImageGranularity() const
7279  {
7280  return VMA_MAX(
7281  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7282  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7283  }
7284 
7285  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7286  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7287 
7288  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7289  {
7290  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7291  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7292  }
7293  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7294  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7295  {
7296  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7297  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7298  }
7299  // Minimum alignment for all allocations in specific memory type.
7300  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7301  {
7302  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7303  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7304  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7305  }
7306 
7307  bool IsIntegratedGpu() const
7308  {
7309  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7310  }
7311 
7312  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7313 
7314 #if VMA_RECORDING_ENABLED
7315  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7316 #endif
7317 
7318  void GetBufferMemoryRequirements(
7319  VkBuffer hBuffer,
7320  VkMemoryRequirements& memReq,
7321  bool& requiresDedicatedAllocation,
7322  bool& prefersDedicatedAllocation) const;
7323  void GetImageMemoryRequirements(
7324  VkImage hImage,
7325  VkMemoryRequirements& memReq,
7326  bool& requiresDedicatedAllocation,
7327  bool& prefersDedicatedAllocation) const;
7328 
7329  // Main allocation function.
7330  VkResult AllocateMemory(
7331  const VkMemoryRequirements& vkMemReq,
7332  bool requiresDedicatedAllocation,
7333  bool prefersDedicatedAllocation,
7334  VkBuffer dedicatedBuffer,
7335  VkImage dedicatedImage,
7336  const VmaAllocationCreateInfo& createInfo,
7337  VmaSuballocationType suballocType,
7338  size_t allocationCount,
7339  VmaAllocation* pAllocations);
7340 
7341  // Main deallocation function.
7342  void FreeMemory(
7343  size_t allocationCount,
7344  const VmaAllocation* pAllocations);
7345 
7346  VkResult ResizeAllocation(
7347  const VmaAllocation alloc,
7348  VkDeviceSize newSize);
7349 
7350  void CalculateStats(VmaStats* pStats);
7351 
7352  void GetBudget(
7353  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7354 
7355 #if VMA_STATS_STRING_ENABLED
7356  void PrintDetailedMap(class VmaJsonWriter& json);
7357 #endif
7358 
7359  VkResult DefragmentationBegin(
7360  const VmaDefragmentationInfo2& info,
7361  VmaDefragmentationStats* pStats,
7362  VmaDefragmentationContext* pContext);
7363  VkResult DefragmentationEnd(
7364  VmaDefragmentationContext context);
7365 
7366  VkResult DefragmentationPassBegin(
7367  VmaDefragmentationPassInfo* pInfo,
7368  VmaDefragmentationContext context);
7369  VkResult DefragmentationPassEnd(
7370  VmaDefragmentationContext context);
7371 
7372  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7373  bool TouchAllocation(VmaAllocation hAllocation);
7374 
7375  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7376  void DestroyPool(VmaPool pool);
7377  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7378 
7379  void SetCurrentFrameIndex(uint32_t frameIndex);
7380  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7381 
7382  void MakePoolAllocationsLost(
7383  VmaPool hPool,
7384  size_t* pLostAllocationCount);
7385  VkResult CheckPoolCorruption(VmaPool hPool);
7386  VkResult CheckCorruption(uint32_t memoryTypeBits);
7387 
7388  void CreateLostAllocation(VmaAllocation* pAllocation);
7389 
7390  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7391  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7392  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7393  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7394  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7395  VkResult BindVulkanBuffer(
7396  VkDeviceMemory memory,
7397  VkDeviceSize memoryOffset,
7398  VkBuffer buffer,
7399  const void* pNext);
7400  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7401  VkResult BindVulkanImage(
7402  VkDeviceMemory memory,
7403  VkDeviceSize memoryOffset,
7404  VkImage image,
7405  const void* pNext);
7406 
7407  VkResult Map(VmaAllocation hAllocation, void** ppData);
7408  void Unmap(VmaAllocation hAllocation);
7409 
7410  VkResult BindBufferMemory(
7411  VmaAllocation hAllocation,
7412  VkDeviceSize allocationLocalOffset,
7413  VkBuffer hBuffer,
7414  const void* pNext);
7415  VkResult BindImageMemory(
7416  VmaAllocation hAllocation,
7417  VkDeviceSize allocationLocalOffset,
7418  VkImage hImage,
7419  const void* pNext);
7420 
7421  void FlushOrInvalidateAllocation(
7422  VmaAllocation hAllocation,
7423  VkDeviceSize offset, VkDeviceSize size,
7424  VMA_CACHE_OPERATION op);
7425 
7426  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7427 
7428  /*
7429  Returns bit mask of memory types that can support defragmentation on GPU as
7430  they support creation of required buffer for copy operations.
7431  */
7432  uint32_t GetGpuDefragmentationMemoryTypeBits();
7433 
7434 private:
7435  VkDeviceSize m_PreferredLargeHeapBlockSize;
7436 
7437  VkPhysicalDevice m_PhysicalDevice;
7438  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7439  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7440 
7441  VMA_RW_MUTEX m_PoolsMutex;
7442  // Protected by m_PoolsMutex. Sorted by pointer value.
7443  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7444  uint32_t m_NextPoolId;
7445 
7446  VmaVulkanFunctions m_VulkanFunctions;
7447 
7448  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
7449  uint32_t m_GlobalMemoryTypeBits;
7450 
7451 #if VMA_RECORDING_ENABLED
7452  VmaRecorder* m_pRecorder;
7453 #endif
7454 
7455  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7456 
7457  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7458 
7459  VkResult AllocateMemoryOfType(
7460  VkDeviceSize size,
7461  VkDeviceSize alignment,
7462  bool dedicatedAllocation,
7463  VkBuffer dedicatedBuffer,
7464  VkImage dedicatedImage,
7465  const VmaAllocationCreateInfo& createInfo,
7466  uint32_t memTypeIndex,
7467  VmaSuballocationType suballocType,
7468  size_t allocationCount,
7469  VmaAllocation* pAllocations);
7470 
7471  // Helper function only to be used inside AllocateDedicatedMemory.
7472  VkResult AllocateDedicatedMemoryPage(
7473  VkDeviceSize size,
7474  VmaSuballocationType suballocType,
7475  uint32_t memTypeIndex,
7476  const VkMemoryAllocateInfo& allocInfo,
7477  bool map,
7478  bool isUserDataString,
7479  void* pUserData,
7480  VmaAllocation* pAllocation);
7481 
7482  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7483  VkResult AllocateDedicatedMemory(
7484  VkDeviceSize size,
7485  VmaSuballocationType suballocType,
7486  uint32_t memTypeIndex,
7487  bool withinBudget,
7488  bool map,
7489  bool isUserDataString,
7490  void* pUserData,
7491  VkBuffer dedicatedBuffer,
7492  VkImage dedicatedImage,
7493  size_t allocationCount,
7494  VmaAllocation* pAllocations);
7495 
7496  void FreeDedicatedMemory(const VmaAllocation allocation);
7497 
7498  /*
7499  Calculates and returns bit mask of memory types that can support defragmentation
7500  on GPU as they support creation of required buffer for copy operations.
7501  */
7502  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7503 
7504  uint32_t CalculateGlobalMemoryTypeBits() const;
7505 
7506 #if VMA_MEMORY_BUDGET
7507  void UpdateVulkanBudget();
7508 #endif // #if VMA_MEMORY_BUDGET
7509 };
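#if 0 // Illustrative sketch, not part of VMA: why IsMemoryTypeNonCoherent()
// matters. HOST_VISIBLE memory without HOST_COHERENT requires explicit
// flush/invalidate, and Vulkan requires those ranges to be aligned to
// limits.nonCoherentAtomSize - which is why GetMemoryTypeMinAlignment() raises
// the minimum alignment for such memory types. `memProps` and `memTypeIndex`
// are assumed to exist in the caller.
const VkMemoryPropertyFlags flags = memProps.memoryTypes[memTypeIndex].propertyFlags;
const bool hostVisible  = (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
const bool hostCoherent = (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
if(hostVisible && !hostCoherent)
{
    // vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges are required
    // after writing / before reading mapped data, with offset and size aligned
    // to limits.nonCoherentAtomSize.
}
#endif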
7510 
7511 ////////////////////////////////////////////////////////////////////////////////
7512 // Memory allocation #2 after VmaAllocator_T definition
7513 
7514 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7515 {
7516  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7517 }
7518 
7519 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7520 {
7521  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7522 }
7523 
7524 template<typename T>
7525 static T* VmaAllocate(VmaAllocator hAllocator)
7526 {
7527  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7528 }
7529 
7530 template<typename T>
7531 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7532 {
7533  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7534 }
7535 
7536 template<typename T>
7537 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7538 {
7539  if(ptr != VMA_NULL)
7540  {
7541  ptr->~T();
7542  VmaFree(hAllocator, ptr);
7543  }
7544 }
7545 
7546 template<typename T>
7547 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7548 {
7549  if(ptr != VMA_NULL)
7550  {
7551  for(size_t i = count; i--; )
7552  ptr[i].~T();
7553  VmaFree(hAllocator, ptr);
7554  }
7555 }
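#if 0 // Illustrative sketch, not part of VMA: pairing these helpers correctly.
// VmaAllocate<T>() only reserves raw, suitably aligned memory; the object must
// be constructed with placement new and destroyed through vma_delete(), which
// runs the destructor before returning the memory via VmaFree().
#include <new>

struct WidgetSketch { int value = 42; };

// Assuming `hAllocator` is a valid VmaAllocator:
WidgetSketch* w = new(VmaAllocate<WidgetSketch>(hAllocator)) WidgetSketch();
// ... use w ...
vma_delete(hAllocator, w); // Runs ~WidgetSketch(), then VmaFree().
#endif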
7556 
7557 ////////////////////////////////////////////////////////////////////////////////
7558 // VmaStringBuilder
7559 
7560 #if VMA_STATS_STRING_ENABLED
7561 
7562 class VmaStringBuilder
7563 {
7564 public:
7565  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7566  size_t GetLength() const { return m_Data.size(); }
7567  const char* GetData() const { return m_Data.data(); }
7568 
7569  void Add(char ch) { m_Data.push_back(ch); }
7570  void Add(const char* pStr);
7571  void AddNewLine() { Add('\n'); }
7572  void AddNumber(uint32_t num);
7573  void AddNumber(uint64_t num);
7574  void AddPointer(const void* ptr);
7575 
7576 private:
7577  VmaVector< char, VmaStlAllocator<char> > m_Data;
7578 };
7579 
7580 void VmaStringBuilder::Add(const char* pStr)
7581 {
7582  const size_t strLen = strlen(pStr);
7583  if(strLen > 0)
7584  {
7585  const size_t oldCount = m_Data.size();
7586  m_Data.resize(oldCount + strLen);
7587  memcpy(m_Data.data() + oldCount, pStr, strLen);
7588  }
7589 }
7590 
7591 void VmaStringBuilder::AddNumber(uint32_t num)
7592 {
7593  char buf[11];
7594  buf[10] = '\0';
7595  char *p = &buf[10];
7596  do
7597  {
7598  *--p = '0' + (num % 10);
7599  num /= 10;
7600  }
7601  while(num);
7602  Add(p);
7603 }
7604 
7605 void VmaStringBuilder::AddNumber(uint64_t num)
7606 {
7607  char buf[21];
7608  buf[20] = '\0';
7609  char *p = &buf[20];
7610  do
7611  {
7612  *--p = '0' + (num % 10);
7613  num /= 10;
7614  }
7615  while(num);
7616  Add(p);
7617 }
7618 
7619 void VmaStringBuilder::AddPointer(const void* ptr)
7620 {
7621  char buf[21];
7622  VmaPtrToStr(buf, sizeof(buf), ptr);
7623  Add(buf);
7624 }
7625 
7626 #endif // #if VMA_STATS_STRING_ENABLED
7627 
7628 ////////////////////////////////////////////////////////////////////////////////
7629 // VmaJsonWriter
7630 
7631 #if VMA_STATS_STRING_ENABLED
7632 
7633 class VmaJsonWriter
7634 {
7635  VMA_CLASS_NO_COPY(VmaJsonWriter)
7636 public:
7637  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7638  ~VmaJsonWriter();
7639 
7640  void BeginObject(bool singleLine = false);
7641  void EndObject();
7642 
7643  void BeginArray(bool singleLine = false);
7644  void EndArray();
7645 
7646  void WriteString(const char* pStr);
7647  void BeginString(const char* pStr = VMA_NULL);
7648  void ContinueString(const char* pStr);
7649  void ContinueString(uint32_t n);
7650  void ContinueString(uint64_t n);
7651  void ContinueString_Pointer(const void* ptr);
7652  void EndString(const char* pStr = VMA_NULL);
7653 
7654  void WriteNumber(uint32_t n);
7655  void WriteNumber(uint64_t n);
7656  void WriteBool(bool b);
7657  void WriteNull();
7658 
7659 private:
7660  static const char* const INDENT;
7661 
7662  enum COLLECTION_TYPE
7663  {
7664  COLLECTION_TYPE_OBJECT,
7665  COLLECTION_TYPE_ARRAY,
7666  };
7667  struct StackItem
7668  {
7669  COLLECTION_TYPE type;
7670  uint32_t valueCount;
7671  bool singleLineMode;
7672  };
7673 
7674  VmaStringBuilder& m_SB;
7675  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7676  bool m_InsideString;
7677 
7678  void BeginValue(bool isString);
7679  void WriteIndent(bool oneLess = false);
7680 };
7681 
7682 const char* const VmaJsonWriter::INDENT = " ";
7683 
7684 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7685  m_SB(sb),
7686  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7687  m_InsideString(false)
7688 {
7689 }
7690 
7691 VmaJsonWriter::~VmaJsonWriter()
7692 {
7693  VMA_ASSERT(!m_InsideString);
7694  VMA_ASSERT(m_Stack.empty());
7695 }
7696 
7697 void VmaJsonWriter::BeginObject(bool singleLine)
7698 {
7699  VMA_ASSERT(!m_InsideString);
7700 
7701  BeginValue(false);
7702  m_SB.Add('{');
7703 
7704  StackItem item;
7705  item.type = COLLECTION_TYPE_OBJECT;
7706  item.valueCount = 0;
7707  item.singleLineMode = singleLine;
7708  m_Stack.push_back(item);
7709 }
7710 
7711 void VmaJsonWriter::EndObject()
7712 {
7713  VMA_ASSERT(!m_InsideString);
7714 
7715  WriteIndent(true);
7716  m_SB.Add('}');
7717 
7718  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7719  m_Stack.pop_back();
7720 }
7721 
7722 void VmaJsonWriter::BeginArray(bool singleLine)
7723 {
7724  VMA_ASSERT(!m_InsideString);
7725 
7726  BeginValue(false);
7727  m_SB.Add('[');
7728 
7729  StackItem item;
7730  item.type = COLLECTION_TYPE_ARRAY;
7731  item.valueCount = 0;
7732  item.singleLineMode = singleLine;
7733  m_Stack.push_back(item);
7734 }
7735 
7736 void VmaJsonWriter::EndArray()
7737 {
7738  VMA_ASSERT(!m_InsideString);
7739 
7740  WriteIndent(true);
7741  m_SB.Add(']');
7742 
7743  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7744  m_Stack.pop_back();
7745 }
7746 
7747 void VmaJsonWriter::WriteString(const char* pStr)
7748 {
7749  BeginString(pStr);
7750  EndString();
7751 }
7752 
7753 void VmaJsonWriter::BeginString(const char* pStr)
7754 {
7755  VMA_ASSERT(!m_InsideString);
7756 
7757  BeginValue(true);
7758  m_SB.Add('"');
7759  m_InsideString = true;
7760  if(pStr != VMA_NULL && pStr[0] != '\0')
7761  {
7762  ContinueString(pStr);
7763  }
7764 }
7765 
7766 void VmaJsonWriter::ContinueString(const char* pStr)
7767 {
7768  VMA_ASSERT(m_InsideString);
7769 
7770  const size_t strLen = strlen(pStr);
7771  for(size_t i = 0; i < strLen; ++i)
7772  {
7773  char ch = pStr[i];
7774  if(ch == '\\')
7775  {
7776  m_SB.Add("\\\\");
7777  }
7778  else if(ch == '"')
7779  {
7780  m_SB.Add("\\\"");
7781  }
7782  else if(ch >= 32)
7783  {
7784  m_SB.Add(ch);
7785  }
7786  else switch(ch)
7787  {
7788  case '\b':
7789  m_SB.Add("\\b");
7790  break;
7791  case '\f':
7792  m_SB.Add("\\f");
7793  break;
7794  case '\n':
7795  m_SB.Add("\\n");
7796  break;
7797  case '\r':
7798  m_SB.Add("\\r");
7799  break;
7800  case '\t':
7801  m_SB.Add("\\t");
7802  break;
7803  default:
7804  VMA_ASSERT(0 && "Character not currently supported.");
7805  break;
7806  }
7807  }
7808 }
7809 
7810 void VmaJsonWriter::ContinueString(uint32_t n)
7811 {
7812  VMA_ASSERT(m_InsideString);
7813  m_SB.AddNumber(n);
7814 }
7815 
7816 void VmaJsonWriter::ContinueString(uint64_t n)
7817 {
7818  VMA_ASSERT(m_InsideString);
7819  m_SB.AddNumber(n);
7820 }
7821 
7822 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7823 {
7824  VMA_ASSERT(m_InsideString);
7825  m_SB.AddPointer(ptr);
7826 }
7827 
7828 void VmaJsonWriter::EndString(const char* pStr)
7829 {
7830  VMA_ASSERT(m_InsideString);
7831  if(pStr != VMA_NULL && pStr[0] != '\0')
7832  {
7833  ContinueString(pStr);
7834  }
7835  m_SB.Add('"');
7836  m_InsideString = false;
7837 }
7838 
7839 void VmaJsonWriter::WriteNumber(uint32_t n)
7840 {
7841  VMA_ASSERT(!m_InsideString);
7842  BeginValue(false);
7843  m_SB.AddNumber(n);
7844 }
7845 
7846 void VmaJsonWriter::WriteNumber(uint64_t n)
7847 {
7848  VMA_ASSERT(!m_InsideString);
7849  BeginValue(false);
7850  m_SB.AddNumber(n);
7851 }
7852 
7853 void VmaJsonWriter::WriteBool(bool b)
7854 {
7855  VMA_ASSERT(!m_InsideString);
7856  BeginValue(false);
7857  m_SB.Add(b ? "true" : "false");
7858 }
7859 
7860 void VmaJsonWriter::WriteNull()
7861 {
7862  VMA_ASSERT(!m_InsideString);
7863  BeginValue(false);
7864  m_SB.Add("null");
7865 }
7866 
7867 void VmaJsonWriter::BeginValue(bool isString)
7868 {
7869  if(!m_Stack.empty())
7870  {
7871  StackItem& currItem = m_Stack.back();
7872  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7873  currItem.valueCount % 2 == 0)
7874  {
7875  VMA_ASSERT(isString);
7876  }
7877 
7878  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7879  currItem.valueCount % 2 != 0)
7880  {
7881  m_SB.Add(": ");
7882  }
7883  else if(currItem.valueCount > 0)
7884  {
7885  m_SB.Add(", ");
7886  WriteIndent();
7887  }
7888  else
7889  {
7890  WriteIndent();
7891  }
7892  ++currItem.valueCount;
7893  }
7894 }
7895 
7896 void VmaJsonWriter::WriteIndent(bool oneLess)
7897 {
7898  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7899  {
7900  m_SB.AddNewLine();
7901 
7902  size_t count = m_Stack.size();
7903  if(count > 0 && oneLess)
7904  {
7905  --count;
7906  }
7907  for(size_t i = 0; i < count; ++i)
7908  {
7909  m_SB.Add(INDENT);
7910  }
7911  }
7912 }
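#if 0 // Illustrative sketch, not part of VMA: driving VmaJsonWriter by hand.
// Inside an object, BeginValue() treats even-indexed values as keys and
// odd-indexed ones as values, emitting ": " between a key and its value and
// ", " between siblings, so callers simply alternate key/value calls.
// Assumes a valid VmaAllocator `allocator`.
VmaStringBuilder sb(allocator);
{
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Blocks"); // key
    json.WriteNumber(3u);       // value -> emitted as "Blocks": 3
    json.WriteString("Empty");  // key
    json.WriteBool(false);      // value
    json.EndObject();
}
// sb.GetData() now holds, modulo whitespace: { "Blocks": 3, "Empty": false }
#endif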
7913 
7914 #endif // #if VMA_STATS_STRING_ENABLED
7915 
7916 ////////////////////////////////////////////////////////////////////////////////
7917 
7918 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7919 {
7920  if(IsUserDataString())
7921  {
7922  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7923 
7924  FreeUserDataString(hAllocator);
7925 
7926  if(pUserData != VMA_NULL)
7927  {
7928  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
7929  }
7930  }
7931  else
7932  {
7933  m_pUserData = pUserData;
7934  }
7935 }
7936 
7937 void VmaAllocation_T::ChangeBlockAllocation(
7938  VmaAllocator hAllocator,
7939  VmaDeviceMemoryBlock* block,
7940  VkDeviceSize offset)
7941 {
7942  VMA_ASSERT(block != VMA_NULL);
7943  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7944 
7945  // Move mapping reference counter from old block to new block.
7946  if(block != m_BlockAllocation.m_Block)
7947  {
7948  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7949  if(IsPersistentMap())
7950  ++mapRefCount;
7951  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7952  block->Map(hAllocator, mapRefCount, VMA_NULL);
7953  }
7954 
7955  m_BlockAllocation.m_Block = block;
7956  m_BlockAllocation.m_Offset = offset;
7957 }
7958 
7959 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7960 {
7961  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7962  m_BlockAllocation.m_Offset = newOffset;
7963 }
7964 
7965 VkDeviceSize VmaAllocation_T::GetOffset() const
7966 {
7967  switch(m_Type)
7968  {
7969  case ALLOCATION_TYPE_BLOCK:
7970  return m_BlockAllocation.m_Offset;
7971  case ALLOCATION_TYPE_DEDICATED:
7972  return 0;
7973  default:
7974  VMA_ASSERT(0);
7975  return 0;
7976  }
7977 }
7978 
7979 VkDeviceMemory VmaAllocation_T::GetMemory() const
7980 {
7981  switch(m_Type)
7982  {
7983  case ALLOCATION_TYPE_BLOCK:
7984  return m_BlockAllocation.m_Block->GetDeviceMemory();
7985  case ALLOCATION_TYPE_DEDICATED:
7986  return m_DedicatedAllocation.m_hMemory;
7987  default:
7988  VMA_ASSERT(0);
7989  return VK_NULL_HANDLE;
7990  }
7991 }
7992 
7993 void* VmaAllocation_T::GetMappedData() const
7994 {
7995  switch(m_Type)
7996  {
7997  case ALLOCATION_TYPE_BLOCK:
7998  if(m_MapCount != 0)
7999  {
8000  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8001  VMA_ASSERT(pBlockData != VMA_NULL);
8002  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8003  }
8004  else
8005  {
8006  return VMA_NULL;
8007  }
8008  break;
8009  case ALLOCATION_TYPE_DEDICATED:
8010  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8011  return m_DedicatedAllocation.m_pMappedData;
8012  default:
8013  VMA_ASSERT(0);
8014  return VMA_NULL;
8015  }
8016 }
8017 
8018 bool VmaAllocation_T::CanBecomeLost() const
8019 {
8020  switch(m_Type)
8021  {
8022  case ALLOCATION_TYPE_BLOCK:
8023  return m_BlockAllocation.m_CanBecomeLost;
8024  case ALLOCATION_TYPE_DEDICATED:
8025  return false;
8026  default:
8027  VMA_ASSERT(0);
8028  return false;
8029  }
8030 }
8031 
8032 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8033 {
8034  VMA_ASSERT(CanBecomeLost());
8035 
8036  /*
8037  Warning: This is a carefully designed algorithm.
8038  Do not modify unless you really know what you're doing :)
8039  */
8040  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8041  for(;;)
8042  {
8043  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8044  {
8045  VMA_ASSERT(0);
8046  return false;
8047  }
8048  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8049  {
8050  return false;
8051  }
8052  else // Last use time earlier than current time.
8053  {
8054  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8055  {
8056  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8057  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8058  return true;
8059  }
8060  }
8061  }
8062 }
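#if 0 // Illustrative sketch, not part of VMA: the compare-exchange loop behind
// MakeLost(). The loop retries until it either observes a state that forbids
// the transition or atomically publishes the LOST marker, so concurrent
// callers race safely without a lock.
#include <atomic>
#include <cstdint>

static const uint32_t LOST_SKETCH = UINT32_MAX;

static bool TryMarkLostSketch(std::atomic<uint32_t>& lastUseFrame,
    uint32_t currentFrame, uint32_t framesInUse)
{
    uint32_t observed = lastUseFrame.load();
    for(;;)
    {
        if(observed == LOST_SKETCH)
        {
            return false; // Already lost.
        }
        if(observed + framesInUse >= currentFrame)
        {
            return false; // Still potentially in use by the GPU.
        }
        // On failure, compare_exchange_weak refreshes `observed` and we retry.
        if(lastUseFrame.compare_exchange_weak(observed, LOST_SKETCH))
        {
            return true; // This thread won the race.
        }
    }
}
#endif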
8063 
8064 #if VMA_STATS_STRING_ENABLED
8065 
8066 // Entries correspond to values of enum VmaSuballocationType.
8067 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8068  "FREE",
8069  "UNKNOWN",
8070  "BUFFER",
8071  "IMAGE_UNKNOWN",
8072  "IMAGE_LINEAR",
8073  "IMAGE_OPTIMAL",
8074 };
8075 
8076 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8077 {
8078  json.WriteString("Type");
8079  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8080 
8081  json.WriteString("Size");
8082  json.WriteNumber(m_Size);
8083 
8084  if(m_pUserData != VMA_NULL)
8085  {
8086  json.WriteString("UserData");
8087  if(IsUserDataString())
8088  {
8089  json.WriteString((const char*)m_pUserData);
8090  }
8091  else
8092  {
8093  json.BeginString();
8094  json.ContinueString_Pointer(m_pUserData);
8095  json.EndString();
8096  }
8097  }
8098 
8099  json.WriteString("CreationFrameIndex");
8100  json.WriteNumber(m_CreationFrameIndex);
8101 
8102  json.WriteString("LastUseFrameIndex");
8103  json.WriteNumber(GetLastUseFrameIndex());
8104 
8105  if(m_BufferImageUsage != 0)
8106  {
8107  json.WriteString("Usage");
8108  json.WriteNumber(m_BufferImageUsage);
8109  }
8110 }
8111 
8112 #endif
8113 
8114 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8115 {
8116  VMA_ASSERT(IsUserDataString());
8117  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8118  m_pUserData = VMA_NULL;
8119 }
8120 
8121 void VmaAllocation_T::BlockAllocMap()
8122 {
8123  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8124 
8125  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8126  {
8127  ++m_MapCount;
8128  }
8129  else
8130  {
8131  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8132  }
8133 }
8134 
8135 void VmaAllocation_T::BlockAllocUnmap()
8136 {
8137  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8138 
8139  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8140  {
8141  --m_MapCount;
8142  }
8143  else
8144  {
8145  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8146  }
8147 }
8148 
8149 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8150 {
8151  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8152 
8153  if(m_MapCount != 0)
8154  {
8155  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8156  {
8157  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8158  *ppData = m_DedicatedAllocation.m_pMappedData;
8159  ++m_MapCount;
8160  return VK_SUCCESS;
8161  }
8162  else
8163  {
8164  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8165  return VK_ERROR_MEMORY_MAP_FAILED;
8166  }
8167  }
8168  else
8169  {
8170  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8171  hAllocator->m_hDevice,
8172  m_DedicatedAllocation.m_hMemory,
8173  0, // offset
8174  VK_WHOLE_SIZE,
8175  0, // flags
8176  ppData);
8177  if(result == VK_SUCCESS)
8178  {
8179  m_DedicatedAllocation.m_pMappedData = *ppData;
8180  m_MapCount = 1;
8181  }
8182  return result;
8183  }
8184 }
8185 
8186 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8187 {
8188  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8189 
8190  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8191  {
8192  --m_MapCount;
8193  if(m_MapCount == 0)
8194  {
8195  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8196  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8197  hAllocator->m_hDevice,
8198  m_DedicatedAllocation.m_hMemory);
8199  }
8200  }
8201  else
8202  {
8203  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8204  }
8205 }
8206 
8207 #if VMA_STATS_STRING_ENABLED
8208 
8209 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8210 {
8211  json.BeginObject();
8212 
8213  json.WriteString("Blocks");
8214  json.WriteNumber(stat.blockCount);
8215 
8216  json.WriteString("Allocations");
8217  json.WriteNumber(stat.allocationCount);
8218 
8219  json.WriteString("UnusedRanges");
8220  json.WriteNumber(stat.unusedRangeCount);
8221 
8222  json.WriteString("UsedBytes");
8223  json.WriteNumber(stat.usedBytes);
8224 
8225  json.WriteString("UnusedBytes");
8226  json.WriteNumber(stat.unusedBytes);
8227 
8228  if(stat.allocationCount > 1)
8229  {
8230  json.WriteString("AllocationSize");
8231  json.BeginObject(true);
8232  json.WriteString("Min");
8233  json.WriteNumber(stat.allocationSizeMin);
8234  json.WriteString("Avg");
8235  json.WriteNumber(stat.allocationSizeAvg);
8236  json.WriteString("Max");
8237  json.WriteNumber(stat.allocationSizeMax);
8238  json.EndObject();
8239  }
8240 
8241  if(stat.unusedRangeCount > 1)
8242  {
8243  json.WriteString("UnusedRangeSize");
8244  json.BeginObject(true);
8245  json.WriteString("Min");
8246  json.WriteNumber(stat.unusedRangeSizeMin);
8247  json.WriteString("Avg");
8248  json.WriteNumber(stat.unusedRangeSizeAvg);
8249  json.WriteString("Max");
8250  json.WriteNumber(stat.unusedRangeSizeMax);
8251  json.EndObject();
8252  }
8253 
8254  json.EndObject();
8255 }
8256 
8257 #endif // #if VMA_STATS_STRING_ENABLED
8258 
8259 struct VmaSuballocationItemSizeLess
8260 {
8261  bool operator()(
8262  const VmaSuballocationList::iterator lhs,
8263  const VmaSuballocationList::iterator rhs) const
8264  {
8265  return lhs->size < rhs->size;
8266  }
8267  bool operator()(
8268  const VmaSuballocationList::iterator lhs,
8269  VkDeviceSize rhsSize) const
8270  {
8271  return lhs->size < rhsSize;
8272  }
8273 };
8274 
8275 
8276 ////////////////////////////////////////////////////////////////////////////////
8277 // class VmaBlockMetadata
8278 
8279 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8280  m_Size(0),
8281  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8282 {
8283 }
8284 
8285 #if VMA_STATS_STRING_ENABLED
8286 
8287 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8288  VkDeviceSize unusedBytes,
8289  size_t allocationCount,
8290  size_t unusedRangeCount) const
8291 {
8292  json.BeginObject();
8293 
8294  json.WriteString("TotalBytes");
8295  json.WriteNumber(GetSize());
8296 
8297  json.WriteString("UnusedBytes");
8298  json.WriteNumber(unusedBytes);
8299 
8300  json.WriteString("Allocations");
8301  json.WriteNumber((uint64_t)allocationCount);
8302 
8303  json.WriteString("UnusedRanges");
8304  json.WriteNumber((uint64_t)unusedRangeCount);
8305 
8306  json.WriteString("Suballocations");
8307  json.BeginArray();
8308 }
8309 
8310 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8311  VkDeviceSize offset,
8312  VmaAllocation hAllocation) const
8313 {
8314  json.BeginObject(true);
8315 
8316  json.WriteString("Offset");
8317  json.WriteNumber(offset);
8318 
8319  hAllocation->PrintParameters(json);
8320 
8321  json.EndObject();
8322 }
8323 
8324 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8325  VkDeviceSize offset,
8326  VkDeviceSize size) const
8327 {
8328  json.BeginObject(true);
8329 
8330  json.WriteString("Offset");
8331  json.WriteNumber(offset);
8332 
8333  json.WriteString("Type");
8334  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8335 
8336  json.WriteString("Size");
8337  json.WriteNumber(size);
8338 
8339  json.EndObject();
8340 }
8341 
8342 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8343 {
8344  json.EndArray();
8345  json.EndObject();
8346 }
8347 
8348 #endif // #if VMA_STATS_STRING_ENABLED
8349 
8350 ////////////////////////////////////////////////////////////////////////////////
8351 // class VmaBlockMetadata_Generic
8352 
8353 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8354  VmaBlockMetadata(hAllocator),
8355  m_FreeCount(0),
8356  m_SumFreeSize(0),
8357  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8358  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8359 {
8360 }
8361 
8362 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8363 {
8364 }
8365 
8366 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8367 {
8368  VmaBlockMetadata::Init(size);
8369 
8370  m_FreeCount = 1;
8371  m_SumFreeSize = size;
8372 
8373  VmaSuballocation suballoc = {};
8374  suballoc.offset = 0;
8375  suballoc.size = size;
8376  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8377  suballoc.hAllocation = VK_NULL_HANDLE;
8378 
8379  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8380  m_Suballocations.push_back(suballoc);
8381  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8382  --suballocItem;
8383  m_FreeSuballocationsBySize.push_back(suballocItem);
8384 }
8385 
8386 bool VmaBlockMetadata_Generic::Validate() const
8387 {
8388  VMA_VALIDATE(!m_Suballocations.empty());
8389 
8390  // Expected offset of new suballocation as calculated from previous ones.
8391  VkDeviceSize calculatedOffset = 0;
8392  // Expected number of free suballocations as calculated from traversing their list.
8393  uint32_t calculatedFreeCount = 0;
8394  // Expected sum size of free suballocations as calculated from traversing their list.
8395  VkDeviceSize calculatedSumFreeSize = 0;
8396  // Expected number of free suballocations that should be registered in
8397  // m_FreeSuballocationsBySize, calculated from traversing their list.
8398  size_t freeSuballocationsToRegister = 0;
8399  // True if previous visited suballocation was free.
8400  bool prevFree = false;
8401 
8402  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8403  suballocItem != m_Suballocations.cend();
8404  ++suballocItem)
8405  {
8406  const VmaSuballocation& subAlloc = *suballocItem;
8407 
8408  // Actual offset of this suballocation doesn't match expected one.
8409  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8410 
8411  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8412  // Two adjacent free suballocations are invalid. They should be merged.
8413  VMA_VALIDATE(!prevFree || !currFree);
8414 
8415  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8416 
8417  if(currFree)
8418  {
8419  calculatedSumFreeSize += subAlloc.size;
8420  ++calculatedFreeCount;
8421  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8422  {
8423  ++freeSuballocationsToRegister;
8424  }
8425 
8426  // Margin required between allocations - every free space must be at least that large.
8427  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8428  }
8429  else
8430  {
8431  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8432  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8433 
8434  // Margin required between allocations - previous allocation must be free.
8435  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8436  }
8437 
8438  calculatedOffset += subAlloc.size;
8439  prevFree = currFree;
8440  }
8441 
8442  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8443  // match expected one.
8444  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8445 
8446  VkDeviceSize lastSize = 0;
8447  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8448  {
8449  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8450 
8451  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8452  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8453  // They must be sorted by size ascending.
8454  VMA_VALIDATE(suballocItem->size >= lastSize);
8455 
8456  lastSize = suballocItem->size;
8457  }
8458 
8459  // Check if totals match calculated values.
8460  VMA_VALIDATE(ValidateFreeSuballocationList());
8461  VMA_VALIDATE(calculatedOffset == GetSize());
8462  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8463  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8464 
8465  return true;
8466 }
8467 
8468 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8469 {
8470  if(!m_FreeSuballocationsBySize.empty())
8471  {
8472  return m_FreeSuballocationsBySize.back()->size;
8473  }
8474  else
8475  {
8476  return 0;
8477  }
8478 }
8479 
8480 bool VmaBlockMetadata_Generic::IsEmpty() const
8481 {
8482  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8483 }
8484 
8485 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8486 {
8487  outInfo.blockCount = 1;
8488 
8489  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8490  outInfo.allocationCount = rangeCount - m_FreeCount;
8491  outInfo.unusedRangeCount = m_FreeCount;
8492 
8493  outInfo.unusedBytes = m_SumFreeSize;
8494  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8495 
8496  outInfo.allocationSizeMin = UINT64_MAX;
8497  outInfo.allocationSizeMax = 0;
8498  outInfo.unusedRangeSizeMin = UINT64_MAX;
8499  outInfo.unusedRangeSizeMax = 0;
8500 
8501  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8502  suballocItem != m_Suballocations.cend();
8503  ++suballocItem)
8504  {
8505  const VmaSuballocation& suballoc = *suballocItem;
8506  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8507  {
8508  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8509  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8510  }
8511  else
8512  {
8513  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8514  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8515  }
8516  }
8517 }
8518 
8519 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8520 {
8521  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8522 
8523  inoutStats.size += GetSize();
8524  inoutStats.unusedSize += m_SumFreeSize;
8525  inoutStats.allocationCount += rangeCount - m_FreeCount;
8526  inoutStats.unusedRangeCount += m_FreeCount;
8527  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8528 }
8529 
8530 #if VMA_STATS_STRING_ENABLED
8531 
8532 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8533 {
8534  PrintDetailedMap_Begin(json,
8535  m_SumFreeSize, // unusedBytes
8536  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8537  m_FreeCount); // unusedRangeCount
8538 
8539  size_t i = 0;
8540  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8541  suballocItem != m_Suballocations.cend();
8542  ++suballocItem, ++i)
8543  {
8544  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8545  {
8546  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8547  }
8548  else
8549  {
8550  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8551  }
8552  }
8553 
8554  PrintDetailedMap_End(json);
8555 }
8556 
8557 #endif // #if VMA_STATS_STRING_ENABLED
8558 
8559 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8560  uint32_t currentFrameIndex,
8561  uint32_t frameInUseCount,
8562  VkDeviceSize bufferImageGranularity,
8563  VkDeviceSize allocSize,
8564  VkDeviceSize allocAlignment,
8565  bool upperAddress,
8566  VmaSuballocationType allocType,
8567  bool canMakeOtherLost,
8568  uint32_t strategy,
8569  VmaAllocationRequest* pAllocationRequest)
8570 {
8571  VMA_ASSERT(allocSize > 0);
8572  VMA_ASSERT(!upperAddress);
8573  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8574  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8575  VMA_HEAVY_ASSERT(Validate());
8576 
8577  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8578 
8579  // There is not enough total free space in this block to fulfill the request: early return.
8580  if(canMakeOtherLost == false &&
8581  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8582  {
8583  return false;
8584  }
8585 
8586  // New algorithm, efficiently searching freeSuballocationsBySize.
8587  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8588  if(freeSuballocCount > 0)
8589  {
8590  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
8591  {
8592  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8593  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8594  m_FreeSuballocationsBySize.data(),
8595  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8596  allocSize + 2 * VMA_DEBUG_MARGIN,
8597  VmaSuballocationItemSizeLess());
8598  size_t index = it - m_FreeSuballocationsBySize.data();
8599  for(; index < freeSuballocCount; ++index)
8600  {
8601  if(CheckAllocation(
8602  currentFrameIndex,
8603  frameInUseCount,
8604  bufferImageGranularity,
8605  allocSize,
8606  allocAlignment,
8607  allocType,
8608  m_FreeSuballocationsBySize[index],
8609  false, // canMakeOtherLost
8610  &pAllocationRequest->offset,
8611  &pAllocationRequest->itemsToMakeLostCount,
8612  &pAllocationRequest->sumFreeSize,
8613  &pAllocationRequest->sumItemSize))
8614  {
8615  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8616  return true;
8617  }
8618  }
8619  }
8620  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8621  {
8622  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8623  it != m_Suballocations.end();
8624  ++it)
8625  {
8626  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8627  currentFrameIndex,
8628  frameInUseCount,
8629  bufferImageGranularity,
8630  allocSize,
8631  allocAlignment,
8632  allocType,
8633  it,
8634  false, // canMakeOtherLost
8635  &pAllocationRequest->offset,
8636  &pAllocationRequest->itemsToMakeLostCount,
8637  &pAllocationRequest->sumFreeSize,
8638  &pAllocationRequest->sumItemSize))
8639  {
8640  pAllocationRequest->item = it;
8641  return true;
8642  }
8643  }
8644  }
8645  else // WORST_FIT, FIRST_FIT
8646  {
8647  // Search starting from the biggest suballocations.
8648  for(size_t index = freeSuballocCount; index--; )
8649  {
8650  if(CheckAllocation(
8651  currentFrameIndex,
8652  frameInUseCount,
8653  bufferImageGranularity,
8654  allocSize,
8655  allocAlignment,
8656  allocType,
8657  m_FreeSuballocationsBySize[index],
8658  false, // canMakeOtherLost
8659  &pAllocationRequest->offset,
8660  &pAllocationRequest->itemsToMakeLostCount,
8661  &pAllocationRequest->sumFreeSize,
8662  &pAllocationRequest->sumItemSize))
8663  {
8664  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8665  return true;
8666  }
8667  }
8668  }
8669  }
8670 
8671  if(canMakeOtherLost)
8672  {
8673  // Brute-force algorithm. TODO: Come up with something better.
8674 
8675  bool found = false;
8676  VmaAllocationRequest tmpAllocRequest = {};
8677  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8678  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8679  suballocIt != m_Suballocations.end();
8680  ++suballocIt)
8681  {
8682  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8683  suballocIt->hAllocation->CanBecomeLost())
8684  {
8685  if(CheckAllocation(
8686  currentFrameIndex,
8687  frameInUseCount,
8688  bufferImageGranularity,
8689  allocSize,
8690  allocAlignment,
8691  allocType,
8692  suballocIt,
8693  canMakeOtherLost,
8694  &tmpAllocRequest.offset,
8695  &tmpAllocRequest.itemsToMakeLostCount,
8696  &tmpAllocRequest.sumFreeSize,
8697  &tmpAllocRequest.sumItemSize))
8698  {
8699  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8700  {
8701  *pAllocationRequest = tmpAllocRequest;
8702  pAllocationRequest->item = suballocIt;
8703  break;
8704  }
8705  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8706  {
8707  *pAllocationRequest = tmpAllocRequest;
8708  pAllocationRequest->item = suballocIt;
8709  found = true;
8710  }
8711  }
8712  }
8713  }
8714 
8715  return found;
8716  }
8717 
8718  return false;
8719 }
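
// A minimal standalone sketch of the BEST_FIT search used above: the free-list
// index is kept sorted by size, so the first candidate is found with a binary
// search equivalent to std::lower_bound. Names here are hypothetical, and plain
// sizes stand in for the list iterators the real code stores.
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

// Returns the index of the first free range with size >= requiredSize,
// or freeSizes.size() if none fits. freeSizes must be sorted ascending.
inline size_t FindBestFitIndex(const std::vector<uint64_t>& freeSizes, uint64_t requiredSize)
{
    return (size_t)(std::lower_bound(freeSizes.begin(), freeSizes.end(), requiredSize) -
        freeSizes.begin());
}

// Example: freeSizes = {16, 64, 256}, requiredSize = 100 -> returns 2,
// i.e. the 256-byte range is the smallest one that fits.
#endif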
8720 
8721 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8722  uint32_t currentFrameIndex,
8723  uint32_t frameInUseCount,
8724  VmaAllocationRequest* pAllocationRequest)
8725 {
8726  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8727 
8728  while(pAllocationRequest->itemsToMakeLostCount > 0)
8729  {
8730  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8731  {
8732  ++pAllocationRequest->item;
8733  }
8734  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8735  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8736  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8737  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8738  {
8739  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8740  --pAllocationRequest->itemsToMakeLostCount;
8741  }
8742  else
8743  {
8744  return false;
8745  }
8746  }
8747 
8748  VMA_HEAVY_ASSERT(Validate());
8749  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8750  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8751 
8752  return true;
8753 }
8754 
8755 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8756 {
8757  uint32_t lostAllocationCount = 0;
8758  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8759  it != m_Suballocations.end();
8760  ++it)
8761  {
8762  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8763  it->hAllocation->CanBecomeLost() &&
8764  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8765  {
8766  it = FreeSuballocation(it);
8767  ++lostAllocationCount;
8768  }
8769  }
8770  return lostAllocationCount;
8771 }
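
// A minimal sketch of the "lost allocation" age test applied above: an
// allocation may be reclaimed only when it has not been used for more than
// frameInUseCount frames. The function name is hypothetical.
#if 0
#include <cstdint>

inline bool CanReclaimNow(
    uint32_t lastUseFrameIndex, // From GetLastUseFrameIndex().
    uint32_t frameInUseCount,   // Frames the app may still be reading from.
    uint32_t currentFrameIndex)
{
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}
#endif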
8772 
8773 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8774 {
8775  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8776  it != m_Suballocations.end();
8777  ++it)
8778  {
8779  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8780  {
8781  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8782  {
8783  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8784  return VK_ERROR_VALIDATION_FAILED_EXT;
8785  }
8786  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8787  {
8788  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8789  return VK_ERROR_VALIDATION_FAILED_EXT;
8790  }
8791  }
8792  }
8793 
8794  return VK_SUCCESS;
8795 }
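
// A minimal sketch of the corruption-detection scheme CheckCorruption builds
// on: a known magic pattern is written into the VMA_DEBUG_MARGIN bytes around
// each allocation and re-checked later. The marker value and names below are
// hypothetical.
#if 0
#include <cstdint>
#include <cstring>

static const uint32_t kMagic = 0xDC0DEDC0u; // Hypothetical marker value.

inline void WriteMagic(void* blockData, uint64_t offset)
{
    std::memcpy(static_cast<char*>(blockData) + offset, &kMagic, sizeof(kMagic));
}

inline bool ValidateMagic(const void* blockData, uint64_t offset)
{
    uint32_t value = 0;
    std::memcpy(&value, static_cast<const char*>(blockData) + offset, sizeof(value));
    return value == kMagic; // False => the margin bytes were overwritten.
}
#endif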
8796 
8797 void VmaBlockMetadata_Generic::Alloc(
8798  const VmaAllocationRequest& request,
8799  VmaSuballocationType type,
8800  VkDeviceSize allocSize,
8801  VmaAllocation hAllocation)
8802 {
8803  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8804  VMA_ASSERT(request.item != m_Suballocations.end());
8805  VmaSuballocation& suballoc = *request.item;
8806  // Given suballocation is a free block.
8807  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8808  // Given offset is inside this suballocation.
8809  VMA_ASSERT(request.offset >= suballoc.offset);
8810  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8811  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8812  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8813 
8814  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8815  // it to become used.
8816  UnregisterFreeSuballocation(request.item);
8817 
8818  suballoc.offset = request.offset;
8819  suballoc.size = allocSize;
8820  suballoc.type = type;
8821  suballoc.hAllocation = hAllocation;
8822 
8823  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8824  if(paddingEnd)
8825  {
8826  VmaSuballocation paddingSuballoc = {};
8827  paddingSuballoc.offset = request.offset + allocSize;
8828  paddingSuballoc.size = paddingEnd;
8829  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8830  VmaSuballocationList::iterator next = request.item;
8831  ++next;
8832  const VmaSuballocationList::iterator paddingEndItem =
8833  m_Suballocations.insert(next, paddingSuballoc);
8834  RegisterFreeSuballocation(paddingEndItem);
8835  }
8836 
8837  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8838  if(paddingBegin)
8839  {
8840  VmaSuballocation paddingSuballoc = {};
8841  paddingSuballoc.offset = request.offset - paddingBegin;
8842  paddingSuballoc.size = paddingBegin;
8843  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8844  const VmaSuballocationList::iterator paddingBeginItem =
8845  m_Suballocations.insert(request.item, paddingSuballoc);
8846  RegisterFreeSuballocation(paddingBeginItem);
8847  }
8848 
8849  // Update totals.
8850  m_FreeCount = m_FreeCount - 1;
8851  if(paddingBegin > 0)
8852  {
8853  ++m_FreeCount;
8854  }
8855  if(paddingEnd > 0)
8856  {
8857  ++m_FreeCount;
8858  }
8859  m_SumFreeSize -= allocSize;
8860 }
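
// A worked example of the split performed by Alloc above. One free range turns
// into up to three ranges: optional padding before, the allocation itself, and
// optional padding after.
//
//   Free range: offset = 100, size = 500. Request: offset = 128, allocSize = 300.
//     paddingBegin = 128 - 100      = 28   -> new free range [100, 128)
//     allocation                           -> used range     [128, 428)
//     paddingEnd   = 500 - 28 - 300 = 172  -> new free range [428, 600)
//
//   m_FreeCount: -1 for the consumed range, +1 for each non-empty padding range.
//   m_SumFreeSize decreases by exactly allocSize, since the paddings stay free.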
8861 
8862 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8863 {
8864  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8865  suballocItem != m_Suballocations.end();
8866  ++suballocItem)
8867  {
8868  VmaSuballocation& suballoc = *suballocItem;
8869  if(suballoc.hAllocation == allocation)
8870  {
8871  FreeSuballocation(suballocItem);
8872  VMA_HEAVY_ASSERT(Validate());
8873  return;
8874  }
8875  }
8876  VMA_ASSERT(0 && "Not found!");
8877 }
8878 
8879 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8880 {
8881  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8882  suballocItem != m_Suballocations.end();
8883  ++suballocItem)
8884  {
8885  VmaSuballocation& suballoc = *suballocItem;
8886  if(suballoc.offset == offset)
8887  {
8888  FreeSuballocation(suballocItem);
8889  return;
8890  }
8891  }
8892  VMA_ASSERT(0 && "Not found!");
8893 }
8894 
8895 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8896 {
8897  VkDeviceSize lastSize = 0;
8898  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8899  {
8900  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8901 
8902  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8903  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8904  VMA_VALIDATE(it->size >= lastSize);
8905  lastSize = it->size;
8906  }
8907  return true;
8908 }
8909 
8910 bool VmaBlockMetadata_Generic::CheckAllocation(
8911  uint32_t currentFrameIndex,
8912  uint32_t frameInUseCount,
8913  VkDeviceSize bufferImageGranularity,
8914  VkDeviceSize allocSize,
8915  VkDeviceSize allocAlignment,
8916  VmaSuballocationType allocType,
8917  VmaSuballocationList::const_iterator suballocItem,
8918  bool canMakeOtherLost,
8919  VkDeviceSize* pOffset,
8920  size_t* itemsToMakeLostCount,
8921  VkDeviceSize* pSumFreeSize,
8922  VkDeviceSize* pSumItemSize) const
8923 {
8924  VMA_ASSERT(allocSize > 0);
8925  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8926  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8927  VMA_ASSERT(pOffset != VMA_NULL);
8928 
8929  *itemsToMakeLostCount = 0;
8930  *pSumFreeSize = 0;
8931  *pSumItemSize = 0;
8932 
8933  if(canMakeOtherLost)
8934  {
8935  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8936  {
8937  *pSumFreeSize = suballocItem->size;
8938  }
8939  else
8940  {
8941  if(suballocItem->hAllocation->CanBecomeLost() &&
8942  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8943  {
8944  ++*itemsToMakeLostCount;
8945  *pSumItemSize = suballocItem->size;
8946  }
8947  else
8948  {
8949  return false;
8950  }
8951  }
8952 
8953  // Remaining size is too small for this request: Early return.
8954  if(GetSize() - suballocItem->offset < allocSize)
8955  {
8956  return false;
8957  }
8958 
8959  // Start from offset equal to beginning of this suballocation.
8960  *pOffset = suballocItem->offset;
8961 
8962  // Apply VMA_DEBUG_MARGIN at the beginning.
8963  if(VMA_DEBUG_MARGIN > 0)
8964  {
8965  *pOffset += VMA_DEBUG_MARGIN;
8966  }
8967 
8968  // Apply alignment.
8969  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8970 
8971  // Check previous suballocations for BufferImageGranularity conflicts.
8972  // Make bigger alignment if necessary.
8973  if(bufferImageGranularity > 1)
8974  {
8975  bool bufferImageGranularityConflict = false;
8976  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8977  while(prevSuballocItem != m_Suballocations.cbegin())
8978  {
8979  --prevSuballocItem;
8980  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8981  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8982  {
8983  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8984  {
8985  bufferImageGranularityConflict = true;
8986  break;
8987  }
8988  }
8989  else
8990  // Already on previous page.
8991  break;
8992  }
8993  if(bufferImageGranularityConflict)
8994  {
8995  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8996  }
8997  }
8998 
8999  // Now that we have final *pOffset, check if we are past suballocItem.
9000  // If yes, return false - this function should be called for another suballocItem as starting point.
9001  if(*pOffset >= suballocItem->offset + suballocItem->size)
9002  {
9003  return false;
9004  }
9005 
9006  // Calculate padding at the beginning based on current offset.
9007  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9008 
9009  // Calculate required margin at the end.
9010  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9011 
9012  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9013  // Another early return check.
9014  if(suballocItem->offset + totalSize > GetSize())
9015  {
9016  return false;
9017  }
9018 
9019  // Advance lastSuballocItem until desired size is reached.
9020  // Update itemsToMakeLostCount.
9021  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9022  if(totalSize > suballocItem->size)
9023  {
9024  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9025  while(remainingSize > 0)
9026  {
9027  ++lastSuballocItem;
9028  if(lastSuballocItem == m_Suballocations.cend())
9029  {
9030  return false;
9031  }
9032  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9033  {
9034  *pSumFreeSize += lastSuballocItem->size;
9035  }
9036  else
9037  {
9038  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9039  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9040  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9041  {
9042  ++*itemsToMakeLostCount;
9043  *pSumItemSize += lastSuballocItem->size;
9044  }
9045  else
9046  {
9047  return false;
9048  }
9049  }
9050  remainingSize = (lastSuballocItem->size < remainingSize) ?
9051  remainingSize - lastSuballocItem->size : 0;
9052  }
9053  }
9054 
9055  // Check next suballocations for BufferImageGranularity conflicts.
9056  // If conflict exists, we must mark more allocations lost or fail.
9057  if(bufferImageGranularity > 1)
9058  {
9059  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9060  ++nextSuballocItem;
9061  while(nextSuballocItem != m_Suballocations.cend())
9062  {
9063  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9064  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9065  {
9066  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9067  {
9068  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9069  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9070  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9071  {
9072  ++*itemsToMakeLostCount;
9073  }
9074  else
9075  {
9076  return false;
9077  }
9078  }
9079  }
9080  else
9081  {
9082  // Already on next page.
9083  break;
9084  }
9085  ++nextSuballocItem;
9086  }
9087  }
9088  }
9089  else
9090  {
9091  const VmaSuballocation& suballoc = *suballocItem;
9092  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9093 
9094  *pSumFreeSize = suballoc.size;
9095 
9096  // Size of this suballocation is too small for this request: Early return.
9097  if(suballoc.size < allocSize)
9098  {
9099  return false;
9100  }
9101 
9102  // Start from offset equal to beginning of this suballocation.
9103  *pOffset = suballoc.offset;
9104 
9105  // Apply VMA_DEBUG_MARGIN at the beginning.
9106  if(VMA_DEBUG_MARGIN > 0)
9107  {
9108  *pOffset += VMA_DEBUG_MARGIN;
9109  }
9110 
9111  // Apply alignment.
9112  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9113 
9114  // Check previous suballocations for BufferImageGranularity conflicts.
9115  // Make bigger alignment if necessary.
9116  if(bufferImageGranularity > 1)
9117  {
9118  bool bufferImageGranularityConflict = false;
9119  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9120  while(prevSuballocItem != m_Suballocations.cbegin())
9121  {
9122  --prevSuballocItem;
9123  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9124  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9125  {
9126  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9127  {
9128  bufferImageGranularityConflict = true;
9129  break;
9130  }
9131  }
9132  else
9133  // Already on previous page.
9134  break;
9135  }
9136  if(bufferImageGranularityConflict)
9137  {
9138  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9139  }
9140  }
9141 
9142  // Calculate padding at the beginning based on current offset.
9143  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9144 
9145  // Calculate required margin at the end.
9146  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9147 
9148  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9149  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9150  {
9151  return false;
9152  }
9153 
9154  // Check next suballocations for BufferImageGranularity conflicts.
9155  // If conflict exists, allocation cannot be made here.
9156  if(bufferImageGranularity > 1)
9157  {
9158  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9159  ++nextSuballocItem;
9160  while(nextSuballocItem != m_Suballocations.cend())
9161  {
9162  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9163  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9164  {
9165  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9166  {
9167  return false;
9168  }
9169  }
9170  else
9171  {
9172  // Already on next page.
9173  break;
9174  }
9175  ++nextSuballocItem;
9176  }
9177  }
9178  }
9179 
9180  // All tests passed: Success. pOffset is already filled.
9181  return true;
9182 }
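
// A minimal standalone sketch of the offset pipeline CheckAllocation applies:
// start at the free range, reserve the debug margin, align up, then bump the
// alignment again when a bufferImageGranularity conflict was detected. Names
// are hypothetical, and the granularity decision is taken as an input here.
#if 0
#include <cstdint>

inline uint64_t AlignUp(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

// Returns the candidate offset inside a free range, or UINT64_MAX if the
// request does not fit.
inline uint64_t ComputeCandidateOffset(
    uint64_t freeOffset, uint64_t freeSize,
    uint64_t allocSize, uint64_t alignment, uint64_t debugMargin,
    bool granularityConflict, uint64_t bufferImageGranularity)
{
    uint64_t offset = AlignUp(freeOffset + debugMargin, alignment);
    if(granularityConflict)
    {
        offset = AlignUp(offset, bufferImageGranularity);
    }
    const uint64_t paddingBegin = offset - freeOffset;
    if(paddingBegin + allocSize + debugMargin > freeSize)
    {
        return UINT64_MAX; // Does not fit in this free range.
    }
    return offset;
}
#endif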
9183 
9184 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9185 {
9186  VMA_ASSERT(item != m_Suballocations.end());
9187  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9188 
9189  VmaSuballocationList::iterator nextItem = item;
9190  ++nextItem;
9191  VMA_ASSERT(nextItem != m_Suballocations.end());
9192  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9193 
9194  item->size += nextItem->size;
9195  --m_FreeCount;
9196  m_Suballocations.erase(nextItem);
9197 }
9198 
9199 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9200 {
9201  // Change this suballocation to be marked as free.
9202  VmaSuballocation& suballoc = *suballocItem;
9203  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9204  suballoc.hAllocation = VK_NULL_HANDLE;
9205 
9206  // Update totals.
9207  ++m_FreeCount;
9208  m_SumFreeSize += suballoc.size;
9209 
9210  // Merge with previous and/or next suballocation if it's also free.
9211  bool mergeWithNext = false;
9212  bool mergeWithPrev = false;
9213 
9214  VmaSuballocationList::iterator nextItem = suballocItem;
9215  ++nextItem;
9216  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9217  {
9218  mergeWithNext = true;
9219  }
9220 
9221  VmaSuballocationList::iterator prevItem = suballocItem;
9222  if(suballocItem != m_Suballocations.begin())
9223  {
9224  --prevItem;
9225  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9226  {
9227  mergeWithPrev = true;
9228  }
9229  }
9230 
9231  if(mergeWithNext)
9232  {
9233  UnregisterFreeSuballocation(nextItem);
9234  MergeFreeWithNext(suballocItem);
9235  }
9236 
9237  if(mergeWithPrev)
9238  {
9239  UnregisterFreeSuballocation(prevItem);
9240  MergeFreeWithNext(prevItem);
9241  RegisterFreeSuballocation(prevItem);
9242  return prevItem;
9243  }
9244  else
9245  {
9246  RegisterFreeSuballocation(suballocItem);
9247  return suballocItem;
9248  }
9249 }
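
// A minimal standalone sketch of the coalescing done by FreeSuballocation: a
// freed range merges with free neighbors so the list never contains two
// adjacent free ranges. Types and names here are hypothetical.
#if 0
#include <cstdint>
#include <iterator>
#include <list>

struct Range { uint64_t offset; uint64_t size; bool free; };
using RangeList = std::list<Range>;

// Marks *it free and merges it with free neighbors; returns the merged range.
inline RangeList::iterator FreeAndCoalesce(RangeList& ranges, RangeList::iterator it)
{
    it->free = true;
    RangeList::iterator next = std::next(it);
    if(next != ranges.end() && next->free) // Merge with next.
    {
        it->size += next->size;
        ranges.erase(next);
    }
    if(it != ranges.begin())
    {
        RangeList::iterator prev = std::prev(it);
        if(prev->free) // Merge into previous.
        {
            prev->size += it->size;
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}
#endif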
9250 
9251 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9252 {
9253  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9254  VMA_ASSERT(item->size > 0);
9255 
9256  // You may want to enable this validation at the beginning or at the end of
9257  // this function, depending on what you want to check.
9258  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9259 
9260  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9261  {
9262  if(m_FreeSuballocationsBySize.empty())
9263  {
9264  m_FreeSuballocationsBySize.push_back(item);
9265  }
9266  else
9267  {
9268  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9269  }
9270  }
9271 
9272  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9273 }
9274 
9275 
9276 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9277 {
9278  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9279  VMA_ASSERT(item->size > 0);
9280 
9281  // You may want to enable this validation at the beginning or at the end of
9282  // this function, depending on what you want to check.
9283  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9284 
9285  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9286  {
9287  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9288  m_FreeSuballocationsBySize.data(),
9289  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9290  item,
9291  VmaSuballocationItemSizeLess());
9292  for(size_t index = it - m_FreeSuballocationsBySize.data();
9293  index < m_FreeSuballocationsBySize.size();
9294  ++index)
9295  {
9296  if(m_FreeSuballocationsBySize[index] == item)
9297  {
9298  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9299  return;
9300  }
9301  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9302  }
9303  VMA_ASSERT(0 && "Not found.");
9304  }
9305 
9306  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9307 }
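
// A minimal standalone sketch of maintaining the size-sorted index that
// Register/UnregisterFreeSuballocation manage: a sorted insert on registration
// and a binary search on removal. Plain sizes stand in for the stored
// iterators, so the scan over equal sizes in the real code is not needed here.
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

inline void RegisterSize(std::vector<uint64_t>& bySize, uint64_t size)
{
    bySize.insert(std::upper_bound(bySize.begin(), bySize.end(), size), size);
}

inline void UnregisterSize(std::vector<uint64_t>& bySize, uint64_t size)
{
    std::vector<uint64_t>::iterator it =
        std::lower_bound(bySize.begin(), bySize.end(), size);
    if(it != bySize.end() && *it == size)
    {
        bySize.erase(it);
    }
}
#endif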
9308 
9309 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9310  VkDeviceSize bufferImageGranularity,
9311  VmaSuballocationType& inOutPrevSuballocType) const
9312 {
9313  if(bufferImageGranularity == 1 || IsEmpty())
9314  {
9315  return false;
9316  }
9317 
9318  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9319  bool typeConflictFound = false;
9320  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9321  it != m_Suballocations.cend();
9322  ++it)
9323  {
9324  const VmaSuballocationType suballocType = it->type;
9325  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9326  {
9327  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9328  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9329  {
9330  typeConflictFound = true;
9331  }
9332  inOutPrevSuballocType = suballocType;
9333  }
9334  }
9335 
9336  return typeConflictFound || minAlignment >= bufferImageGranularity;
9337 }
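
// A minimal sketch of the page test behind bufferImageGranularity conflicts:
// a linear and a non-linear resource only conflict when they land on the same
// granularity "page". Assuming pageSize is a power of two (as the Vulkan spec
// guarantees for bufferImageGranularity), the test can be written as:
#if 0
#include <cstdint>

// True if resource A (which ends first) and resource B share a page.
inline bool OnSamePage(uint64_t aOffset, uint64_t aSize, uint64_t bOffset,
    uint64_t pageSize)
{
    const uint64_t aEndPage = (aOffset + aSize - 1) & ~(pageSize - 1);
    const uint64_t bStartPage = bOffset & ~(pageSize - 1);
    return aEndPage == bStartPage;
}
#endif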
9338 
9339 ////////////////////////////////////////////////////////////////////////////////
9340 // class VmaBlockMetadata_Linear
9341 
9342 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9343  VmaBlockMetadata(hAllocator),
9344  m_SumFreeSize(0),
9345  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9346  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9347  m_1stVectorIndex(0),
9348  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9349  m_1stNullItemsBeginCount(0),
9350  m_1stNullItemsMiddleCount(0),
9351  m_2ndNullItemsCount(0)
9352 {
9353 }
9354 
9355 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9356 {
9357 }
9358 
9359 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9360 {
9361  VmaBlockMetadata::Init(size);
9362  m_SumFreeSize = size;
9363 }
9364 
9365 bool VmaBlockMetadata_Linear::Validate() const
9366 {
9367  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9368  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9369 
9370  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9371  VMA_VALIDATE(!suballocations1st.empty() ||
9372  suballocations2nd.empty() ||
9373  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9374 
9375  if(!suballocations1st.empty())
9376  {
9377  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
9378  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9379  // Null item at the end should be just pop_back().
9380  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9381  }
9382  if(!suballocations2nd.empty())
9383  {
9384  // Null item at the end should be just pop_back().
9385  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9386  }
9387 
9388  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9389  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9390 
9391  VkDeviceSize sumUsedSize = 0;
9392  const size_t suballoc1stCount = suballocations1st.size();
9393  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9394 
9395  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9396  {
9397  const size_t suballoc2ndCount = suballocations2nd.size();
9398  size_t nullItem2ndCount = 0;
9399  for(size_t i = 0; i < suballoc2ndCount; ++i)
9400  {
9401  const VmaSuballocation& suballoc = suballocations2nd[i];
9402  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9403 
9404  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9405  VMA_VALIDATE(suballoc.offset >= offset);
9406 
9407  if(!currFree)
9408  {
9409  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9410  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9411  sumUsedSize += suballoc.size;
9412  }
9413  else
9414  {
9415  ++nullItem2ndCount;
9416  }
9417 
9418  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9419  }
9420 
9421  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9422  }
9423 
9424  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9425  {
9426  const VmaSuballocation& suballoc = suballocations1st[i];
9427  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9428  suballoc.hAllocation == VK_NULL_HANDLE);
9429  }
9430 
9431  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9432 
9433  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9434  {
9435  const VmaSuballocation& suballoc = suballocations1st[i];
9436  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9437 
9438  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9439  VMA_VALIDATE(suballoc.offset >= offset);
9440  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9441 
9442  if(!currFree)
9443  {
9444  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9445  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9446  sumUsedSize += suballoc.size;
9447  }
9448  else
9449  {
9450  ++nullItem1stCount;
9451  }
9452 
9453  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9454  }
9455  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9456 
9457  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9458  {
9459  const size_t suballoc2ndCount = suballocations2nd.size();
9460  size_t nullItem2ndCount = 0;
9461  for(size_t i = suballoc2ndCount; i--; )
9462  {
9463  const VmaSuballocation& suballoc = suballocations2nd[i];
9464  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9465 
9466  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9467  VMA_VALIDATE(suballoc.offset >= offset);
9468 
9469  if(!currFree)
9470  {
9471  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9472  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9473  sumUsedSize += suballoc.size;
9474  }
9475  else
9476  {
9477  ++nullItem2ndCount;
9478  }
9479 
9480  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9481  }
9482 
9483  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9484  }
9485 
9486  VMA_VALIDATE(offset <= GetSize());
9487  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9488 
9489  return true;
9490 }
9491 
9492 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9493 {
9494  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9495  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9496 }
9497 
9498 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9499 {
9500  const VkDeviceSize size = GetSize();
9501 
9502  /*
9503  We don't consider gaps inside allocation vectors with freed allocations because
9504  they are not suitable for reuse in linear allocator. We consider only space that
9505  is available for new allocations.
9506  */
9507  if(IsEmpty())
9508  {
9509  return size;
9510  }
9511 
9512  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9513 
9514  switch(m_2ndVectorMode)
9515  {
9516  case SECOND_VECTOR_EMPTY:
9517  /*
9518  Available space is after end of 1st, as well as before beginning of 1st (which
9519  would make it a ring buffer).
9520  */
9521  {
9522  const size_t suballocations1stCount = suballocations1st.size();
9523  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9524  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9525  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9526  return VMA_MAX(
9527  firstSuballoc.offset,
9528  size - (lastSuballoc.offset + lastSuballoc.size));
9529  }
9530  break;
9531 
9532  case SECOND_VECTOR_RING_BUFFER:
9533  /*
9534  Available space is only between end of 2nd and beginning of 1st.
9535  */
9536  {
9537  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9538  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9539  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9540  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9541  }
9542  break;
9543 
9544  case SECOND_VECTOR_DOUBLE_STACK:
9545  /*
9546  Available space is only between end of 1st and top of 2nd.
9547  */
9548  {
9549  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9550  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9551  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9552  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9553  }
9554  break;
9555 
9556  default:
9557  VMA_ASSERT(0);
9558  return 0;
9559  }
9560 }
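
// The three cases above correspond to the three layouts of a linear block
// (0 = free space, # = 1st vector, @ = 2nd vector):
//
//   SECOND_VECTOR_EMPTY:        |000########000000000000|  free before and after 1st
//   SECOND_VECTOR_RING_BUFFER:  |@@@@@000000############|  free between end of 2nd and start of 1st
//   SECOND_VECTOR_DOUBLE_STACK: |#########00000000@@@@@@|  free between end of 1st and top of 2nd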
9561 
9562 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9563 {
9564  const VkDeviceSize size = GetSize();
9565  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9566  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9567  const size_t suballoc1stCount = suballocations1st.size();
9568  const size_t suballoc2ndCount = suballocations2nd.size();
9569 
9570  outInfo.blockCount = 1;
9571  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9572  outInfo.unusedRangeCount = 0;
9573  outInfo.usedBytes = 0;
9574  outInfo.allocationSizeMin = UINT64_MAX;
9575  outInfo.allocationSizeMax = 0;
9576  outInfo.unusedRangeSizeMin = UINT64_MAX;
9577  outInfo.unusedRangeSizeMax = 0;
9578 
9579  VkDeviceSize lastOffset = 0;
9580 
9581  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9582  {
9583  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9584  size_t nextAlloc2ndIndex = 0;
9585  while(lastOffset < freeSpace2ndTo1stEnd)
9586  {
9587  // Find next non-null allocation or move nextAllocIndex to the end.
9588  while(nextAlloc2ndIndex < suballoc2ndCount &&
9589  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9590  {
9591  ++nextAlloc2ndIndex;
9592  }
9593 
9594  // Found non-null allocation.
9595  if(nextAlloc2ndIndex < suballoc2ndCount)
9596  {
9597  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9598 
9599  // 1. Process free space before this allocation.
9600  if(lastOffset < suballoc.offset)
9601  {
9602  // There is free space from lastOffset to suballoc.offset.
9603  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9604  ++outInfo.unusedRangeCount;
9605  outInfo.unusedBytes += unusedRangeSize;
9606  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9607  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9608  }
9609 
9610  // 2. Process this allocation.
9611  // There is allocation with suballoc.offset, suballoc.size.
9612  outInfo.usedBytes += suballoc.size;
9613  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9614  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9615 
9616  // 3. Prepare for next iteration.
9617  lastOffset = suballoc.offset + suballoc.size;
9618  ++nextAlloc2ndIndex;
9619  }
9620  // We are at the end.
9621  else
9622  {
9623  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9624  if(lastOffset < freeSpace2ndTo1stEnd)
9625  {
9626  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9627  ++outInfo.unusedRangeCount;
9628  outInfo.unusedBytes += unusedRangeSize;
9629  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9630  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9631  }
9632 
9633  // End of loop.
9634  lastOffset = freeSpace2ndTo1stEnd;
9635  }
9636  }
9637  }
9638 
9639  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9640  const VkDeviceSize freeSpace1stTo2ndEnd =
9641  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9642  while(lastOffset < freeSpace1stTo2ndEnd)
9643  {
9644  // Find next non-null allocation or move nextAllocIndex to the end.
9645  while(nextAlloc1stIndex < suballoc1stCount &&
9646  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9647  {
9648  ++nextAlloc1stIndex;
9649  }
9650 
9651  // Found non-null allocation.
9652  if(nextAlloc1stIndex < suballoc1stCount)
9653  {
9654  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9655 
9656  // 1. Process free space before this allocation.
9657  if(lastOffset < suballoc.offset)
9658  {
9659  // There is free space from lastOffset to suballoc.offset.
9660  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9661  ++outInfo.unusedRangeCount;
9662  outInfo.unusedBytes += unusedRangeSize;
9663  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9664  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9665  }
9666 
9667  // 2. Process this allocation.
9668  // There is allocation with suballoc.offset, suballoc.size.
9669  outInfo.usedBytes += suballoc.size;
9670  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9671  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9672 
9673  // 3. Prepare for next iteration.
9674  lastOffset = suballoc.offset + suballoc.size;
9675  ++nextAlloc1stIndex;
9676  }
9677  // We are at the end.
9678  else
9679  {
9680  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9681  if(lastOffset < freeSpace1stTo2ndEnd)
9682  {
9683  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9684  ++outInfo.unusedRangeCount;
9685  outInfo.unusedBytes += unusedRangeSize;
9686  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9687  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9688  }
9689 
9690  // End of loop.
9691  lastOffset = freeSpace1stTo2ndEnd;
9692  }
9693  }
9694 
9695  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9696  {
9697  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9698  while(lastOffset < size)
9699  {
9700  // Find next non-null allocation or move nextAllocIndex to the end.
9701  while(nextAlloc2ndIndex != SIZE_MAX &&
9702  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9703  {
9704  --nextAlloc2ndIndex;
9705  }
9706 
9707  // Found non-null allocation.
9708  if(nextAlloc2ndIndex != SIZE_MAX)
9709  {
9710  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9711 
9712  // 1. Process free space before this allocation.
9713  if(lastOffset < suballoc.offset)
9714  {
9715  // There is free space from lastOffset to suballoc.offset.
9716  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9717  ++outInfo.unusedRangeCount;
9718  outInfo.unusedBytes += unusedRangeSize;
9719  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9720  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9721  }
9722 
9723  // 2. Process this allocation.
9724  // There is allocation with suballoc.offset, suballoc.size.
9725  outInfo.usedBytes += suballoc.size;
9726  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9727  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9728 
9729  // 3. Prepare for next iteration.
9730  lastOffset = suballoc.offset + suballoc.size;
9731  --nextAlloc2ndIndex;
9732  }
9733  // We are at the end.
9734  else
9735  {
9736  // There is free space from lastOffset to size.
9737  if(lastOffset < size)
9738  {
9739  const VkDeviceSize unusedRangeSize = size - lastOffset;
9740  ++outInfo.unusedRangeCount;
9741  outInfo.unusedBytes += unusedRangeSize;
9742  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9743  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9744  }
9745 
9746  // End of loop.
9747  lastOffset = size;
9748  }
9749  }
9750  }
9751 
9752  outInfo.unusedBytes = size - outInfo.usedBytes;
9753 }
9754 
9755 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9756 {
9757  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9758  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9759  const VkDeviceSize size = GetSize();
9760  const size_t suballoc1stCount = suballocations1st.size();
9761  const size_t suballoc2ndCount = suballocations2nd.size();
9762 
9763  inoutStats.size += size;
9764 
9765  VkDeviceSize lastOffset = 0;
9766 
9767  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9768  {
9769  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9770  size_t nextAlloc2ndIndex = 0;
9771  while(lastOffset < freeSpace2ndTo1stEnd)
9772  {
9773  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9774  while(nextAlloc2ndIndex < suballoc2ndCount &&
9775  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9776  {
9777  ++nextAlloc2ndIndex;
9778  }
9779 
9780  // Found non-null allocation.
9781  if(nextAlloc2ndIndex < suballoc2ndCount)
9782  {
9783  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9784 
9785  // 1. Process free space before this allocation.
9786  if(lastOffset < suballoc.offset)
9787  {
9788  // There is free space from lastOffset to suballoc.offset.
9789  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9790  inoutStats.unusedSize += unusedRangeSize;
9791  ++inoutStats.unusedRangeCount;
9792  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9793  }
9794 
9795  // 2. Process this allocation.
9796  // There is allocation with suballoc.offset, suballoc.size.
9797  ++inoutStats.allocationCount;
9798 
9799  // 3. Prepare for next iteration.
9800  lastOffset = suballoc.offset + suballoc.size;
9801  ++nextAlloc2ndIndex;
9802  }
9803  // We are at the end.
9804  else
9805  {
9806  if(lastOffset < freeSpace2ndTo1stEnd)
9807  {
9808  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9809  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9810  inoutStats.unusedSize += unusedRangeSize;
9811  ++inoutStats.unusedRangeCount;
9812  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9813  }
9814 
9815  // End of loop.
9816  lastOffset = freeSpace2ndTo1stEnd;
9817  }
9818  }
9819  }
9820 
9821  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9822  const VkDeviceSize freeSpace1stTo2ndEnd =
9823  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9824  while(lastOffset < freeSpace1stTo2ndEnd)
9825  {
9826  // Find next non-null allocation or move nextAllocIndex to the end.
9827  while(nextAlloc1stIndex < suballoc1stCount &&
9828  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9829  {
9830  ++nextAlloc1stIndex;
9831  }
9832 
9833  // Found non-null allocation.
9834  if(nextAlloc1stIndex < suballoc1stCount)
9835  {
9836  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9837 
9838  // 1. Process free space before this allocation.
9839  if(lastOffset < suballoc.offset)
9840  {
9841  // There is free space from lastOffset to suballoc.offset.
9842  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9843  inoutStats.unusedSize += unusedRangeSize;
9844  ++inoutStats.unusedRangeCount;
9845  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9846  }
9847 
9848  // 2. Process this allocation.
9849  // There is allocation with suballoc.offset, suballoc.size.
9850  ++inoutStats.allocationCount;
9851 
9852  // 3. Prepare for next iteration.
9853  lastOffset = suballoc.offset + suballoc.size;
9854  ++nextAlloc1stIndex;
9855  }
9856  // We are at the end.
9857  else
9858  {
9859  if(lastOffset < freeSpace1stTo2ndEnd)
9860  {
9861  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9862  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9863  inoutStats.unusedSize += unusedRangeSize;
9864  ++inoutStats.unusedRangeCount;
9865  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9866  }
9867 
9868  // End of loop.
9869  lastOffset = freeSpace1stTo2ndEnd;
9870  }
9871  }
9872 
9873  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9874  {
9875  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9876  while(lastOffset < size)
9877  {
9878  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9879  while(nextAlloc2ndIndex != SIZE_MAX &&
9880  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9881  {
9882  --nextAlloc2ndIndex;
9883  }
9884 
9885  // Found non-null allocation.
9886  if(nextAlloc2ndIndex != SIZE_MAX)
9887  {
9888  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9889 
9890  // 1. Process free space before this allocation.
9891  if(lastOffset < suballoc.offset)
9892  {
9893  // There is free space from lastOffset to suballoc.offset.
9894  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9895  inoutStats.unusedSize += unusedRangeSize;
9896  ++inoutStats.unusedRangeCount;
9897  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9898  }
9899 
9900  // 2. Process this allocation.
9901  // There is allocation with suballoc.offset, suballoc.size.
9902  ++inoutStats.allocationCount;
9903 
9904  // 3. Prepare for next iteration.
9905  lastOffset = suballoc.offset + suballoc.size;
9906  --nextAlloc2ndIndex;
9907  }
9908  // We are at the end.
9909  else
9910  {
9911  if(lastOffset < size)
9912  {
9913  // There is free space from lastOffset to size.
9914  const VkDeviceSize unusedRangeSize = size - lastOffset;
9915  inoutStats.unusedSize += unusedRangeSize;
9916  ++inoutStats.unusedRangeCount;
9917  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9918  }
9919 
9920  // End of loop.
9921  lastOffset = size;
9922  }
9923  }
9924  }
9925 }
9926 
9927 #if VMA_STATS_STRING_ENABLED
9928 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9929 {
9930  const VkDeviceSize size = GetSize();
9931  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9932  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9933  const size_t suballoc1stCount = suballocations1st.size();
9934  const size_t suballoc2ndCount = suballocations2nd.size();
9935 
9936  // FIRST PASS
9937 
9938  size_t unusedRangeCount = 0;
9939  VkDeviceSize usedBytes = 0;
9940 
9941  VkDeviceSize lastOffset = 0;
9942 
9943  size_t alloc2ndCount = 0;
9944  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9945  {
9946  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9947  size_t nextAlloc2ndIndex = 0;
9948  while(lastOffset < freeSpace2ndTo1stEnd)
9949  {
9950  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9951  while(nextAlloc2ndIndex < suballoc2ndCount &&
9952  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9953  {
9954  ++nextAlloc2ndIndex;
9955  }
9956 
9957  // Found non-null allocation.
9958  if(nextAlloc2ndIndex < suballoc2ndCount)
9959  {
9960  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9961 
9962  // 1. Process free space before this allocation.
9963  if(lastOffset < suballoc.offset)
9964  {
9965  // There is free space from lastOffset to suballoc.offset.
9966  ++unusedRangeCount;
9967  }
9968 
9969  // 2. Process this allocation.
9970  // There is allocation with suballoc.offset, suballoc.size.
9971  ++alloc2ndCount;
9972  usedBytes += suballoc.size;
9973 
9974  // 3. Prepare for next iteration.
9975  lastOffset = suballoc.offset + suballoc.size;
9976  ++nextAlloc2ndIndex;
9977  }
9978  // We are at the end.
9979  else
9980  {
9981  if(lastOffset < freeSpace2ndTo1stEnd)
9982  {
9983  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9984  ++unusedRangeCount;
9985  }
9986 
9987  // End of loop.
9988  lastOffset = freeSpace2ndTo1stEnd;
9989  }
9990  }
9991  }
9992 
9993  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9994  size_t alloc1stCount = 0;
9995  const VkDeviceSize freeSpace1stTo2ndEnd =
9996  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9997  while(lastOffset < freeSpace1stTo2ndEnd)
9998  {
9999  // Find next non-null allocation or move nextAllocIndex to the end.
10000  while(nextAlloc1stIndex < suballoc1stCount &&
10001  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10002  {
10003  ++nextAlloc1stIndex;
10004  }
10005 
10006  // Found non-null allocation.
10007  if(nextAlloc1stIndex < suballoc1stCount)
10008  {
10009  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10010 
10011  // 1. Process free space before this allocation.
10012  if(lastOffset < suballoc.offset)
10013  {
10014  // There is free space from lastOffset to suballoc.offset.
10015  ++unusedRangeCount;
10016  }
10017 
10018  // 2. Process this allocation.
10019  // There is allocation with suballoc.offset, suballoc.size.
10020  ++alloc1stCount;
10021  usedBytes += suballoc.size;
10022 
10023  // 3. Prepare for next iteration.
10024  lastOffset = suballoc.offset + suballoc.size;
10025  ++nextAlloc1stIndex;
10026  }
10027  // We are at the end.
10028  else
10029  {
10030  if(lastOffset < freeSpace1stTo2ndEnd)
10031  {
10032  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10033  ++unusedRangeCount;
10034  }
10035 
10036  // End of loop.
10037  lastOffset = freeSpace1stTo2ndEnd;
10038  }
10039  }
10040 
10041  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10042  {
10043  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10044  while(lastOffset < size)
10045  {
10046  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10047  while(nextAlloc2ndIndex != SIZE_MAX &&
10048  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10049  {
10050  --nextAlloc2ndIndex;
10051  }
10052 
10053  // Found non-null allocation.
10054  if(nextAlloc2ndIndex != SIZE_MAX)
10055  {
10056  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10057 
10058  // 1. Process free space before this allocation.
10059  if(lastOffset < suballoc.offset)
10060  {
10061  // There is free space from lastOffset to suballoc.offset.
10062  ++unusedRangeCount;
10063  }
10064 
10065  // 2. Process this allocation.
10066  // There is allocation with suballoc.offset, suballoc.size.
10067  ++alloc2ndCount;
10068  usedBytes += suballoc.size;
10069 
10070  // 3. Prepare for next iteration.
10071  lastOffset = suballoc.offset + suballoc.size;
10072  --nextAlloc2ndIndex;
10073  }
10074  // We are at the end.
10075  else
10076  {
10077  if(lastOffset < size)
10078  {
10079  // There is free space from lastOffset to size.
10080  ++unusedRangeCount;
10081  }
10082 
10083  // End of loop.
10084  lastOffset = size;
10085  }
10086  }
10087  }
10088 
10089  const VkDeviceSize unusedBytes = size - usedBytes;
10090  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10091 
10092  // SECOND PASS
10093  lastOffset = 0;
10094 
10095  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10096  {
10097  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10098  size_t nextAlloc2ndIndex = 0;
10099  while(lastOffset < freeSpace2ndTo1stEnd)
10100  {
10101  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10102  while(nextAlloc2ndIndex < suballoc2ndCount &&
10103  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10104  {
10105  ++nextAlloc2ndIndex;
10106  }
10107 
10108  // Found non-null allocation.
10109  if(nextAlloc2ndIndex < suballoc2ndCount)
10110  {
10111  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10112 
10113  // 1. Process free space before this allocation.
10114  if(lastOffset < suballoc.offset)
10115  {
10116  // There is free space from lastOffset to suballoc.offset.
10117  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10118  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10119  }
10120 
10121  // 2. Process this allocation.
10122  // There is allocation with suballoc.offset, suballoc.size.
10123  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10124 
10125  // 3. Prepare for next iteration.
10126  lastOffset = suballoc.offset + suballoc.size;
10127  ++nextAlloc2ndIndex;
10128  }
10129  // We are at the end.
10130  else
10131  {
10132  if(lastOffset < freeSpace2ndTo1stEnd)
10133  {
10134  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10135  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10136  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10137  }
10138 
10139  // End of loop.
10140  lastOffset = freeSpace2ndTo1stEnd;
10141  }
10142  }
10143  }
10144 
10145  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10146  while(lastOffset < freeSpace1stTo2ndEnd)
10147  {
10148  // Find next non-null allocation or move nextAllocIndex to the end.
10149  while(nextAlloc1stIndex < suballoc1stCount &&
10150  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10151  {
10152  ++nextAlloc1stIndex;
10153  }
10154 
10155  // Found non-null allocation.
10156  if(nextAlloc1stIndex < suballoc1stCount)
10157  {
10158  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10159 
10160  // 1. Process free space before this allocation.
10161  if(lastOffset < suballoc.offset)
10162  {
10163  // There is free space from lastOffset to suballoc.offset.
10164  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10165  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10166  }
10167 
10168  // 2. Process this allocation.
10169  // There is allocation with suballoc.offset, suballoc.size.
10170  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10171 
10172  // 3. Prepare for next iteration.
10173  lastOffset = suballoc.offset + suballoc.size;
10174  ++nextAlloc1stIndex;
10175  }
10176  // We are at the end.
10177  else
10178  {
10179  if(lastOffset < freeSpace1stTo2ndEnd)
10180  {
10181  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10182  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10183  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10184  }
10185 
10186  // End of loop.
10187  lastOffset = freeSpace1stTo2ndEnd;
10188  }
10189  }
10190 
10191  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10192  {
10193  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10194  while(lastOffset < size)
10195  {
10196  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10197  while(nextAlloc2ndIndex != SIZE_MAX &&
10198  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10199  {
10200  --nextAlloc2ndIndex;
10201  }
10202 
10203  // Found non-null allocation.
10204  if(nextAlloc2ndIndex != SIZE_MAX)
10205  {
10206  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10207 
10208  // 1. Process free space before this allocation.
10209  if(lastOffset < suballoc.offset)
10210  {
10211  // There is free space from lastOffset to suballoc.offset.
10212  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10213  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10214  }
10215 
10216  // 2. Process this allocation.
10217  // There is allocation with suballoc.offset, suballoc.size.
10218  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10219 
10220  // 3. Prepare for next iteration.
10221  lastOffset = suballoc.offset + suballoc.size;
10222  --nextAlloc2ndIndex;
10223  }
10224  // We are at the end.
10225  else
10226  {
10227  if(lastOffset < size)
10228  {
10229  // There is free space from lastOffset to size.
10230  const VkDeviceSize unusedRangeSize = size - lastOffset;
10231  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10232  }
10233 
10234  // End of loop.
10235  lastOffset = size;
10236  }
10237  }
10238  }
10239 
10240  PrintDetailedMap_End(json);
10241 }
10242 #endif // #if VMA_STATS_STRING_ENABLED
10243 
10244 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10245  uint32_t currentFrameIndex,
10246  uint32_t frameInUseCount,
10247  VkDeviceSize bufferImageGranularity,
10248  VkDeviceSize allocSize,
10249  VkDeviceSize allocAlignment,
10250  bool upperAddress,
10251  VmaSuballocationType allocType,
10252  bool canMakeOtherLost,
10253  uint32_t strategy,
10254  VmaAllocationRequest* pAllocationRequest)
10255 {
10256  VMA_ASSERT(allocSize > 0);
10257  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10258  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10259  VMA_HEAVY_ASSERT(Validate());
10260  return upperAddress ?
10261  CreateAllocationRequest_UpperAddress(
10262  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10263  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10264  CreateAllocationRequest_LowerAddress(
10265  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10266  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10267 }
10268 
10269 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10270  uint32_t currentFrameIndex,
10271  uint32_t frameInUseCount,
10272  VkDeviceSize bufferImageGranularity,
10273  VkDeviceSize allocSize,
10274  VkDeviceSize allocAlignment,
10275  VmaSuballocationType allocType,
10276  bool canMakeOtherLost,
10277  uint32_t strategy,
10278  VmaAllocationRequest* pAllocationRequest)
10279 {
10280  const VkDeviceSize size = GetSize();
10281  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10282  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10283 
10284  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10285  {
10286  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10287  return false;
10288  }
10289 
10290  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10291  if(allocSize > size)
10292  {
10293  return false;
10294  }
10295  VkDeviceSize resultBaseOffset = size - allocSize;
10296  if(!suballocations2nd.empty())
10297  {
10298  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10299  resultBaseOffset = lastSuballoc.offset - allocSize;
10300  if(allocSize > lastSuballoc.offset)
10301  {
10302  return false;
10303  }
10304  }
10305 
10306  // Start from offset equal to end of free space.
10307  VkDeviceSize resultOffset = resultBaseOffset;
10308 
10309  // Apply VMA_DEBUG_MARGIN at the end.
10310  if(VMA_DEBUG_MARGIN > 0)
10311  {
10312  if(resultOffset < VMA_DEBUG_MARGIN)
10313  {
10314  return false;
10315  }
10316  resultOffset -= VMA_DEBUG_MARGIN;
10317  }
10318 
10319  // Apply alignment.
10320  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10321 
10322  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10323  // Make bigger alignment if necessary.
10324  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10325  {
10326  bool bufferImageGranularityConflict = false;
10327  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10328  {
10329  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10330  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10331  {
10332  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10333  {
10334  bufferImageGranularityConflict = true;
10335  break;
10336  }
10337  }
10338  else
10339  // Already on previous page.
10340  break;
10341  }
10342  if(bufferImageGranularityConflict)
10343  {
10344  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10345  }
10346  }
10347 
10348  // There is enough free space.
10349  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10350  suballocations1st.back().offset + suballocations1st.back().size :
10351  0;
10352  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10353  {
10354  // Check previous suballocations for BufferImageGranularity conflicts.
10355  // If conflict exists, allocation cannot be made here.
10356  if(bufferImageGranularity > 1)
10357  {
10358  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10359  {
10360  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10361  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10362  {
10363  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10364  {
10365  return false;
10366  }
10367  }
10368  else
10369  {
10370  // Already on next page.
10371  break;
10372  }
10373  }
10374  }
10375 
10376  // All tests passed: Success.
10377  pAllocationRequest->offset = resultOffset;
10378  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10379  pAllocationRequest->sumItemSize = 0;
10380  // pAllocationRequest->item unused.
10381  pAllocationRequest->itemsToMakeLostCount = 0;
10382  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10383  return true;
10384  }
10385 
10386  return false;
10387 }
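// A worked example of the math above, with illustrative values: block size
// 1024, allocSize 100, allocAlignment 64, no debug margin, 2nd vector empty.
// resultBaseOffset = 1024 - 100 = 924, VmaAlignDown(924, 64) = 896, so the
// allocation occupies [896, 996) and the range [996, 1024) at the very end of
// the block stays unused.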
10388 
10389 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10390  uint32_t currentFrameIndex,
10391  uint32_t frameInUseCount,
10392  VkDeviceSize bufferImageGranularity,
10393  VkDeviceSize allocSize,
10394  VkDeviceSize allocAlignment,
10395  VmaSuballocationType allocType,
10396  bool canMakeOtherLost,
10397  uint32_t strategy,
10398  VmaAllocationRequest* pAllocationRequest)
10399 {
10400  const VkDeviceSize size = GetSize();
10401  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10402  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10403 
10404  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10405  {
10406  // Try to allocate at the end of 1st vector.
10407 
10408  VkDeviceSize resultBaseOffset = 0;
10409  if(!suballocations1st.empty())
10410  {
10411  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10412  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10413  }
10414 
10415  // Start from offset equal to beginning of free space.
10416  VkDeviceSize resultOffset = resultBaseOffset;
10417 
10418  // Apply VMA_DEBUG_MARGIN at the beginning.
10419  if(VMA_DEBUG_MARGIN > 0)
10420  {
10421  resultOffset += VMA_DEBUG_MARGIN;
10422  }
10423 
10424  // Apply alignment.
10425  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10426 
10427  // Check previous suballocations for BufferImageGranularity conflicts.
10428  // Make bigger alignment if necessary.
10429  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10430  {
10431  bool bufferImageGranularityConflict = false;
10432  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10433  {
10434  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10435  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10436  {
10437  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10438  {
10439  bufferImageGranularityConflict = true;
10440  break;
10441  }
10442  }
10443  else
10444  // Already on previous page.
10445  break;
10446  }
10447  if(bufferImageGranularityConflict)
10448  {
10449  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10450  }
10451  }
10452 
10453  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10454  suballocations2nd.back().offset : size;
10455 
10456  // There is enough free space at the end after alignment.
10457  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10458  {
10459  // Check next suballocations for BufferImageGranularity conflicts.
10460  // If conflict exists, allocation cannot be made here.
10461  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10462  {
10463  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10464  {
10465  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10466  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10467  {
10468  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10469  {
10470  return false;
10471  }
10472  }
10473  else
10474  {
10475  // Already on previous page.
10476  break;
10477  }
10478  }
10479  }
10480 
10481  // All tests passed: Success.
10482  pAllocationRequest->offset = resultOffset;
10483  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10484  pAllocationRequest->sumItemSize = 0;
10485  // pAllocationRequest->item, customData unused.
10486  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10487  pAllocationRequest->itemsToMakeLostCount = 0;
10488  return true;
10489  }
10490  }
10491 
10492  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10493  // beginning of 1st vector as the end of free space.
10494  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10495  {
10496  VMA_ASSERT(!suballocations1st.empty());
10497 
10498  VkDeviceSize resultBaseOffset = 0;
10499  if(!suballocations2nd.empty())
10500  {
10501  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10502  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10503  }
10504 
10505  // Start from offset equal to beginning of free space.
10506  VkDeviceSize resultOffset = resultBaseOffset;
10507 
10508  // Apply VMA_DEBUG_MARGIN at the beginning.
10509  if(VMA_DEBUG_MARGIN > 0)
10510  {
10511  resultOffset += VMA_DEBUG_MARGIN;
10512  }
10513 
10514  // Apply alignment.
10515  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10516 
10517  // Check previous suballocations for BufferImageGranularity conflicts.
10518  // Make bigger alignment if necessary.
10519  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10520  {
10521  bool bufferImageGranularityConflict = false;
10522  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10523  {
10524  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10525  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10526  {
10527  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10528  {
10529  bufferImageGranularityConflict = true;
10530  break;
10531  }
10532  }
10533  else
10534  // Already on previous page.
10535  break;
10536  }
10537  if(bufferImageGranularityConflict)
10538  {
10539  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10540  }
10541  }
10542 
10543  pAllocationRequest->itemsToMakeLostCount = 0;
10544  pAllocationRequest->sumItemSize = 0;
10545  size_t index1st = m_1stNullItemsBeginCount;
10546 
10547  if(canMakeOtherLost)
10548  {
10549  while(index1st < suballocations1st.size() &&
10550  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10551  {
10552  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10553  const VmaSuballocation& suballoc = suballocations1st[index1st];
10554  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10555  {
10556  // No problem.
10557  }
10558  else
10559  {
10560  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10561  if(suballoc.hAllocation->CanBecomeLost() &&
10562  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10563  {
10564  ++pAllocationRequest->itemsToMakeLostCount;
10565  pAllocationRequest->sumItemSize += suballoc.size;
10566  }
10567  else
10568  {
10569  return false;
10570  }
10571  }
10572  ++index1st;
10573  }
10574 
10575  // Check next suballocations for BufferImageGranularity conflicts.
10576  // If conflict exists, we must mark more allocations lost or fail.
10577  if(bufferImageGranularity > 1)
10578  {
10579  while(index1st < suballocations1st.size())
10580  {
10581  const VmaSuballocation& suballoc = suballocations1st[index1st];
10582  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10583  {
10584  if(suballoc.hAllocation != VK_NULL_HANDLE)
10585  {
10586  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10587  if(suballoc.hAllocation->CanBecomeLost() &&
10588  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10589  {
10590  ++pAllocationRequest->itemsToMakeLostCount;
10591  pAllocationRequest->sumItemSize += suballoc.size;
10592  }
10593  else
10594  {
10595  return false;
10596  }
10597  }
10598  }
10599  else
10600  {
10601  // Already on next page.
10602  break;
10603  }
10604  ++index1st;
10605  }
10606  }
10607 
10608  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10609  if(index1st == suballocations1st.size() &&
10610  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10611  {
10612  // TODO: Known limitation, not implemented yet: in this case the allocation simply fails.
10613  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10614  }
10615  }
10616 
10617  // There is enough free space at the end after alignment.
10618  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10619  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10620  {
10621  // Check next suballocations for BufferImageGranularity conflicts.
10622  // If conflict exists, allocation cannot be made here.
10623  if(bufferImageGranularity > 1)
10624  {
10625  for(size_t nextSuballocIndex = index1st;
10626  nextSuballocIndex < suballocations1st.size();
10627  nextSuballocIndex++)
10628  {
10629  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10630  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10631  {
10632  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10633  {
10634  return false;
10635  }
10636  }
10637  else
10638  {
10639  // Already on next page.
10640  break;
10641  }
10642  }
10643  }
10644 
10645  // All tests passed: Success.
10646  pAllocationRequest->offset = resultOffset;
10647  pAllocationRequest->sumFreeSize =
10648  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10649  - resultBaseOffset
10650  - pAllocationRequest->sumItemSize;
10651  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10652  // pAllocationRequest->item, customData unused.
10653  return true;
10654  }
10655  }
10656 
10657  return false;
10658 }
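// Note on the flow above: a lower-address allocation is attempted in two
// places. First at the end of the 1st vector (when 2nd is empty or a double
// stack), and failing that at the end of the 2nd vector, i.e. wrapping around
// as a ring buffer in front of 1st, optionally making old allocations lost
// when canMakeOtherLost is set.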
10659 
10660 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10661  uint32_t currentFrameIndex,
10662  uint32_t frameInUseCount,
10663  VmaAllocationRequest* pAllocationRequest)
10664 {
10665  if(pAllocationRequest->itemsToMakeLostCount == 0)
10666  {
10667  return true;
10668  }
10669 
10670  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10671 
10672  // We always start from 1st.
10673  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10674  size_t index = m_1stNullItemsBeginCount;
10675  size_t madeLostCount = 0;
10676  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10677  {
10678  if(index == suballocations->size())
10679  {
10680  index = 0;
10681  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10682  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10683  {
10684  suballocations = &AccessSuballocations2nd();
10685  }
10686  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10687  // suballocations continues pointing at AccessSuballocations1st().
10688  VMA_ASSERT(!suballocations->empty());
10689  }
10690  VmaSuballocation& suballoc = (*suballocations)[index];
10691  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10692  {
10693  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10694  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10695  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10696  {
10697  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10698  suballoc.hAllocation = VK_NULL_HANDLE;
10699  m_SumFreeSize += suballoc.size;
10700  if(suballocations == &AccessSuballocations1st())
10701  {
10702  ++m_1stNullItemsMiddleCount;
10703  }
10704  else
10705  {
10706  ++m_2ndNullItemsCount;
10707  }
10708  ++madeLostCount;
10709  }
10710  else
10711  {
10712  return false;
10713  }
10714  }
10715  ++index;
10716  }
10717 
10718  CleanupAfterFree();
10719  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10720 
10721  return true;
10722 }
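// "Lost" allocations: an allocation created with the can-become-lost flag
// (VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) may be freed automatically once
// it has not been used for more than frameInUseCount frames. The functions
// above and below implement that via CanBecomeLost() and MakeLost().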
10723 
10724 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10725 {
10726  uint32_t lostAllocationCount = 0;
10727 
10728  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10729  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10730  {
10731  VmaSuballocation& suballoc = suballocations1st[i];
10732  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10733  suballoc.hAllocation->CanBecomeLost() &&
10734  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10735  {
10736  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10737  suballoc.hAllocation = VK_NULL_HANDLE;
10738  ++m_1stNullItemsMiddleCount;
10739  m_SumFreeSize += suballoc.size;
10740  ++lostAllocationCount;
10741  }
10742  }
10743 
10744  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10745  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10746  {
10747  VmaSuballocation& suballoc = suballocations2nd[i];
10748  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10749  suballoc.hAllocation->CanBecomeLost() &&
10750  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10751  {
10752  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10753  suballoc.hAllocation = VK_NULL_HANDLE;
10754  ++m_2ndNullItemsCount;
10755  m_SumFreeSize += suballoc.size;
10756  ++lostAllocationCount;
10757  }
10758  }
10759 
10760  if(lostAllocationCount)
10761  {
10762  CleanupAfterFree();
10763  }
10764 
10765  return lostAllocationCount;
10766 }
10767 
10768 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10769 {
10770  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10771  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10772  {
10773  const VmaSuballocation& suballoc = suballocations1st[i];
10774  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10775  {
10776  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10777  {
10778  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10779  return VK_ERROR_VALIDATION_FAILED_EXT;
10780  }
10781  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10782  {
10783  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10784  return VK_ERROR_VALIDATION_FAILED_EXT;
10785  }
10786  }
10787  }
10788 
10789  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10790  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10791  {
10792  const VmaSuballocation& suballoc = suballocations2nd[i];
10793  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10794  {
10795  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10796  {
10797  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10798  return VK_ERROR_VALIDATION_FAILED_EXT;
10799  }
10800  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10801  {
10802  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10803  return VK_ERROR_VALIDATION_FAILED_EXT;
10804  }
10805  }
10806  }
10807 
10808  return VK_SUCCESS;
10809 }
10810 
10811 void VmaBlockMetadata_Linear::Alloc(
10812  const VmaAllocationRequest& request,
10813  VmaSuballocationType type,
10814  VkDeviceSize allocSize,
10815  VmaAllocation hAllocation)
10816 {
10817  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10818 
10819  switch(request.type)
10820  {
10821  case VmaAllocationRequestType::UpperAddress:
10822  {
10823  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10824  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10825  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10826  suballocations2nd.push_back(newSuballoc);
10827  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10828  }
10829  break;
10830  case VmaAllocationRequestType::EndOf1st:
10831  {
10832  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10833 
10834  VMA_ASSERT(suballocations1st.empty() ||
10835  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10836  // Check if it fits before the end of the block.
10837  VMA_ASSERT(request.offset + allocSize <= GetSize());
10838 
10839  suballocations1st.push_back(newSuballoc);
10840  }
10841  break;
10842  case VmaAllocationRequestType::EndOf2nd:
10843  {
10844  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10845  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10846  VMA_ASSERT(!suballocations1st.empty() &&
10847  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10848  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10849 
10850  switch(m_2ndVectorMode)
10851  {
10852  case SECOND_VECTOR_EMPTY:
10853  // First allocation from second part ring buffer.
10854  VMA_ASSERT(suballocations2nd.empty());
10855  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10856  break;
10857  case SECOND_VECTOR_RING_BUFFER:
10858  // 2-part ring buffer is already started.
10859  VMA_ASSERT(!suballocations2nd.empty());
10860  break;
10861  case SECOND_VECTOR_DOUBLE_STACK:
10862  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10863  break;
10864  default:
10865  VMA_ASSERT(0);
10866  }
10867 
10868  suballocations2nd.push_back(newSuballoc);
10869  }
10870  break;
10871  default:
10872  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10873  }
10874 
10875  m_SumFreeSize -= newSuballoc.size;
10876 }
10877 
10878 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10879 {
10880  FreeAtOffset(allocation->GetOffset());
10881 }
10882 
10883 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10884 {
10885  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10886  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10887 
10888  if(!suballocations1st.empty())
10889  {
10890  // First allocation in 1st vector: mark it as the next empty item at the beginning.
10891  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10892  if(firstSuballoc.offset == offset)
10893  {
10894  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10895  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10896  m_SumFreeSize += firstSuballoc.size;
10897  ++m_1stNullItemsBeginCount;
10898  CleanupAfterFree();
10899  return;
10900  }
10901  }
10902 
10903  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10904  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10905  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10906  {
10907  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10908  if(lastSuballoc.offset == offset)
10909  {
10910  m_SumFreeSize += lastSuballoc.size;
10911  suballocations2nd.pop_back();
10912  CleanupAfterFree();
10913  return;
10914  }
10915  }
10916  // Last allocation in 1st vector.
10917  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10918  {
10919  VmaSuballocation& lastSuballoc = suballocations1st.back();
10920  if(lastSuballoc.offset == offset)
10921  {
10922  m_SumFreeSize += lastSuballoc.size;
10923  suballocations1st.pop_back();
10924  CleanupAfterFree();
10925  return;
10926  }
10927  }
10928 
10929  // Item from the middle of 1st vector.
10930  {
10931  VmaSuballocation refSuballoc;
10932  refSuballoc.offset = offset;
10933  // The rest of the members stay intentionally uninitialized for better performance.
10934  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
10935  suballocations1st.begin() + m_1stNullItemsBeginCount,
10936  suballocations1st.end(),
10937  refSuballoc,
10938  VmaSuballocationOffsetLess());
10939  if(it != suballocations1st.end())
10940  {
10941  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10942  it->hAllocation = VK_NULL_HANDLE;
10943  ++m_1stNullItemsMiddleCount;
10944  m_SumFreeSize += it->size;
10945  CleanupAfterFree();
10946  return;
10947  }
10948  }
10949 
10950  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10951  {
10952  // Item from the middle of 2nd vector.
10953  VmaSuballocation refSuballoc;
10954  refSuballoc.offset = offset;
10955  // The rest of the members stay intentionally uninitialized for better performance.
10956  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10957  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
10958  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
10959  if(it != suballocations2nd.end())
10960  {
10961  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10962  it->hAllocation = VK_NULL_HANDLE;
10963  ++m_2ndNullItemsCount;
10964  m_SumFreeSize += it->size;
10965  CleanupAfterFree();
10966  return;
10967  }
10968  }
10969 
10970  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10971 }
10972 
10973 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10974 {
10975  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10976  const size_t suballocCount = AccessSuballocations1st().size();
10977  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10978 }
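// Worked example of the heuristic above: with 60 suballocations of which 40
// are null, 40 * 2 = 80 >= (60 - 40) * 3 = 60 and 60 > 32, so compaction runs;
// it triggers only when null items clearly dominate the live ones.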
10979 
10980 void VmaBlockMetadata_Linear::CleanupAfterFree()
10981 {
10982  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10983  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10984 
10985  if(IsEmpty())
10986  {
10987  suballocations1st.clear();
10988  suballocations2nd.clear();
10989  m_1stNullItemsBeginCount = 0;
10990  m_1stNullItemsMiddleCount = 0;
10991  m_2ndNullItemsCount = 0;
10992  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10993  }
10994  else
10995  {
10996  const size_t suballoc1stCount = suballocations1st.size();
10997  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10998  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10999 
11000  // Find more null items at the beginning of 1st vector.
11001  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11002  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11003  {
11004  ++m_1stNullItemsBeginCount;
11005  --m_1stNullItemsMiddleCount;
11006  }
11007 
11008  // Find more null items at the end of 1st vector.
11009  while(m_1stNullItemsMiddleCount > 0 &&
11010  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11011  {
11012  --m_1stNullItemsMiddleCount;
11013  suballocations1st.pop_back();
11014  }
11015 
11016  // Find more null items at the end of 2nd vector.
11017  while(m_2ndNullItemsCount > 0 &&
11018  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11019  {
11020  --m_2ndNullItemsCount;
11021  suballocations2nd.pop_back();
11022  }
11023 
11024  // Find more null items at the beginning of 2nd vector.
11025  while(m_2ndNullItemsCount > 0 &&
11026  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11027  {
11028  --m_2ndNullItemsCount;
11029  VmaVectorRemove(suballocations2nd, 0);
11030  }
11031 
11032  if(ShouldCompact1st())
11033  {
11034  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11035  size_t srcIndex = m_1stNullItemsBeginCount;
11036  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11037  {
11038  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11039  {
11040  ++srcIndex;
11041  }
11042  if(dstIndex != srcIndex)
11043  {
11044  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11045  }
11046  ++srcIndex;
11047  }
11048  suballocations1st.resize(nonNullItemCount);
11049  m_1stNullItemsBeginCount = 0;
11050  m_1stNullItemsMiddleCount = 0;
11051  }
11052 
11053  // 2nd vector became empty.
11054  if(suballocations2nd.empty())
11055  {
11056  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11057  }
11058 
11059  // 1st vector became empty.
11060  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11061  {
11062  suballocations1st.clear();
11063  m_1stNullItemsBeginCount = 0;
11064 
11065  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11066  {
11067  // Swap 1st with 2nd. Now 2nd is empty.
11068  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11069  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11070  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11071  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11072  {
11073  ++m_1stNullItemsBeginCount;
11074  --m_1stNullItemsMiddleCount;
11075  }
11076  m_2ndNullItemsCount = 0;
11077  m_1stVectorIndex ^= 1;
11078  }
11079  }
11080  }
11081 
11082  VMA_HEAVY_ASSERT(Validate());
11083 }
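// Note on the final branch above: when 1st empties while 2nd still holds
// ring-buffer allocations, the roles of the two vectors are swapped just by
// flipping m_1stVectorIndex, so the surviving allocations become the new 1st
// vector without copying a single element.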
11084 
11085 
11086 ////////////////////////////////////////////////////////////////////////////////
11087 // class VmaBlockMetadata_Buddy
11088 
11089 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11090  VmaBlockMetadata(hAllocator),
11091  m_Root(VMA_NULL),
11092  m_AllocationCount(0),
11093  m_FreeCount(1),
11094  m_SumFreeSize(0)
11095 {
11096  memset(m_FreeList, 0, sizeof(m_FreeList));
11097 }
11098 
11099 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11100 {
11101  DeleteNode(m_Root);
11102 }
11103 
11104 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11105 {
11106  VmaBlockMetadata::Init(size);
11107 
11108  m_UsableSize = VmaPrevPow2(size);
11109  m_SumFreeSize = m_UsableSize;
11110 
11111  // Calculate m_LevelCount.
11112  m_LevelCount = 1;
11113  while(m_LevelCount < MAX_LEVELS &&
11114  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11115  {
11116  ++m_LevelCount;
11117  }
11118 
11119  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11120  rootNode->offset = 0;
11121  rootNode->type = Node::TYPE_FREE;
11122  rootNode->parent = VMA_NULL;
11123  rootNode->buddy = VMA_NULL;
11124 
11125  m_Root = rootNode;
11126  AddToFreeListFront(0, rootNode);
11127 }
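// Example of the sizing above, with illustrative values: for a 10 MiB block,
// VmaPrevPow2 gives m_UsableSize = 8 MiB; the remaining 2 MiB are treated as
// unusable space (see GetUnusableSize() below), and each level halves the node
// size starting from 8 MiB until MIN_NODE_SIZE or MAX_LEVELS stops it.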
11128 
11129 bool VmaBlockMetadata_Buddy::Validate() const
11130 {
11131  // Validate tree.
11132  ValidationContext ctx;
11133  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11134  {
11135  VMA_VALIDATE(false && "ValidateNode failed.");
11136  }
11137  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11138  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11139 
11140  // Validate free node lists.
11141  for(uint32_t level = 0; level < m_LevelCount; ++level)
11142  {
11143  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11144  m_FreeList[level].front->free.prev == VMA_NULL);
11145 
11146  for(Node* node = m_FreeList[level].front;
11147  node != VMA_NULL;
11148  node = node->free.next)
11149  {
11150  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11151 
11152  if(node->free.next == VMA_NULL)
11153  {
11154  VMA_VALIDATE(m_FreeList[level].back == node);
11155  }
11156  else
11157  {
11158  VMA_VALIDATE(node->free.next->free.prev == node);
11159  }
11160  }
11161  }
11162 
11163  // Validate that free lists at higher levels are empty.
11164  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11165  {
11166  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11167  }
11168 
11169  return true;
11170 }
11171 
11172 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11173 {
11174  for(uint32_t level = 0; level < m_LevelCount; ++level)
11175  {
11176  if(m_FreeList[level].front != VMA_NULL)
11177  {
11178  return LevelToNodeSize(level);
11179  }
11180  }
11181  return 0;
11182 }
11183 
11184 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11185 {
11186  const VkDeviceSize unusableSize = GetUnusableSize();
11187 
11188  outInfo.blockCount = 1;
11189 
11190  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11191  outInfo.usedBytes = outInfo.unusedBytes = 0;
11192 
11193  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11194  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11195  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11196 
11197  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11198 
11199  if(unusableSize > 0)
11200  {
11201  ++outInfo.unusedRangeCount;
11202  outInfo.unusedBytes += unusableSize;
11203  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11204  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11205  }
11206 }
11207 
11208 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11209 {
11210  const VkDeviceSize unusableSize = GetUnusableSize();
11211 
11212  inoutStats.size += GetSize();
11213  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11214  inoutStats.allocationCount += m_AllocationCount;
11215  inoutStats.unusedRangeCount += m_FreeCount;
11216  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11217 
11218  if(unusableSize > 0)
11219  {
11220  ++inoutStats.unusedRangeCount;
11221  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11222  }
11223 }
11224 
11225 #if VMA_STATS_STRING_ENABLED
11226 
11227 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11228 {
11229  // TODO optimize
11230  VmaStatInfo stat;
11231  CalcAllocationStatInfo(stat);
11232 
11233  PrintDetailedMap_Begin(
11234  json,
11235  stat.unusedBytes,
11236  stat.allocationCount,
11237  stat.unusedRangeCount);
11238 
11239  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11240 
11241  const VkDeviceSize unusableSize = GetUnusableSize();
11242  if(unusableSize > 0)
11243  {
11244  PrintDetailedMap_UnusedRange(json,
11245  m_UsableSize, // offset
11246  unusableSize); // size
11247  }
11248 
11249  PrintDetailedMap_End(json);
11250 }
11251 
11252 #endif // #if VMA_STATS_STRING_ENABLED
11253 
11254 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11255  uint32_t currentFrameIndex,
11256  uint32_t frameInUseCount,
11257  VkDeviceSize bufferImageGranularity,
11258  VkDeviceSize allocSize,
11259  VkDeviceSize allocAlignment,
11260  bool upperAddress,
11261  VmaSuballocationType allocType,
11262  bool canMakeOtherLost,
11263  uint32_t strategy,
11264  VmaAllocationRequest* pAllocationRequest)
11265 {
11266  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11267 
11268  // Simple way to respect bufferImageGranularity. May be optimized someday.
11269  // Whenever the allocation might be an OPTIMAL-tiling image, pad alignment and size up to the granularity.
11270  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11271  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11272  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11273  {
11274  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11275  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11276  }
11277 
11278  if(allocSize > m_UsableSize)
11279  {
11280  return false;
11281  }
11282 
11283  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11284  for(uint32_t level = targetLevel + 1; level--; )
11285  {
11286  for(Node* freeNode = m_FreeList[level].front;
11287  freeNode != VMA_NULL;
11288  freeNode = freeNode->free.next)
11289  {
11290  if(freeNode->offset % allocAlignment == 0)
11291  {
11292  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11293  pAllocationRequest->offset = freeNode->offset;
11294  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11295  pAllocationRequest->sumItemSize = 0;
11296  pAllocationRequest->itemsToMakeLostCount = 0;
11297  pAllocationRequest->customData = (void*)(uintptr_t)level;
11298  return true;
11299  }
11300  }
11301  }
11302 
11303  return false;
11304 }
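// The search above starts at targetLevel (the smallest node size that still
// fits allocSize) and walks towards the root, taking the first free node whose
// offset satisfies the requested alignment; the chosen level is passed on to
// Alloc() through the customData field.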
11305 
11306 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11307  uint32_t currentFrameIndex,
11308  uint32_t frameInUseCount,
11309  VmaAllocationRequest* pAllocationRequest)
11310 {
11311  /*
11312  Lost allocations are not supported in buddy allocator at the moment.
11313  Support might be added in the future.
11314  */
11315  return pAllocationRequest->itemsToMakeLostCount == 0;
11316 }
11317 
11318 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11319 {
11320  /*
11321  Lost allocations are not supported in buddy allocator at the moment.
11322  Support might be added in the future.
11323  */
11324  return 0;
11325 }
11326 
11327 void VmaBlockMetadata_Buddy::Alloc(
11328  const VmaAllocationRequest& request,
11329  VmaSuballocationType type,
11330  VkDeviceSize allocSize,
11331  VmaAllocation hAllocation)
11332 {
11333  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11334 
11335  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11336  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11337 
11338  Node* currNode = m_FreeList[currLevel].front;
11339  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11340  while(currNode->offset != request.offset)
11341  {
11342  currNode = currNode->free.next;
11343  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11344  }
11345 
11346  // Go down, splitting free nodes.
11347  while(currLevel < targetLevel)
11348  {
11349  // currNode is already first free node at currLevel.
11350  // Remove it from list of free nodes at this currLevel.
11351  RemoveFromFreeList(currLevel, currNode);
11352 
11353  const uint32_t childrenLevel = currLevel + 1;
11354 
11355  // Create two free sub-nodes.
11356  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11357  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11358 
11359  leftChild->offset = currNode->offset;
11360  leftChild->type = Node::TYPE_FREE;
11361  leftChild->parent = currNode;
11362  leftChild->buddy = rightChild;
11363 
11364  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11365  rightChild->type = Node::TYPE_FREE;
11366  rightChild->parent = currNode;
11367  rightChild->buddy = leftChild;
11368 
11369  // Convert current currNode to split type.
11370  currNode->type = Node::TYPE_SPLIT;
11371  currNode->split.leftChild = leftChild;
11372 
11373  // Add child nodes to free list. Order is important!
11374  AddToFreeListFront(childrenLevel, rightChild);
11375  AddToFreeListFront(childrenLevel, leftChild);
11376 
11377  ++m_FreeCount;
11378  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11379  ++currLevel;
11380  currNode = m_FreeList[currLevel].front;
11381 
11382  /*
11383  We can be sure that currNode, as the left child of the node previously split,
11384  also fulfills the alignment requirement.
11385  */
11386  }
11387 
11388  // Remove from free list.
11389  VMA_ASSERT(currLevel == targetLevel &&
11390  currNode != VMA_NULL &&
11391  currNode->type == Node::TYPE_FREE);
11392  RemoveFromFreeList(currLevel, currNode);
11393 
11394  // Convert to allocation node.
11395  currNode->type = Node::TYPE_ALLOCATION;
11396  currNode->allocation.alloc = hAllocation;
11397 
11398  ++m_AllocationCount;
11399  --m_FreeCount;
11400  m_SumFreeSize -= allocSize;
11401 }
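// The splitting loop above descends from the found free node down to
// targetLevel, replacing one free node with two half-size buddies at each
// step; the left child is pushed to the free list last, so it becomes the next
// currNode and the allocation stays at the original, aligned offset.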
11402 
11403 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11404 {
11405  if(node->type == Node::TYPE_SPLIT)
11406  {
11407  DeleteNode(node->split.leftChild->buddy);
11408  DeleteNode(node->split.leftChild);
11409  }
11410 
11411  vma_delete(GetAllocationCallbacks(), node);
11412 }
11413 
11414 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11415 {
11416  VMA_VALIDATE(level < m_LevelCount);
11417  VMA_VALIDATE(curr->parent == parent);
11418  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11419  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11420  switch(curr->type)
11421  {
11422  case Node::TYPE_FREE:
11423  // curr->free.prev, next are validated separately.
11424  ctx.calculatedSumFreeSize += levelNodeSize;
11425  ++ctx.calculatedFreeCount;
11426  break;
11427  case Node::TYPE_ALLOCATION:
11428  ++ctx.calculatedAllocationCount;
11429  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11430  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11431  break;
11432  case Node::TYPE_SPLIT:
11433  {
11434  const uint32_t childrenLevel = level + 1;
11435  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11436  const Node* const leftChild = curr->split.leftChild;
11437  VMA_VALIDATE(leftChild != VMA_NULL);
11438  VMA_VALIDATE(leftChild->offset == curr->offset);
11439  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11440  {
11441  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11442  }
11443  const Node* const rightChild = leftChild->buddy;
11444  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11445  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11446  {
11447  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11448  }
11449  }
11450  break;
11451  default:
11452  return false;
11453  }
11454 
11455  return true;
11456 }
11457 
11458 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11459 {
11460  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11461  uint32_t level = 0;
11462  VkDeviceSize currLevelNodeSize = m_UsableSize;
11463  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11464  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11465  {
11466  ++level;
11467  currLevelNodeSize = nextLevelNodeSize;
11468  nextLevelNodeSize = currLevelNodeSize >> 1;
11469  }
11470  return level;
11471 }
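// Worked example: with m_UsableSize = 256 and allocSize = 50 the loop visits
// node sizes 256 -> 128 -> 64 and stops because 50 > 32, returning level 2,
// i.e. the smallest node size (64) that still fits the allocation.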
11472 
11473 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11474 {
11475  // Find node and level.
11476  Node* node = m_Root;
11477  VkDeviceSize nodeOffset = 0;
11478  uint32_t level = 0;
11479  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11480  while(node->type == Node::TYPE_SPLIT)
11481  {
11482  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11483  if(offset < nodeOffset + nextLevelSize)
11484  {
11485  node = node->split.leftChild;
11486  }
11487  else
11488  {
11489  node = node->split.leftChild->buddy;
11490  nodeOffset += nextLevelSize;
11491  }
11492  ++level;
11493  levelNodeSize = nextLevelSize;
11494  }
11495 
11496  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11497  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11498 
11499  ++m_FreeCount;
11500  --m_AllocationCount;
11501  m_SumFreeSize += alloc->GetSize();
11502 
11503  node->type = Node::TYPE_FREE;
11504 
11505  // Join free nodes if possible.
11506  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11507  {
11508  RemoveFromFreeList(level, node->buddy);
11509  Node* const parent = node->parent;
11510 
11511  vma_delete(GetAllocationCallbacks(), node->buddy);
11512  vma_delete(GetAllocationCallbacks(), node);
11513  parent->type = Node::TYPE_FREE;
11514 
11515  node = parent;
11516  --level;
11517  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11518  --m_FreeCount;
11519  }
11520 
11521  AddToFreeListFront(level, node);
11522 }
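// The merge loop above is the classic buddy scheme: whenever a freed node's
// buddy is also free, both children are deleted and their parent becomes a
// single free node one level up, repeating until a used buddy or the root is
// reached.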
11523 
11524 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11525 {
11526  switch(node->type)
11527  {
11528  case Node::TYPE_FREE:
11529  ++outInfo.unusedRangeCount;
11530  outInfo.unusedBytes += levelNodeSize;
11531  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11532  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11533  break;
11534  case Node::TYPE_ALLOCATION:
11535  {
11536  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11537  ++outInfo.allocationCount;
11538  outInfo.usedBytes += allocSize;
11539  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11540  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11541 
11542  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11543  if(unusedRangeSize > 0)
11544  {
11545  ++outInfo.unusedRangeCount;
11546  outInfo.unusedBytes += unusedRangeSize;
11547  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11548  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11549  }
11550  }
11551  break;
11552  case Node::TYPE_SPLIT:
11553  {
11554  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11555  const Node* const leftChild = node->split.leftChild;
11556  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11557  const Node* const rightChild = leftChild->buddy;
11558  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11559  }
11560  break;
11561  default:
11562  VMA_ASSERT(0);
11563  }
11564 }
11565 
11566 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11567 {
11568  VMA_ASSERT(node->type == Node::TYPE_FREE);
11569 
11570  // List is empty.
11571  Node* const frontNode = m_FreeList[level].front;
11572  if(frontNode == VMA_NULL)
11573  {
11574  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11575  node->free.prev = node->free.next = VMA_NULL;
11576  m_FreeList[level].front = m_FreeList[level].back = node;
11577  }
11578  else
11579  {
11580  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11581  node->free.prev = VMA_NULL;
11582  node->free.next = frontNode;
11583  frontNode->free.prev = node;
11584  m_FreeList[level].front = node;
11585  }
11586 }
11587 
11588 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11589 {
11590  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11591 
11592  // It is at the front.
11593  if(node->free.prev == VMA_NULL)
11594  {
11595  VMA_ASSERT(m_FreeList[level].front == node);
11596  m_FreeList[level].front = node->free.next;
11597  }
11598  else
11599  {
11600  Node* const prevFreeNode = node->free.prev;
11601  VMA_ASSERT(prevFreeNode->free.next == node);
11602  prevFreeNode->free.next = node->free.next;
11603  }
11604 
11605  // It is at the back.
11606  if(node->free.next == VMA_NULL)
11607  {
11608  VMA_ASSERT(m_FreeList[level].back == node);
11609  m_FreeList[level].back = node->free.prev;
11610  }
11611  else
11612  {
11613  Node* const nextFreeNode = node->free.next;
11614  VMA_ASSERT(nextFreeNode->free.prev == node);
11615  nextFreeNode->free.prev = node->free.prev;
11616  }
11617 }
11618 
11619 #if VMA_STATS_STRING_ENABLED
11620 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11621 {
11622  switch(node->type)
11623  {
11624  case Node::TYPE_FREE:
11625  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11626  break;
11627  case Node::TYPE_ALLOCATION:
11628  {
11629  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11630  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11631  if(allocSize < levelNodeSize)
11632  {
11633  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11634  }
11635  }
11636  break;
11637  case Node::TYPE_SPLIT:
11638  {
11639  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11640  const Node* const leftChild = node->split.leftChild;
11641  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11642  const Node* const rightChild = leftChild->buddy;
11643  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11644  }
11645  break;
11646  default:
11647  VMA_ASSERT(0);
11648  }
11649 }
11650 #endif // #if VMA_STATS_STRING_ENABLED
11651 
11652 
11653 ////////////////////////////////////////////////////////////////////////////////
11654 // class VmaDeviceMemoryBlock
11655 
11656 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11657  m_pMetadata(VMA_NULL),
11658  m_MemoryTypeIndex(UINT32_MAX),
11659  m_Id(0),
11660  m_hMemory(VK_NULL_HANDLE),
11661  m_MapCount(0),
11662  m_pMappedData(VMA_NULL)
11663 {
11664 }
11665 
11666 void VmaDeviceMemoryBlock::Init(
11667  VmaAllocator hAllocator,
11668  VmaPool hParentPool,
11669  uint32_t newMemoryTypeIndex,
11670  VkDeviceMemory newMemory,
11671  VkDeviceSize newSize,
11672  uint32_t id,
11673  uint32_t algorithm)
11674 {
11675  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11676 
11677  m_hParentPool = hParentPool;
11678  m_MemoryTypeIndex = newMemoryTypeIndex;
11679  m_Id = id;
11680  m_hMemory = newMemory;
11681 
11682  switch(algorithm)
11683  {
11684  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11685  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11686  break;
11687  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11688  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11689  break;
11690  default:
11691  VMA_ASSERT(0);
11692  // Fall-through.
11693  case 0:
11694  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11695  }
11696  m_pMetadata->Init(newSize);
11697 }
11698 
11699 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11700 {
11701  // This is the most important assert in the entire library.
11702  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11703  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11704 
11705  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11706  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11707  m_hMemory = VK_NULL_HANDLE;
11708 
11709  vma_delete(allocator, m_pMetadata);
11710  m_pMetadata = VMA_NULL;
11711 }
11712 
11713 bool VmaDeviceMemoryBlock::Validate() const
11714 {
11715  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11716  (m_pMetadata->GetSize() != 0));
11717 
11718  return m_pMetadata->Validate();
11719 }
11720 
11721 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11722 {
11723  void* pData = nullptr;
11724  VkResult res = Map(hAllocator, 1, &pData);
11725  if(res != VK_SUCCESS)
11726  {
11727  return res;
11728  }
11729 
11730  res = m_pMetadata->CheckCorruption(pData);
11731 
11732  Unmap(hAllocator, 1);
11733 
11734  return res;
11735 }
11736 
11737 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11738 {
11739  if(count == 0)
11740  {
11741  return VK_SUCCESS;
11742  }
11743 
11744  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11745  if(m_MapCount != 0)
11746  {
11747  m_MapCount += count;
11748  VMA_ASSERT(m_pMappedData != VMA_NULL);
11749  if(ppData != VMA_NULL)
11750  {
11751  *ppData = m_pMappedData;
11752  }
11753  return VK_SUCCESS;
11754  }
11755  else
11756  {
11757  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11758  hAllocator->m_hDevice,
11759  m_hMemory,
11760  0, // offset
11761  VK_WHOLE_SIZE,
11762  0, // flags
11763  &m_pMappedData);
11764  if(result == VK_SUCCESS)
11765  {
11766  if(ppData != VMA_NULL)
11767  {
11768  *ppData = m_pMappedData;
11769  }
11770  m_MapCount = count;
11771  }
11772  return result;
11773  }
11774 }
11775 
11776 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11777 {
11778  if(count == 0)
11779  {
11780  return;
11781  }
11782 
11783  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11784  if(m_MapCount >= count)
11785  {
11786  m_MapCount -= count;
11787  if(m_MapCount == 0)
11788  {
11789  m_pMappedData = VMA_NULL;
11790  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11791  }
11792  }
11793  else
11794  {
11795  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11796  }
11797 }
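// Map()/Unmap() above implement reference-counted mapping: vkMapMemory is
// called only on the 0 -> nonzero transition and vkUnmapMemory only when the
// count drops back to 0, so nested Map(hAllocator, 1)/Unmap(hAllocator, 1)
// pairs on the same block are cheap and thread-safe under the block mutex.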
11798 
11799 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11800 {
11801  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11802  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11803 
11804  void* pData;
11805  VkResult res = Map(hAllocator, 1, &pData);
11806  if(res != VK_SUCCESS)
11807  {
11808  return res;
11809  }
11810 
11811  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11812  VmaWriteMagicValue(pData, allocOffset + allocSize);
11813 
11814  Unmap(hAllocator, 1);
11815 
11816  return VK_SUCCESS;
11817 }
11818 
11819 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11820 {
11821  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11822  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11823 
11824  void* pData;
11825  VkResult res = Map(hAllocator, 1, &pData);
11826  if(res != VK_SUCCESS)
11827  {
11828  return res;
11829  }
11830 
11831  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11832  {
11833  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11834  }
11835  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11836  {
11837  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11838  }
11839 
11840  Unmap(hAllocator, 1);
11841 
11842  return VK_SUCCESS;
11843 }
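// Both magic-value helpers above rely on the configuration asserted at their
// top: a nonzero VMA_DEBUG_MARGIN (a multiple of 4) reserved around every
// allocation and VMA_DEBUG_DETECT_CORRUPTION enabled, so each allocation is
// bracketed by marker words that CheckCorruption() can verify later.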
11844 
11845 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11846  const VmaAllocator hAllocator,
11847  const VmaAllocation hAllocation,
11848  VkDeviceSize allocationLocalOffset,
11849  VkBuffer hBuffer,
11850  const void* pNext)
11851 {
11852  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11853  hAllocation->GetBlock() == this);
11854  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11855  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11856  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11857  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11858  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11859  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11860 }
11861 
11862 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11863  const VmaAllocator hAllocator,
11864  const VmaAllocation hAllocation,
11865  VkDeviceSize allocationLocalOffset,
11866  VkImage hImage,
11867  const void* pNext)
11868 {
11869  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11870  hAllocation->GetBlock() == this);
11871  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11872  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11873  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11874  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11875  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11876  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11877 }
11878 
11879 static void InitStatInfo(VmaStatInfo& outInfo)
11880 {
11881  memset(&outInfo, 0, sizeof(outInfo));
11882  outInfo.allocationSizeMin = UINT64_MAX;
11883  outInfo.unusedRangeSizeMin = UINT64_MAX;
11884 }
11885 
11886 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11887 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11888 {
11889  inoutInfo.blockCount += srcInfo.blockCount;
11890  inoutInfo.allocationCount += srcInfo.allocationCount;
11891  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11892  inoutInfo.usedBytes += srcInfo.usedBytes;
11893  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11894  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11895  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11896  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11897  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11898 }
11899 
11900 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11901 {
11902  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11903  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11904  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11905  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11906 }
11907 
11908 VmaPool_T::VmaPool_T(
11909  VmaAllocator hAllocator,
11910  const VmaPoolCreateInfo& createInfo,
11911  VkDeviceSize preferredBlockSize) :
11912  m_BlockVector(
11913  hAllocator,
11914  this, // hParentPool
11915  createInfo.memoryTypeIndex,
11916  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11917  createInfo.minBlockCount,
11918  createInfo.maxBlockCount,
11919  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11920  createInfo.frameInUseCount,
11921  createInfo.blockSize != 0, // explicitBlockSize
11922  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11923  m_Id(0),
11924  m_Name(VMA_NULL)
11925 {
11926 }
11927 
11928 VmaPool_T::~VmaPool_T()
11929 {
11930 }
11931 
11932 void VmaPool_T::SetName(const char* pName)
11933 {
11934  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
11935  VmaFreeString(allocs, m_Name);
11936 
11937  if(pName != VMA_NULL)
11938  {
11939  m_Name = VmaCreateStringCopy(allocs, pName);
11940  }
11941  else
11942  {
11943  m_Name = VMA_NULL;
11944  }
11945 }
11946 
11947 #if VMA_STATS_STRING_ENABLED
11948 
11949 #endif // #if VMA_STATS_STRING_ENABLED
11950 
11951 VmaBlockVector::VmaBlockVector(
11952  VmaAllocator hAllocator,
11953  VmaPool hParentPool,
11954  uint32_t memoryTypeIndex,
11955  VkDeviceSize preferredBlockSize,
11956  size_t minBlockCount,
11957  size_t maxBlockCount,
11958  VkDeviceSize bufferImageGranularity,
11959  uint32_t frameInUseCount,
11960  bool explicitBlockSize,
11961  uint32_t algorithm) :
11962  m_hAllocator(hAllocator),
11963  m_hParentPool(hParentPool),
11964  m_MemoryTypeIndex(memoryTypeIndex),
11965  m_PreferredBlockSize(preferredBlockSize),
11966  m_MinBlockCount(minBlockCount),
11967  m_MaxBlockCount(maxBlockCount),
11968  m_BufferImageGranularity(bufferImageGranularity),
11969  m_FrameInUseCount(frameInUseCount),
11970  m_ExplicitBlockSize(explicitBlockSize),
11971  m_Algorithm(algorithm),
11972  m_HasEmptyBlock(false),
11973  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11974  m_NextBlockId(0)
11975 {
11976 }
11977 
11978 VmaBlockVector::~VmaBlockVector()
11979 {
11980  for(size_t i = m_Blocks.size(); i--; )
11981  {
11982  m_Blocks[i]->Destroy(m_hAllocator);
11983  vma_delete(m_hAllocator, m_Blocks[i]);
11984  }
11985 }
11986 
11987 VkResult VmaBlockVector::CreateMinBlocks()
11988 {
11989  for(size_t i = 0; i < m_MinBlockCount; ++i)
11990  {
11991  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11992  if(res != VK_SUCCESS)
11993  {
11994  return res;
11995  }
11996  }
11997  return VK_SUCCESS;
11998 }
11999 
12000 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12001 {
12002  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12003 
12004  const size_t blockCount = m_Blocks.size();
12005 
12006  pStats->size = 0;
12007  pStats->unusedSize = 0;
12008  pStats->allocationCount = 0;
12009  pStats->unusedRangeCount = 0;
12010  pStats->unusedRangeSizeMax = 0;
12011  pStats->blockCount = blockCount;
12012 
12013  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12014  {
12015  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12016  VMA_ASSERT(pBlock);
12017  VMA_HEAVY_ASSERT(pBlock->Validate());
12018  pBlock->m_pMetadata->AddPoolStats(*pStats);
12019  }
12020 }
12021 
12022 bool VmaBlockVector::IsEmpty()
12023 {
12024  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12025  return m_Blocks.empty();
12026 }
12027 
12028 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12029 {
12030  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12031  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12032  (VMA_DEBUG_MARGIN > 0) &&
12033  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12034  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12035 }
12036 
12037 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12038 
12039 VkResult VmaBlockVector::Allocate(
12040  uint32_t currentFrameIndex,
12041  VkDeviceSize size,
12042  VkDeviceSize alignment,
12043  const VmaAllocationCreateInfo& createInfo,
12044  VmaSuballocationType suballocType,
12045  size_t allocationCount,
12046  VmaAllocation* pAllocations)
12047 {
12048  size_t allocIndex;
12049  VkResult res = VK_SUCCESS;
12050 
12051  if(IsCorruptionDetectionEnabled())
12052  {
12053  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12054  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12055  }
12056 
12057  {
12058  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12059  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12060  {
12061  res = AllocatePage(
12062  currentFrameIndex,
12063  size,
12064  alignment,
12065  createInfo,
12066  suballocType,
12067  pAllocations + allocIndex);
12068  if(res != VK_SUCCESS)
12069  {
12070  break;
12071  }
12072  }
12073  }
12074 
12075  if(res != VK_SUCCESS)
12076  {
12077  // Free all already created allocations.
12078  while(allocIndex--)
12079  {
12080  Free(pAllocations[allocIndex]);
12081  }
12082  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12083  }
12084 
12085  return res;
12086 }
12087 
12088 VkResult VmaBlockVector::AllocatePage(
12089  uint32_t currentFrameIndex,
12090  VkDeviceSize size,
12091  VkDeviceSize alignment,
12092  const VmaAllocationCreateInfo& createInfo,
12093  VmaSuballocationType suballocType,
12094  VmaAllocation* pAllocation)
12095 {
12096  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12097  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12098  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12099  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12100 
12101  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
12102  VkDeviceSize freeMemory;
12103  {
12104  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12105  VmaBudget heapBudget = {};
12106  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12107  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12108  }
12109 
12110  const bool canFallbackToDedicated = !IsCustomPool();
12111  const bool canCreateNewBlock =
12112  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12113  (m_Blocks.size() < m_MaxBlockCount) &&
12114  (freeMemory >= size || !canFallbackToDedicated);
12115  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12116 
12117  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
12118  // which in turn is available only when maxBlockCount = 1.
12119  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12120  {
12121  canMakeOtherLost = false;
12122  }
12123 
12124  // Upper address can only be used with the linear allocator, and only within a single memory block.
12125  if(isUpperAddress &&
12126  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12127  {
12128  return VK_ERROR_FEATURE_NOT_PRESENT;
12129  }
12130 
12131  // Validate strategy.
12132  switch(strategy)
12133  {
12134  case 0:
12135  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12136  break;
12137  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12138  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12139  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12140  break;
12141  default:
12142  return VK_ERROR_FEATURE_NOT_PRESENT;
12143  }
12144 
12145  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12146  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12147  {
12148  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12149  }
12150 
12151  /*
12152  Under certain conditions, this whole section can be skipped for optimization, so
12153  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12154  e.g. for custom pools with the linear algorithm.
12155  */
12156  if(!canMakeOtherLost || canCreateNewBlock)
12157  {
12158  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12159  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12160  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12161 
12162  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12163  {
12164  // Use only last block.
12165  if(!m_Blocks.empty())
12166  {
12167  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12168  VMA_ASSERT(pCurrBlock);
12169  VkResult res = AllocateFromBlock(
12170  pCurrBlock,
12171  currentFrameIndex,
12172  size,
12173  alignment,
12174  allocFlagsCopy,
12175  createInfo.pUserData,
12176  suballocType,
12177  strategy,
12178  pAllocation);
12179  if(res == VK_SUCCESS)
12180  {
12181  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12182  return VK_SUCCESS;
12183  }
12184  }
12185  }
12186  else
12187  {
12188  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12189  {
12190  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12191  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12192  {
12193  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12194  VMA_ASSERT(pCurrBlock);
12195  VkResult res = AllocateFromBlock(
12196  pCurrBlock,
12197  currentFrameIndex,
12198  size,
12199  alignment,
12200  allocFlagsCopy,
12201  createInfo.pUserData,
12202  suballocType,
12203  strategy,
12204  pAllocation);
12205  if(res == VK_SUCCESS)
12206  {
12207  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12208  return VK_SUCCESS;
12209  }
12210  }
12211  }
12212  else // WORST_FIT, FIRST_FIT
12213  {
12214  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12215  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12216  {
12217  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12218  VMA_ASSERT(pCurrBlock);
12219  VkResult res = AllocateFromBlock(
12220  pCurrBlock,
12221  currentFrameIndex,
12222  size,
12223  alignment,
12224  allocFlagsCopy,
12225  createInfo.pUserData,
12226  suballocType,
12227  strategy,
12228  pAllocation);
12229  if(res == VK_SUCCESS)
12230  {
12231  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12232  return VK_SUCCESS;
12233  }
12234  }
12235  }
12236  }
12237 
12238  // 2. Try to create new block.
12239  if(canCreateNewBlock)
12240  {
12241  // Calculate optimal size for new block.
12242  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12243  uint32_t newBlockSizeShift = 0;
12244  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12245 
12246  if(!m_ExplicitBlockSize)
12247  {
12248  // Allocate 1/8, 1/4, 1/2 as first blocks.
12249  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12250  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12251  {
12252  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12253  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12254  {
12255  newBlockSize = smallerNewBlockSize;
12256  ++newBlockSizeShift;
12257  }
12258  else
12259  {
12260  break;
12261  }
12262  }
12263  }
12264 
12265  size_t newBlockIndex = 0;
12266  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12267  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12268  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12269  if(!m_ExplicitBlockSize)
12270  {
12271  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12272  {
12273  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12274  if(smallerNewBlockSize >= size)
12275  {
12276  newBlockSize = smallerNewBlockSize;
12277  ++newBlockSizeShift;
12278  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12279  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12280  }
12281  else
12282  {
12283  break;
12284  }
12285  }
12286  }
12287 
12288  if(res == VK_SUCCESS)
12289  {
12290  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12291  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12292 
12293  res = AllocateFromBlock(
12294  pBlock,
12295  currentFrameIndex,
12296  size,
12297  alignment,
12298  allocFlagsCopy,
12299  createInfo.pUserData,
12300  suballocType,
12301  strategy,
12302  pAllocation);
12303  if(res == VK_SUCCESS)
12304  {
12305  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12306  return VK_SUCCESS;
12307  }
12308  else
12309  {
12310  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12311  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12312  }
12313  }
12314  }
12315  }
12316 
12317  // 3. Try to allocate from existing blocks, making other allocations lost.
12318  if(canMakeOtherLost)
12319  {
12320  uint32_t tryIndex = 0;
12321  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12322  {
12323  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12324  VmaAllocationRequest bestRequest = {};
12325  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12326 
12327  // 1. Search existing allocations.
12328  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12329  {
12330  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12331  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12332  {
12333  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12334  VMA_ASSERT(pCurrBlock);
12335  VmaAllocationRequest currRequest = {};
12336  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12337  currentFrameIndex,
12338  m_FrameInUseCount,
12339  m_BufferImageGranularity,
12340  size,
12341  alignment,
12342  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12343  suballocType,
12344  canMakeOtherLost,
12345  strategy,
12346  &currRequest))
12347  {
12348  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12349  if(pBestRequestBlock == VMA_NULL ||
12350  currRequestCost < bestRequestCost)
12351  {
12352  pBestRequestBlock = pCurrBlock;
12353  bestRequest = currRequest;
12354  bestRequestCost = currRequestCost;
12355 
12356  if(bestRequestCost == 0)
12357  {
12358  break;
12359  }
12360  }
12361  }
12362  }
12363  }
12364  else // WORST_FIT, FIRST_FIT
12365  {
12366  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12367  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12368  {
12369  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12370  VMA_ASSERT(pCurrBlock);
12371  VmaAllocationRequest currRequest = {};
12372  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12373  currentFrameIndex,
12374  m_FrameInUseCount,
12375  m_BufferImageGranularity,
12376  size,
12377  alignment,
12378  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12379  suballocType,
12380  canMakeOtherLost,
12381  strategy,
12382  &currRequest))
12383  {
12384  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12385  if(pBestRequestBlock == VMA_NULL ||
12386  currRequestCost < bestRequestCost ||
12387  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12388  {
12389  pBestRequestBlock = pCurrBlock;
12390  bestRequest = currRequest;
12391  bestRequestCost = currRequestCost;
12392 
12393  if(bestRequestCost == 0 ||
12394  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12395  {
12396  break;
12397  }
12398  }
12399  }
12400  }
12401  }
12402 
12403  if(pBestRequestBlock != VMA_NULL)
12404  {
12405  if(mapped)
12406  {
12407  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12408  if(res != VK_SUCCESS)
12409  {
12410  return res;
12411  }
12412  }
12413 
12414  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12415  currentFrameIndex,
12416  m_FrameInUseCount,
12417  &bestRequest))
12418  {
12419  // Allocate from this pBlock.
12420  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12421  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12422  UpdateHasEmptyBlock();
12423  (*pAllocation)->InitBlockAllocation(
12424  pBestRequestBlock,
12425  bestRequest.offset,
12426  alignment,
12427  size,
12428  m_MemoryTypeIndex,
12429  suballocType,
12430  mapped,
12431  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12432  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12433  VMA_DEBUG_LOG(" Returned from existing block");
12434  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12435  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12436  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12437  {
12438  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12439  }
12440  if(IsCorruptionDetectionEnabled())
12441  {
12442  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12443  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12444  }
12445  return VK_SUCCESS;
12446  }
12447  // else: Some allocations must have been touched while we were here. Next try.
12448  }
12449  else
12450  {
12451  // Could not find place in any of the blocks - break outer loop.
12452  break;
12453  }
12454  }
12455  /* Maximum number of tries exceeded - a very unlikely event that happens when many
12456  other threads are simultaneously touching allocations, making it impossible to mark
12457  them as lost at the same time as we try to allocate. */
12458  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12459  {
12460  return VK_ERROR_TOO_MANY_OBJECTS;
12461  }
12462  }
12463 
12464  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12465 }
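// Worked example of the new-block sizing heuristic above (illustrative
// numbers): with m_PreferredBlockSize = 256 MiB, no existing blocks, and a
// 10 MiB request, the pre-allocation loop halves the candidate size while it
// stays above both the largest existing block and 2 * size:
// 256 -> 128 -> 64 -> 32 MiB (NEW_BLOCK_SIZE_SHIFT_MAX caps it at 3 halvings).
// If vkAllocateMemory still fails, the retry loop keeps halving down to the
// requested size before the caller falls back to a dedicated allocation or
// returns VK_ERROR_OUT_OF_DEVICE_MEMORY.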
12466 
12467 void VmaBlockVector::Free(
12468  const VmaAllocation hAllocation)
12469 {
12470  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12471 
12472  bool budgetExceeded = false;
12473  {
12474  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12475  VmaBudget heapBudget = {};
12476  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12477  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12478  }
12479 
12480  // Scope for lock.
12481  {
12482  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12483 
12484  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12485 
12486  if(IsCorruptionDetectionEnabled())
12487  {
12488  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12489  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12490  }
12491 
12492  if(hAllocation->IsPersistentMap())
12493  {
12494  pBlock->Unmap(m_hAllocator, 1);
12495  }
12496 
12497  pBlock->m_pMetadata->Free(hAllocation);
12498  VMA_HEAVY_ASSERT(pBlock->Validate());
12499 
12500  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12501 
12502  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12503  // pBlock became empty after this deallocation.
12504  if(pBlock->m_pMetadata->IsEmpty())
12505  {
12506  // We already have an empty block - we don't want two, so delete this one.
12507  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12508  {
12509  pBlockToDelete = pBlock;
12510  Remove(pBlock);
12511  }
12512  // else: We now have an empty block - leave it.
12513  }
12514  // pBlock didn't become empty, but we have another empty block - find and free that one.
12515  // (This is optional - a heuristic.)
12516  else if(m_HasEmptyBlock && canDeleteBlock)
12517  {
12518  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12519  if(pLastBlock->m_pMetadata->IsEmpty())
12520  {
12521  pBlockToDelete = pLastBlock;
12522  m_Blocks.pop_back();
12523  }
12524  }
12525 
12526  UpdateHasEmptyBlock();
12527  IncrementallySortBlocks();
12528  }
12529 
12530  // Destruction of a free block. Deferred until this point, outside of the mutex
12531  // lock, for performance reasons.
12532  if(pBlockToDelete != VMA_NULL)
12533  {
12534  VMA_DEBUG_LOG(" Deleted empty block");
12535  pBlockToDelete->Destroy(m_hAllocator);
12536  vma_delete(m_hAllocator, pBlockToDelete);
12537  }
12538 }
12539 
12540 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12541 {
12542  VkDeviceSize result = 0;
12543  for(size_t i = m_Blocks.size(); i--; )
12544  {
12545  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12546  if(result >= m_PreferredBlockSize)
12547  {
12548  break;
12549  }
12550  }
12551  return result;
12552 }
12553 
12554 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12555 {
12556  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12557  {
12558  if(m_Blocks[blockIndex] == pBlock)
12559  {
12560  VmaVectorRemove(m_Blocks, blockIndex);
12561  return;
12562  }
12563  }
12564  VMA_ASSERT(0);
12565 }
12566 
12567 void VmaBlockVector::IncrementallySortBlocks()
12568 {
12569  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12570  {
12571  // Bubble sort only until first swap.
12572  for(size_t i = 1; i < m_Blocks.size(); ++i)
12573  {
12574  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12575  {
12576  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12577  return;
12578  }
12579  }
12580  }
12581 }
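// Design note with a small trace (illustrative numbers): one bubble-sort pass
// with at most a single swap per call keeps Free() cheap while the vector
// converges toward ascending free space - the order the forward BEST_FIT
// search in AllocatePage() relies on:
//
//     sum free sizes before: [64, 16, 32]
//     after one call:        [16, 64, 32]  (single swap, then return)
//     after the next call:   [16, 32, 64]  (fully sorted)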
12582 
12583 VkResult VmaBlockVector::AllocateFromBlock(
12584  VmaDeviceMemoryBlock* pBlock,
12585  uint32_t currentFrameIndex,
12586  VkDeviceSize size,
12587  VkDeviceSize alignment,
12588  VmaAllocationCreateFlags allocFlags,
12589  void* pUserData,
12590  VmaSuballocationType suballocType,
12591  uint32_t strategy,
12592  VmaAllocation* pAllocation)
12593 {
12594  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12595  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12596  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12597  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12598 
12599  VmaAllocationRequest currRequest = {};
12600  if(pBlock->m_pMetadata->CreateAllocationRequest(
12601  currentFrameIndex,
12602  m_FrameInUseCount,
12603  m_BufferImageGranularity,
12604  size,
12605  alignment,
12606  isUpperAddress,
12607  suballocType,
12608  false, // canMakeOtherLost
12609  strategy,
12610  &currRequest))
12611  {
12612  // Allocate from pCurrBlock.
12613  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12614 
12615  if(mapped)
12616  {
12617  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12618  if(res != VK_SUCCESS)
12619  {
12620  return res;
12621  }
12622  }
12623 
12624  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
12625  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12626  UpdateHasEmptyBlock();
12627  (*pAllocation)->InitBlockAllocation(
12628  pBlock,
12629  currRequest.offset,
12630  alignment,
12631  size,
12632  m_MemoryTypeIndex,
12633  suballocType,
12634  mapped,
12635  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12636  VMA_HEAVY_ASSERT(pBlock->Validate());
12637  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12638  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12639  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12640  {
12641  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12642  }
12643  if(IsCorruptionDetectionEnabled())
12644  {
12645  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12646  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12647  }
12648  return VK_SUCCESS;
12649  }
12650  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12651 }
12652 
12653 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12654 {
12655  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12656  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12657  allocInfo.allocationSize = blockSize;
12658  VkDeviceMemory mem = VK_NULL_HANDLE;
12659  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12660  if(res < 0)
12661  {
12662  return res;
12663  }
12664 
12665  // New VkDeviceMemory successfully created.
12666 
12667  // Create new Allocation for it.
12668  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12669  pBlock->Init(
12670  m_hAllocator,
12671  m_hParentPool,
12672  m_MemoryTypeIndex,
12673  mem,
12674  allocInfo.allocationSize,
12675  m_NextBlockId++,
12676  m_Algorithm);
12677 
12678  m_Blocks.push_back(pBlock);
12679  if(pNewBlockIndex != VMA_NULL)
12680  {
12681  *pNewBlockIndex = m_Blocks.size() - 1;
12682  }
12683 
12684  return VK_SUCCESS;
12685 }
12686 
12687 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12688  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12689  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12690 {
12691  const size_t blockCount = m_Blocks.size();
12692  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12693 
12694  enum BLOCK_FLAG
12695  {
12696  BLOCK_FLAG_USED = 0x00000001,
12697  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12698  };
12699 
12700  struct BlockInfo
12701  {
12702  uint32_t flags;
12703  void* pMappedData;
12704  };
12705  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12706  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12707  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12708 
12709  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12710  const size_t moveCount = moves.size();
12711  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12712  {
12713  const VmaDefragmentationMove& move = moves[moveIndex];
12714  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12715  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12716  }
12717 
12718  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12719 
12720  // Go over all blocks. Get mapped pointer or map if necessary.
12721  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12722  {
12723  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12724  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12725  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12726  {
12727  currBlockInfo.pMappedData = pBlock->GetMappedData();
12728  // If it is not originally mapped, map it now.
12729  if(currBlockInfo.pMappedData == VMA_NULL)
12730  {
12731  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12732  if(pDefragCtx->res == VK_SUCCESS)
12733  {
12734  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12735  }
12736  }
12737  }
12738  }
12739 
12740  // Go over all moves. Do actual data transfer.
12741  if(pDefragCtx->res == VK_SUCCESS)
12742  {
12743  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12744  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12745 
12746  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12747  {
12748  const VmaDefragmentationMove& move = moves[moveIndex];
12749 
12750  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12751  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12752 
12753  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12754 
12755  // Invalidate source.
12756  if(isNonCoherent)
12757  {
12758  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12759  memRange.memory = pSrcBlock->GetDeviceMemory();
12760  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12761  memRange.size = VMA_MIN(
12762  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12763  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12764  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12765  }
12766 
12767  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12768  memmove(
12769  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12770  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12771  static_cast<size_t>(move.size));
12772 
12773  if(IsCorruptionDetectionEnabled())
12774  {
12775  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12776  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12777  }
12778 
12779  // Flush destination.
12780  if(isNonCoherent)
12781  {
12782  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12783  memRange.memory = pDstBlock->GetDeviceMemory();
12784  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12785  memRange.size = VMA_MIN(
12786  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12787  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12788  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12789  }
12790  }
12791  }
12792 
12793  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12794  // This happens regardless of whether pCtx->res == VK_SUCCESS.
12795  for(size_t blockIndex = blockCount; blockIndex--; )
12796  {
12797  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12798  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12799  {
12800  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12801  pBlock->Unmap(m_hAllocator, 1);
12802  }
12803  }
12804 }
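// Worked example of the flush/invalidate range math above (illustrative
// numbers): with nonCoherentAtomSize = 256, a move with srcOffset = 1000 and
// size = 300 yields
//
//     memRange.offset = VmaAlignDown(1000, 256) = 768
//     memRange.size   = VmaAlignUp(300 + (1000 - 768), 256) = VmaAlignUp(532, 256) = 768
//
// clamped to the remaining block size, as the Vulkan spec requires for
// vkInvalidateMappedMemoryRanges / vkFlushMappedMemoryRanges on non-coherent
// memory.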
12805 
12806 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12807  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12808  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12809  VkCommandBuffer commandBuffer)
12810 {
12811  const size_t blockCount = m_Blocks.size();
12812 
12813  pDefragCtx->blockContexts.resize(blockCount);
12814  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12815 
12816  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12817  const size_t moveCount = moves.size();
12818  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12819  {
12820  const VmaDefragmentationMove& move = moves[moveIndex];
12821 
12822  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
12823  {
12824  // Old-school moves still require us to map the whole block.
12825  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12826  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12827  }
12828  }
12829 
12830  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12831 
12832  // Go over all blocks. Create and bind buffer for whole block if necessary.
12833  {
12834  VkBufferCreateInfo bufCreateInfo;
12835  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12836 
12837  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12838  {
12839  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12840  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12841  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12842  {
12843  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12844  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12845  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12846  if(pDefragCtx->res == VK_SUCCESS)
12847  {
12848  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12849  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12850  }
12851  }
12852  }
12853  }
12854 
12855  // Go over all moves. Post data transfer commands to command buffer.
12856  if(pDefragCtx->res == VK_SUCCESS)
12857  {
12858  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12859  {
12860  const VmaDefragmentationMove& move = moves[moveIndex];
12861 
12862  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12863  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12864 
12865  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12866 
12867  VkBufferCopy region = {
12868  move.srcOffset,
12869  move.dstOffset,
12870  move.size };
12871  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12872  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12873  }
12874  }
12875 
12876  // Save buffers to defrag context for later destruction.
12877  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12878  {
12879  pDefragCtx->res = VK_NOT_READY;
12880  }
12881 }
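// Design note: the GPU path aliases each touched VkDeviceMemory block with a
// temporary whole-block VkBuffer (VmaFillGpuDefragmentationBufferCreateInfo()
// requests TRANSFER_SRC | TRANSFER_DST usage) and records plain
// vkCmdCopyBuffer regions into the caller's command buffer. VK_NOT_READY
// signals that those commands still have to be submitted and completed before
// DefragmentationEnd() destroys the temporary buffers.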
12882 
12883 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12884 {
12885  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12886  {
12887  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12888  if(pBlock->m_pMetadata->IsEmpty())
12889  {
12890  if(m_Blocks.size() > m_MinBlockCount)
12891  {
12892  if(pDefragmentationStats != VMA_NULL)
12893  {
12894  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12895  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12896  }
12897 
12898  VmaVectorRemove(m_Blocks, blockIndex);
12899  pBlock->Destroy(m_hAllocator);
12900  vma_delete(m_hAllocator, pBlock);
12901  }
12902  else
12903  {
12904  break;
12905  }
12906  }
12907  }
12908  UpdateHasEmptyBlock();
12909 }
12910 
12911 void VmaBlockVector::UpdateHasEmptyBlock()
12912 {
12913  m_HasEmptyBlock = false;
12914  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12915  {
12916  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12917  if(pBlock->m_pMetadata->IsEmpty())
12918  {
12919  m_HasEmptyBlock = true;
12920  break;
12921  }
12922  }
12923 }
12924 
12925 #if VMA_STATS_STRING_ENABLED
12926 
12927 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12928 {
12929  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12930 
12931  json.BeginObject();
12932 
12933  if(IsCustomPool())
12934  {
12935  const char* poolName = m_hParentPool->GetName();
12936  if(poolName != VMA_NULL && poolName[0] != '\0')
12937  {
12938  json.WriteString("Name");
12939  json.WriteString(poolName);
12940  }
12941 
12942  json.WriteString("MemoryTypeIndex");
12943  json.WriteNumber(m_MemoryTypeIndex);
12944 
12945  json.WriteString("BlockSize");
12946  json.WriteNumber(m_PreferredBlockSize);
12947 
12948  json.WriteString("BlockCount");
12949  json.BeginObject(true);
12950  if(m_MinBlockCount > 0)
12951  {
12952  json.WriteString("Min");
12953  json.WriteNumber((uint64_t)m_MinBlockCount);
12954  }
12955  if(m_MaxBlockCount < SIZE_MAX)
12956  {
12957  json.WriteString("Max");
12958  json.WriteNumber((uint64_t)m_MaxBlockCount);
12959  }
12960  json.WriteString("Cur");
12961  json.WriteNumber((uint64_t)m_Blocks.size());
12962  json.EndObject();
12963 
12964  if(m_FrameInUseCount > 0)
12965  {
12966  json.WriteString("FrameInUseCount");
12967  json.WriteNumber(m_FrameInUseCount);
12968  }
12969 
12970  if(m_Algorithm != 0)
12971  {
12972  json.WriteString("Algorithm");
12973  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12974  }
12975  }
12976  else
12977  {
12978  json.WriteString("PreferredBlockSize");
12979  json.WriteNumber(m_PreferredBlockSize);
12980  }
12981 
12982  json.WriteString("Blocks");
12983  json.BeginObject();
12984  for(size_t i = 0; i < m_Blocks.size(); ++i)
12985  {
12986  json.BeginString();
12987  json.ContinueString(m_Blocks[i]->GetId());
12988  json.EndString();
12989 
12990  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12991  }
12992  json.EndObject();
12993 
12994  json.EndObject();
12995 }
12996 
12997 #endif // #if VMA_STATS_STRING_ENABLED
12998 
12999 void VmaBlockVector::Defragment(
13000  class VmaBlockVectorDefragmentationContext* pCtx,
13001  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13002  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13003  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13004  VkCommandBuffer commandBuffer)
13005 {
13006  pCtx->res = VK_SUCCESS;
13007 
13008  const VkMemoryPropertyFlags memPropFlags =
13009  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13010  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13011 
13012  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13013  isHostVisible;
13014  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13015  !IsCorruptionDetectionEnabled() &&
13016  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13017 
13018  // There are options to defragment this memory type.
13019  if(canDefragmentOnCpu || canDefragmentOnGpu)
13020  {
13021  bool defragmentOnGpu;
13022  // There is only one option to defragment this memory type.
13023  if(canDefragmentOnGpu != canDefragmentOnCpu)
13024  {
13025  defragmentOnGpu = canDefragmentOnGpu;
13026  }
13027  // Both options are available: Heuristics to choose the best one.
13028  else
13029  {
13030  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13031  m_hAllocator->IsIntegratedGpu();
13032  }
13033 
13034  bool overlappingMoveSupported = !defragmentOnGpu;
13035 
13036  if(m_hAllocator->m_UseMutex)
13037  {
13038  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13039  {
13040  if(!m_Mutex.TryLockWrite())
13041  {
13042  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13043  return;
13044  }
13045  }
13046  else
13047  {
13048  m_Mutex.LockWrite();
13049  pCtx->mutexLocked = true;
13050  }
13051  }
13052 
13053  pCtx->Begin(overlappingMoveSupported, flags);
13054 
13055  // Defragment.
13056 
13057  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13058  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13059  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13060 
13061  // Accumulate statistics.
13062  if(pStats != VMA_NULL)
13063  {
13064  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13065  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13066  pStats->bytesMoved += bytesMoved;
13067  pStats->allocationsMoved += allocationsMoved;
13068  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13069  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13070  if(defragmentOnGpu)
13071  {
13072  maxGpuBytesToMove -= bytesMoved;
13073  maxGpuAllocationsToMove -= allocationsMoved;
13074  }
13075  else
13076  {
13077  maxCpuBytesToMove -= bytesMoved;
13078  maxCpuAllocationsToMove -= allocationsMoved;
13079  }
13080  }
13081 
13082  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13083  {
13084  if(m_hAllocator->m_UseMutex)
13085  m_Mutex.UnlockWrite();
13086 
13087  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13088  pCtx->res = VK_NOT_READY;
13089 
13090  return;
13091  }
13092 
13093  if(pCtx->res >= VK_SUCCESS)
13094  {
13095  if(defragmentOnGpu)
13096  {
13097  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13098  }
13099  else
13100  {
13101  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13102  }
13103  }
13104  }
13105 }
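// Usage sketch, assuming the public API declared earlier in this header
// (vmaDefragmentationBegin() is the documented entry point that drives this
// method):
//
//     VmaDefragmentationInfo2 info = {};
//     info.allocationCount = (uint32_t)allocCount;
//     info.pAllocations = allocs;
//     info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     info.maxCpuAllocationsToMove = UINT32_MAX;
//     // Also set maxGpu* and info.commandBuffer to enable the GPU path.
//
//     VmaDefragmentationContext ctx = VMA_NULL;
//     vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
//     // ... submit and wait on the command buffer if the GPU path was used ...
//     vmaDefragmentationEnd(allocator, ctx);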
13106 
13107 void VmaBlockVector::DefragmentationEnd(
13108  class VmaBlockVectorDefragmentationContext* pCtx,
13109  VmaDefragmentationStats* pStats)
13110 {
13111  // Destroy buffers.
13112  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
13113  {
13114  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
13115  if(blockCtx.hBuffer)
13116  {
13117  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
13118  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13119  }
13120  }
13121 
13122  if(pCtx->res >= VK_SUCCESS)
13123  {
13124  FreeEmptyBlocks(pStats);
13125  }
13126 
13127  if(pCtx->mutexLocked)
13128  {
13129  VMA_ASSERT(m_hAllocator->m_UseMutex);
13130  m_Mutex.UnlockWrite();
13131  }
13132 }
13133 
13134 uint32_t VmaBlockVector::ProcessDefragmentations(
13135  class VmaBlockVectorDefragmentationContext *pCtx,
13136  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13137 {
13138  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13139 
13140  const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13141 
13142  for(uint32_t i = 0; i < moveCount; ++ i)
13143  {
13144  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13145 
13146  pMove->allocation = move.hAllocation;
13147  pMove->memory = move.pDstBlock->GetDeviceMemory();
13148  pMove->offset = move.dstOffset;
13149 
13150  ++ pMove;
13151  }
13152 
13153  pCtx->defragmentationMovesProcessed += moveCount;
13154 
13155  return moveCount;
13156 }
13157 
13158 void VmaBlockVector::CommitDefragmentations(
13159  class VmaBlockVectorDefragmentationContext *pCtx,
13160  VmaDefragmentationStats* pStats)
13161 {
13162  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13163 
13164  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13165  {
13166  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13167 
13168  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13169  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13170  }
13171 
13172  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13173  FreeEmptyBlocks(pStats);
13174 }
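// Usage sketch, assuming the incremental pass API declared earlier in this
// header: ProcessDefragmentations() fills the caller's move array and
// CommitDefragmentations() is invoked from vmaEndDefragmentationPass(). The
// exact completion protocol is documented at those declarations:
//
//     VmaDefragmentationPassMoveInfo moves[64];
//     VmaDefragmentationPassInfo passInfo = {};
//     passInfo.moveCount = 64;
//     passInfo.pMoves = moves;
//     VkResult res = vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
//     // ... copy data for the returned moves and rebind resources ...
//     res = vmaEndDefragmentationPass(allocator, ctx);
//     // Repeat until the pass functions report completion.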
13175 
13176 size_t VmaBlockVector::CalcAllocationCount() const
13177 {
13178  size_t result = 0;
13179  for(size_t i = 0; i < m_Blocks.size(); ++i)
13180  {
13181  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13182  }
13183  return result;
13184 }
13185 
13186 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13187 {
13188  if(m_BufferImageGranularity == 1)
13189  {
13190  return false;
13191  }
13192  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13193  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13194  {
13195  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13196  VMA_ASSERT(m_Algorithm == 0);
13197  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13198  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13199  {
13200  return true;
13201  }
13202  }
13203  return false;
13204 }
13205 
13206 void VmaBlockVector::MakePoolAllocationsLost(
13207  uint32_t currentFrameIndex,
13208  size_t* pLostAllocationCount)
13209 {
13210  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13211  size_t lostAllocationCount = 0;
13212  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13213  {
13214  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13215  VMA_ASSERT(pBlock);
13216  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13217  }
13218  if(pLostAllocationCount != VMA_NULL)
13219  {
13220  *pLostAllocationCount = lostAllocationCount;
13221  }
13222 }
13223 
13224 VkResult VmaBlockVector::CheckCorruption()
13225 {
13226  if(!IsCorruptionDetectionEnabled())
13227  {
13228  return VK_ERROR_FEATURE_NOT_PRESENT;
13229  }
13230 
13231  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13232  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13233  {
13234  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13235  VMA_ASSERT(pBlock);
13236  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13237  if(res != VK_SUCCESS)
13238  {
13239  return res;
13240  }
13241  }
13242  return VK_SUCCESS;
13243 }
13244 
13245 void VmaBlockVector::AddStats(VmaStats* pStats)
13246 {
13247  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13248  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13249 
13250  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13251 
13252  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13253  {
13254  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13255  VMA_ASSERT(pBlock);
13256  VMA_HEAVY_ASSERT(pBlock->Validate());
13257  VmaStatInfo allocationStatInfo;
13258  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13259  VmaAddStatInfo(pStats->total, allocationStatInfo);
13260  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13261  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13262  }
13263 }
13264 
13265 ////////////////////////////////////////////////////////////////////////////////
13266 // VmaDefragmentationAlgorithm_Generic members definition
13267 
13268 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13269  VmaAllocator hAllocator,
13270  VmaBlockVector* pBlockVector,
13271  uint32_t currentFrameIndex,
13272  bool overlappingMoveSupported) :
13273  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13274  m_AllocationCount(0),
13275  m_AllAllocations(false),
13276  m_BytesMoved(0),
13277  m_AllocationsMoved(0),
13278  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13279 {
13280  // Create block info for each block.
13281  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13282  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13283  {
13284  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13285  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13286  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13287  m_Blocks.push_back(pBlockInfo);
13288  }
13289 
13290  // Sort them by m_pBlock pointer value.
13291  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13292 }
13293 
13294 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13295 {
13296  for(size_t i = m_Blocks.size(); i--; )
13297  {
13298  vma_delete(m_hAllocator, m_Blocks[i]);
13299  }
13300 }
13301 
13302 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13303 {
13304  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
13305  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13306  {
13307  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13308  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13309  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13310  {
13311  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13312  (*it)->m_Allocations.push_back(allocInfo);
13313  }
13314  else
13315  {
13316  VMA_ASSERT(0);
13317  }
13318 
13319  ++m_AllocationCount;
13320  }
13321 }
13322 
13323 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13324  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13325  VkDeviceSize maxBytesToMove,
13326  uint32_t maxAllocationsToMove,
13327  bool freeOldAllocations)
13328 {
13329  if(m_Blocks.empty())
13330  {
13331  return VK_SUCCESS;
13332  }
13333 
13334  // This is a choice based on research.
13335  // Option 1:
13336  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13337  // Option 2:
13338  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13339  // Option 3:
13340  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13341 
13342  size_t srcBlockMinIndex = 0;
13343  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
13344  /*
13345  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13346  {
13347  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13348  if(blocksWithNonMovableCount > 0)
13349  {
13350  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13351  }
13352  }
13353  */
13354 
13355  size_t srcBlockIndex = m_Blocks.size() - 1;
13356  size_t srcAllocIndex = SIZE_MAX;
13357  for(;;)
13358  {
13359  // 1. Find next allocation to move.
13360  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13361  // 1.2. Then start from last to first m_Allocations.
13362  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13363  {
13364  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13365  {
13366  // Finished: no more allocations to process.
13367  if(srcBlockIndex == srcBlockMinIndex)
13368  {
13369  return VK_SUCCESS;
13370  }
13371  else
13372  {
13373  --srcBlockIndex;
13374  srcAllocIndex = SIZE_MAX;
13375  }
13376  }
13377  else
13378  {
13379  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13380  }
13381  }
13382 
13383  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13384  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13385 
13386  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13387  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13388  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13389  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13390 
13391  // 2. Try to find new place for this allocation in preceding or current block.
13392  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13393  {
13394  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13395  VmaAllocationRequest dstAllocRequest;
13396  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13397  m_CurrentFrameIndex,
13398  m_pBlockVector->GetFrameInUseCount(),
13399  m_pBlockVector->GetBufferImageGranularity(),
13400  size,
13401  alignment,
13402  false, // upperAddress
13403  suballocType,
13404  false, // canMakeOtherLost
13405  strategy,
13406  &dstAllocRequest) &&
13407  MoveMakesSense(
13408  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13409  {
13410  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13411 
13412  // Reached limit on number of allocations or bytes to move.
13413  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13414  (m_BytesMoved + size > maxBytesToMove))
13415  {
13416  return VK_SUCCESS;
13417  }
13418 
13419  VmaDefragmentationMove move = {};
13420  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13421  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13422  move.srcOffset = srcOffset;
13423  move.dstOffset = dstAllocRequest.offset;
13424  move.size = size;
13425  move.hAllocation = allocInfo.m_hAllocation;
13426  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
13427  move.pDstBlock = pDstBlockInfo->m_pBlock;
13428 
13429  moves.push_back(move);
13430 
13431  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13432  dstAllocRequest,
13433  suballocType,
13434  size,
13435  allocInfo.m_hAllocation);
13436 
13437  if(freeOldAllocations)
13438  {
13439  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13440  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13441  }
13442 
13443  if(allocInfo.m_pChanged != VMA_NULL)
13444  {
13445  *allocInfo.m_pChanged = VK_TRUE;
13446  }
13447 
13448  ++m_AllocationsMoved;
13449  m_BytesMoved += size;
13450 
13451  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13452 
13453  break;
13454  }
13455  }
13456 
13457  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13458 
13459  if(srcAllocIndex > 0)
13460  {
13461  --srcAllocIndex;
13462  }
13463  else
13464  {
13465  if(srcBlockIndex > 0)
13466  {
13467  --srcBlockIndex;
13468  srcAllocIndex = SIZE_MAX;
13469  }
13470  else
13471  {
13472  return VK_SUCCESS;
13473  }
13474  }
13475  }
13476 }
13477 
13478 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13479 {
13480  size_t result = 0;
13481  for(size_t i = 0; i < m_Blocks.size(); ++i)
13482  {
13483  if(m_Blocks[i]->m_HasNonMovableAllocations)
13484  {
13485  ++result;
13486  }
13487  }
13488  return result;
13489 }
13490 
13491 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13492  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13493  VkDeviceSize maxBytesToMove,
13494  uint32_t maxAllocationsToMove,
13495  VmaDefragmentationFlags flags)
13496 {
13497  if(!m_AllAllocations && m_AllocationCount == 0)
13498  {
13499  return VK_SUCCESS;
13500  }
13501 
13502  const size_t blockCount = m_Blocks.size();
13503  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13504  {
13505  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13506 
13507  if(m_AllAllocations)
13508  {
13509  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13510  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13511  it != pMetadata->m_Suballocations.end();
13512  ++it)
13513  {
13514  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13515  {
13516  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13517  pBlockInfo->m_Allocations.push_back(allocInfo);
13518  }
13519  }
13520  }
13521 
13522  pBlockInfo->CalcHasNonMovableAllocations();
13523 
13524  // This is a choice based on research.
13525  // Option 1:
13526  pBlockInfo->SortAllocationsByOffsetDescending();
13527  // Option 2:
13528  //pBlockInfo->SortAllocationsBySizeDescending();
13529  }
13530 
13531  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
13532  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13533 
13534  // This is a choice based on research.
13535  const uint32_t roundCount = 2;
13536 
13537  // Execute defragmentation rounds (the main part).
13538  VkResult result = VK_SUCCESS;
13539  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13540  {
13541  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
13542  }
13543 
13544  return result;
13545 }
13546 
13547 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13548  size_t dstBlockIndex, VkDeviceSize dstOffset,
13549  size_t srcBlockIndex, VkDeviceSize srcOffset)
13550 {
13551  if(dstBlockIndex < srcBlockIndex)
13552  {
13553  return true;
13554  }
13555  if(dstBlockIndex > srcBlockIndex)
13556  {
13557  return false;
13558  }
13559  if(dstOffset < srcOffset)
13560  {
13561  return true;
13562  }
13563  return false;
13564 }
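// Illustration: MoveMakesSense() imposes a strict "toward the front" order on
// (blockIndex, offset) pairs, which guarantees monotonic progress:
//
//     dst (0, 4096) vs src (1,    0) -> true  (earlier block always wins)
//     dst (1,    0) vs src (0, 4096) -> false (never move backward)
//     dst (0,    0) vs src (0, 4096) -> true  (same block, lower offset)
//     dst (0, 4096) vs src (0, 4096) -> false (a no-op move is rejected)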
13565 
13566 ////////////////////////////////////////////////////////////////////////////////
13567 // VmaDefragmentationAlgorithm_Fast
13568 
13569 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13570  VmaAllocator hAllocator,
13571  VmaBlockVector* pBlockVector,
13572  uint32_t currentFrameIndex,
13573  bool overlappingMoveSupported) :
13574  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13575  m_OverlappingMoveSupported(overlappingMoveSupported),
13576  m_AllocationCount(0),
13577  m_AllAllocations(false),
13578  m_BytesMoved(0),
13579  m_AllocationsMoved(0),
13580  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13581 {
13582  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13583 
13584 }
13585 
13586 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13587 {
13588 }
13589 
13590 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13591  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13592  VkDeviceSize maxBytesToMove,
13593  uint32_t maxAllocationsToMove,
13594  VmaDefragmentationFlags flags)
13595 {
13596  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13597 
13598  const size_t blockCount = m_pBlockVector->GetBlockCount();
13599  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13600  {
13601  return VK_SUCCESS;
13602  }
13603 
13604  PreprocessMetadata();
13605 
13606  // Sort blocks in order from most "destination" to most "source".
13607 
13608  m_BlockInfos.resize(blockCount);
13609  for(size_t i = 0; i < blockCount; ++i)
13610  {
13611  m_BlockInfos[i].origBlockIndex = i;
13612  }
13613 
13614  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13615  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13616  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13617  });
13618 
13619  // THE MAIN ALGORITHM
13620 
13621  FreeSpaceDatabase freeSpaceDb;
13622 
13623  size_t dstBlockInfoIndex = 0;
13624  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13625  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13626  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13627  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13628  VkDeviceSize dstOffset = 0;
13629 
13630  bool end = false;
13631  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13632  {
13633  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13634  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13635  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13636  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13637  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13638  {
13639  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13640  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13641  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13642  if(m_AllocationsMoved == maxAllocationsToMove ||
13643  m_BytesMoved + srcAllocSize > maxBytesToMove)
13644  {
13645  end = true;
13646  break;
13647  }
13648  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13649 
13650  VmaDefragmentationMove move = {};
13651  // Try to place it in one of the free spaces from the database.
13652  size_t freeSpaceInfoIndex;
13653  VkDeviceSize dstAllocOffset;
13654  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13655  freeSpaceInfoIndex, dstAllocOffset))
13656  {
13657  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13658  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13659  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13660 
13661  // Same block
13662  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13663  {
13664  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13665 
13666  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13667 
13668  VmaSuballocation suballoc = *srcSuballocIt;
13669  suballoc.offset = dstAllocOffset;
13670  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13671  m_BytesMoved += srcAllocSize;
13672  ++m_AllocationsMoved;
13673 
13674  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13675  ++nextSuballocIt;
13676  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13677  srcSuballocIt = nextSuballocIt;
13678 
13679  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13680 
13681  move.srcBlockIndex = srcOrigBlockIndex;
13682  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13683  move.srcOffset = srcAllocOffset;
13684  move.dstOffset = dstAllocOffset;
13685  move.size = srcAllocSize;
13686 
13687  moves.push_back(move);
13688  }
13689  // Different block
13690  else
13691  {
13692  // MOVE OPTION 2: Move the allocation to a different block.
13693 
13694  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13695 
13696  VmaSuballocation suballoc = *srcSuballocIt;
13697  suballoc.offset = dstAllocOffset;
13698  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13699  m_BytesMoved += srcAllocSize;
13700  ++m_AllocationsMoved;
13701 
13702  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13703  ++nextSuballocIt;
13704  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13705  srcSuballocIt = nextSuballocIt;
13706 
13707  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13708 
13709  move.srcBlockIndex = srcOrigBlockIndex;
13710  move.dstBlockIndex = freeSpaceOrigBlockIndex;
13711  move.srcOffset = srcAllocOffset;
13712  move.dstOffset = dstAllocOffset;
13713  move.size = srcAllocSize;
13714 
13715  moves.push_back(move);
13716  }
13717  }
13718  else
13719  {
13720  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13721 
13722  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13723  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13724  dstAllocOffset + srcAllocSize > dstBlockSize)
13725  {
13726  // But before that, register remaining free space at the end of dst block.
13727  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13728 
13729  ++dstBlockInfoIndex;
13730  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13731  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13732  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13733  dstBlockSize = pDstMetadata->GetSize();
13734  dstOffset = 0;
13735  dstAllocOffset = 0;
13736  }
13737 
13738  // Same block
13739  if(dstBlockInfoIndex == srcBlockInfoIndex)
13740  {
13741  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13742 
13743  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13744 
13745  bool skipOver = overlap;
13746  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13747  {
13748  // If the destination and source regions overlap, skip the move if it
13749  // would shift the allocation by less than 1/64 of its size.
13750  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13751  }
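// A worked example of the heuristic above (sizes are illustrative): for a
// 1 MiB allocation, 1/64 of its size is 16 KiB, so an overlapping move that
// would shift the allocation left by less than 16 KiB is skipped as not being
// worth the cost of the copy.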
13752 
13753  if(skipOver)
13754  {
13755  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13756 
13757  dstOffset = srcAllocOffset + srcAllocSize;
13758  ++srcSuballocIt;
13759  }
13760  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13761  else
13762  {
13763  srcSuballocIt->offset = dstAllocOffset;
13764  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13765  dstOffset = dstAllocOffset + srcAllocSize;
13766  m_BytesMoved += srcAllocSize;
13767  ++m_AllocationsMoved;
13768  ++srcSuballocIt;
13769 
13770  move.srcBlockIndex = srcOrigBlockIndex;
13771  move.dstBlockIndex = dstOrigBlockIndex;
13772  move.srcOffset = srcAllocOffset;
13773  move.dstOffset = dstAllocOffset;
13774  move.size = srcAllocSize;
13775 
13776  moves.push_back(move);
13777  }
13778  }
13779  // Different block
13780  else
13781  {
13782  // MOVE OPTION 2: Move the allocation to a different block.
13783 
13784  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13785  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13786 
13787  VmaSuballocation suballoc = *srcSuballocIt;
13788  suballoc.offset = dstAllocOffset;
13789  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13790  dstOffset = dstAllocOffset + srcAllocSize;
13791  m_BytesMoved += srcAllocSize;
13792  ++m_AllocationsMoved;
13793 
13794  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13795  ++nextSuballocIt;
13796  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13797  srcSuballocIt = nextSuballocIt;
13798 
13799  pDstMetadata->m_Suballocations.push_back(suballoc);
13800 
13801  move.srcBlockIndex = srcOrigBlockIndex;
13802  move.dstBlockIndex = dstOrigBlockIndex;
13803  move.srcOffset = srcAllocOffset;
13804  move.dstOffset = dstAllocOffset;
13805  move.size = srcAllocSize;
13806 
13807  moves.push_back(move);
13808  }
13809  }
13810  }
13811  }
13812 
13813  m_BlockInfos.clear();
13814 
13815  PostprocessMetadata();
13816 
13817  return VK_SUCCESS;
13818 }
13819 
13820 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13821 {
13822  const size_t blockCount = m_pBlockVector->GetBlockCount();
13823  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13824  {
13825  VmaBlockMetadata_Generic* const pMetadata =
13826  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13827  pMetadata->m_FreeCount = 0;
13828  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13829  pMetadata->m_FreeSuballocationsBySize.clear();
13830  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13831  it != pMetadata->m_Suballocations.end(); )
13832  {
13833  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13834  {
13835  VmaSuballocationList::iterator nextIt = it;
13836  ++nextIt;
13837  pMetadata->m_Suballocations.erase(it);
13838  it = nextIt;
13839  }
13840  else
13841  {
13842  ++it;
13843  }
13844  }
13845  }
13846 }
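// For illustration: after PreprocessMetadata(), a block that contained
// [alloc A][free][alloc B][free] holds only [alloc A][alloc B] in its
// suballocation list, with m_FreeCount == 0, m_SumFreeSize == block size, and
// m_FreeSuballocationsBySize empty. The metadata is deliberately left in this
// inconsistent state while the algorithm rewrites offsets; PostprocessMetadata()
// below re-creates the free entries and restores the invariants.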
13847 
13848 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13849 {
13850  const size_t blockCount = m_pBlockVector->GetBlockCount();
13851  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13852  {
13853  VmaBlockMetadata_Generic* const pMetadata =
13854  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13855  const VkDeviceSize blockSize = pMetadata->GetSize();
13856 
13857  // No allocations in this block - entire area is free.
13858  if(pMetadata->m_Suballocations.empty())
13859  {
13860  pMetadata->m_FreeCount = 1;
13861  //pMetadata->m_SumFreeSize is already set to blockSize.
13862  VmaSuballocation suballoc = {
13863  0, // offset
13864  blockSize, // size
13865  VMA_NULL, // hAllocation
13866  VMA_SUBALLOCATION_TYPE_FREE };
13867  pMetadata->m_Suballocations.push_back(suballoc);
13868  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13869  }
13870  // There are some allocations in this block.
13871  else
13872  {
13873  VkDeviceSize offset = 0;
13874  VmaSuballocationList::iterator it;
13875  for(it = pMetadata->m_Suballocations.begin();
13876  it != pMetadata->m_Suballocations.end();
13877  ++it)
13878  {
13879  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13880  VMA_ASSERT(it->offset >= offset);
13881 
13882  // Need to insert preceding free space.
13883  if(it->offset > offset)
13884  {
13885  ++pMetadata->m_FreeCount;
13886  const VkDeviceSize freeSize = it->offset - offset;
13887  VmaSuballocation suballoc = {
13888  offset, // offset
13889  freeSize, // size
13890  VMA_NULL, // hAllocation
13891  VMA_SUBALLOCATION_TYPE_FREE };
13892  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13893  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13894  {
13895  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13896  }
13897  }
13898 
13899  pMetadata->m_SumFreeSize -= it->size;
13900  offset = it->offset + it->size;
13901  }
13902 
13903  // Need to insert trailing free space.
13904  if(offset < blockSize)
13905  {
13906  ++pMetadata->m_FreeCount;
13907  const VkDeviceSize freeSize = blockSize - offset;
13908  VmaSuballocation suballoc = {
13909  offset, // offset
13910  freeSize, // size
13911  VMA_NULL, // hAllocation
13912  VMA_SUBALLOCATION_TYPE_FREE };
13913  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13914  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13915  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13916  {
13917  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13918  }
13919  }
13920 
13921  VMA_SORT(
13922  pMetadata->m_FreeSuballocationsBySize.begin(),
13923  pMetadata->m_FreeSuballocationsBySize.end(),
13924  VmaSuballocationItemSizeLess());
13925  }
13926 
13927  VMA_HEAVY_ASSERT(pMetadata->Validate());
13928  }
13929 }
13930 
13931 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13932 {
13933  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13934  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13935  // Advance to the first suballocation placed at or after the new offset.
13936  while(it != pMetadata->m_Suballocations.end() &&
13937  it->offset < suballoc.offset)
13938  {
13939  ++it;
13940  }
13941 
13942  pMetadata->m_Suballocations.insert(it, suballoc);
13943 }
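// One possible shape of the optimization the TODO above suggests - a sketch
// only, not part of the library ('m_InsertHint' is a hypothetical cached
// iterator that would have to stay valid across insertions, falling back to
// begin() whenever it lies past the target offset):
//
//   VmaSuballocationList::iterator it = m_InsertHint;
//   while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
//       ++it;
//   m_InsertHint = pMetadata->m_Suballocations.insert(it, suballoc);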
13944 
13945 ////////////////////////////////////////////////////////////////////////////////
13946 // VmaBlockVectorDefragmentationContext
13947 
13948 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13949  VmaAllocator hAllocator,
13950  VmaPool hCustomPool,
13951  VmaBlockVector* pBlockVector,
13952  uint32_t currFrameIndex) :
13953  res(VK_SUCCESS),
13954  mutexLocked(false),
13955  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13956  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
13957  defragmentationMovesProcessed(0),
13958  defragmentationMovesCommitted(0),
13959  hasDefragmentationPlan(0),
13960  m_hAllocator(hAllocator),
13961  m_hCustomPool(hCustomPool),
13962  m_pBlockVector(pBlockVector),
13963  m_CurrFrameIndex(currFrameIndex),
13964  m_pAlgorithm(VMA_NULL),
13965  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13966  m_AllAllocations(false)
13967 {
13968 }
13969 
13970 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13971 {
13972  vma_delete(m_hAllocator, m_pAlgorithm);
13973 }
13974 
13975 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13976 {
13977  AllocInfo info = { hAlloc, pChanged };
13978  m_Allocations.push_back(info);
13979 }
13980 
13981 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
13982 {
13983  const bool allAllocations = m_AllAllocations ||
13984  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13985 
13986  /********************************
13987  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13988  ********************************/
13989 
13990  /*
13991  Fast algorithm is supported only when certain criteria are met:
13992  - VMA_DEBUG_MARGIN is 0.
13993  - All allocations in this block vector are moveable.
13994  - There is no possibility of image/buffer granularity conflict.
13995  - The defragmentation is not incremental.
13996  */
13997  if(VMA_DEBUG_MARGIN == 0 &&
13998  allAllocations &&
13999  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14000  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
14001  {
14002  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14003  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14004  }
14005  else
14006  {
14007  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14008  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14009  }
14010 
14011  if(allAllocations)
14012  {
14013  m_pAlgorithm->AddAll();
14014  }
14015  else
14016  {
14017  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14018  {
14019  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14020  }
14021  }
14022 }
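// The practical difference between the two choices: VmaDefragmentationAlgorithm_Fast
// performs a single left-to-right sweep over the blocks sorted by free size,
// whereas VmaDefragmentationAlgorithm_Generic runs up to two rounds of
// per-allocation move searches, so the fast path is used whenever its
// preconditions listed above hold.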
14023 
14024 ////////////////////////////////////////////////////////////////////////////////
14025 // VmaDefragmentationContext
14026 
14027 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14028  VmaAllocator hAllocator,
14029  uint32_t currFrameIndex,
14030  uint32_t flags,
14031  VmaDefragmentationStats* pStats) :
14032  m_hAllocator(hAllocator),
14033  m_CurrFrameIndex(currFrameIndex),
14034  m_Flags(flags),
14035  m_pStats(pStats),
14036  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14037 {
14038  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14039 }
14040 
14041 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14042 {
14043  for(size_t i = m_CustomPoolContexts.size(); i--; )
14044  {
14045  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14046  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
14047  vma_delete(m_hAllocator, pBlockVectorCtx);
14048  }
14049  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14050  {
14051  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14052  if(pBlockVectorCtx)
14053  {
14054  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
14055  vma_delete(m_hAllocator, pBlockVectorCtx);
14056  }
14057  }
14058 }
14059 
14060 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
14061 {
14062  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14063  {
14064  VmaPool pool = pPools[poolIndex];
14065  VMA_ASSERT(pool);
14066  // Pools with algorithm other than default are not defragmented.
14067  if(pool->m_BlockVector.GetAlgorithm() == 0)
14068  {
14069  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14070 
14071  for(size_t i = m_CustomPoolContexts.size(); i--; )
14072  {
14073  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14074  {
14075  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14076  break;
14077  }
14078  }
14079 
14080  if(!pBlockVectorDefragCtx)
14081  {
14082  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14083  m_hAllocator,
14084  pool,
14085  &pool->m_BlockVector,
14086  m_CurrFrameIndex);
14087  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14088  }
14089 
14090  pBlockVectorDefragCtx->AddAll();
14091  }
14092  }
14093 }
14094 
14095 void VmaDefragmentationContext_T::AddAllocations(
14096  uint32_t allocationCount,
14097  VmaAllocation* pAllocations,
14098  VkBool32* pAllocationsChanged)
14099 {
14100  // Dispatch pAllocations among the per-pool defragmentation contexts. Create them when necessary.
14101  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14102  {
14103  const VmaAllocation hAlloc = pAllocations[allocIndex];
14104  VMA_ASSERT(hAlloc);
14105  // DedicatedAlloc cannot be defragmented.
14106  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14107  // Lost allocation cannot be defragmented.
14108  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14109  {
14110  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14111 
14112  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14113  // This allocation belongs to a custom pool.
14114  if(hAllocPool != VK_NULL_HANDLE)
14115  {
14116  // Pools with algorithm other than default are not defragmented.
14117  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14118  {
14119  for(size_t i = m_CustomPoolContexts.size(); i--; )
14120  {
14121  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14122  {
14123  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14124  break;
14125  }
14126  }
14127  if(!pBlockVectorDefragCtx)
14128  {
14129  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14130  m_hAllocator,
14131  hAllocPool,
14132  &hAllocPool->m_BlockVector,
14133  m_CurrFrameIndex);
14134  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14135  }
14136  }
14137  }
14138  // This allocation belongs to the default pool.
14139  else
14140  {
14141  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14142  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14143  if(!pBlockVectorDefragCtx)
14144  {
14145  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14146  m_hAllocator,
14147  VMA_NULL, // hCustomPool
14148  m_hAllocator->m_pBlockVectors[memTypeIndex],
14149  m_CurrFrameIndex);
14150  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14151  }
14152  }
14153 
14154  if(pBlockVectorDefragCtx)
14155  {
14156  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14157  &pAllocationsChanged[allocIndex] : VMA_NULL;
14158  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14159  }
14160  }
14161  }
14162 }
14163 
14164 VkResult VmaDefragmentationContext_T::Defragment(
14165  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14166  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14167  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14168 {
14169  if(pStats)
14170  {
14171  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14172  }
14173 
14174  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14175  {
14176  // For incremental defragmentation, just record how much we are allowed to move.
14177  // The real work happens in the defragmentation passes.
14178  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14179  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14180 
14181  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14182  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14183 
14184  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14185  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14186  return VK_SUCCESS;
14187 
14188  return VK_NOT_READY;
14189  }
14190 
14191  if(commandBuffer == VK_NULL_HANDLE)
14192  {
14193  maxGpuBytesToMove = 0;
14194  maxGpuAllocationsToMove = 0;
14195  }
14196 
14197  VkResult res = VK_SUCCESS;
14198 
14199  // Process default pools.
14200  for(uint32_t memTypeIndex = 0;
14201  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14202  ++memTypeIndex)
14203  {
14204  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14205  if(pBlockVectorCtx)
14206  {
14207  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14208  pBlockVectorCtx->GetBlockVector()->Defragment(
14209  pBlockVectorCtx,
14210  pStats, flags,
14211  maxCpuBytesToMove, maxCpuAllocationsToMove,
14212  maxGpuBytesToMove, maxGpuAllocationsToMove,
14213  commandBuffer);
14214  if(pBlockVectorCtx->res != VK_SUCCESS)
14215  {
14216  res = pBlockVectorCtx->res;
14217  }
14218  }
14219  }
14220 
14221  // Process custom pools.
14222  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14223  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14224  ++customCtxIndex)
14225  {
14226  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14227  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14228  pBlockVectorCtx->GetBlockVector()->Defragment(
14229  pBlockVectorCtx,
14230  pStats, flags,
14231  maxCpuBytesToMove, maxCpuAllocationsToMove,
14232  maxGpuBytesToMove, maxGpuAllocationsToMove,
14233  commandBuffer);
14234  if(pBlockVectorCtx->res != VK_SUCCESS)
14235  {
14236  res = pBlockVectorCtx->res;
14237  }
14238  }
14239 
14240  return res;
14241 }
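// A minimal CPU-only usage sketch of the public API that eventually reaches
// this function (a sketch under assumptions: 'allocator' and an 'allocations'
// array of moveable allocations exist; error handling is omitted). With no
// command buffer, maxGpuBytesToMove/maxGpuAllocationsToMove are zeroed above,
// so only host-visible memory is defragmented:
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.allocationCount = allocationCount;
//   defragInfo.pAllocations = allocations;
//   defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//   defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU moves only.
//
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//   vmaDefragmentationEnd(allocator, defragCtx);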
14242 
14243 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14244 {
14245  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14246  uint32_t movesLeft = pInfo->moveCount;
14247 
14248  // Process default pools.
14249  for(uint32_t memTypeIndex = 0;
14250  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14251  ++memTypeIndex)
14252  {
14253  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14254  if(pBlockVectorCtx)
14255  {
14256  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14257 
14258  if(!pBlockVectorCtx->hasDefragmentationPlan)
14259  {
14260  pBlockVectorCtx->GetBlockVector()->Defragment(
14261  pBlockVectorCtx,
14262  m_pStats, m_Flags,
14263  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14264  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14265  VK_NULL_HANDLE);
14266 
14267  if(pBlockVectorCtx->res < VK_SUCCESS)
14268  continue;
14269 
14270  pBlockVectorCtx->hasDefragmentationPlan = true;
14271  }
14272 
14273  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14274  pBlockVectorCtx,
14275  pCurrentMove, movesLeft);
14276 
14277  movesLeft -= processed;
14278  pCurrentMove += processed;
14279  }
14280  }
14281 
14282  // Process custom pools.
14283  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14284  customCtxIndex < customCtxCount;
14285  ++customCtxIndex)
14286  {
14287  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14288  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14289 
14290  if(!pBlockVectorCtx->hasDefragmentationPlan)
14291  {
14292  pBlockVectorCtx->GetBlockVector()->Defragment(
14293  pBlockVectorCtx,
14294  m_pStats, m_Flags,
14295  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14296  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14297  VK_NULL_HANDLE);
14298 
14299  if(pBlockVectorCtx->res < VK_SUCCESS)
14300  continue;
14301 
14302  pBlockVectorCtx->hasDefragmentationPlan = true;
14303  }
14304 
14305  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14306  pBlockVectorCtx,
14307  pCurrentMove, movesLeft);
14308 
14309  movesLeft -= processed;
14310  pCurrentMove += processed;
14311  }
14312 
14313  pInfo->moveCount = pInfo->moveCount - movesLeft;
14314 
14315  return VK_SUCCESS;
14316 }
14317 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14318 {
14319  VkResult res = VK_SUCCESS;
14320 
14321  // Process default pools.
14322  for(uint32_t memTypeIndex = 0;
14323  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14324  ++memTypeIndex)
14325  {
14326  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14327  if(pBlockVectorCtx)
14328  {
14329  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14330 
14331  if(!pBlockVectorCtx->hasDefragmentationPlan)
14332  {
14333  res = VK_NOT_READY;
14334  continue;
14335  }
14336 
14337  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14338  pBlockVectorCtx, m_pStats);
14339 
14340  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14341  res = VK_NOT_READY;
14342  }
14343  }
14344 
14345  // Process custom pools.
14346  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14347  customCtxIndex < customCtxCount;
14348  ++customCtxIndex)
14349  {
14350  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14351  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14352 
14353  if(!pBlockVectorCtx->hasDefragmentationPlan)
14354  {
14355  res = VK_NOT_READY;
14356  continue;
14357  }
14358 
14359  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14360  pBlockVectorCtx, m_pStats);
14361 
14362  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14363  res = VK_NOT_READY;
14364  }
14365 
14366  return res;
14367 }
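// A sketch of how an application might drive these two functions through the
// public incremental API (assumes 'allocator' and a 'defragCtx' created with
// VMA_DEFRAGMENTATION_FLAG_INCREMENTAL; the copy/rebind step is application
// specific and omitted here):
//
//   VmaDefragmentationPassMoveInfo moveBuf[64];
//   for(;;)
//   {
//       VmaDefragmentationPassInfo pass = {};
//       pass.moveCount = 64;
//       pass.pMoves = moveBuf;
//       vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
//       // Copy pass.moveCount allocations to their new places, rebind resources...
//       if(vmaEndDefragmentationPass(allocator, defragCtx) != VK_NOT_READY)
//           break; // All planned moves have been committed.
//   }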
14368 
14369 ////////////////////////////////////////////////////////////////////////////////
14370 // VmaRecorder
14371 
14372 #if VMA_RECORDING_ENABLED
14373 
14374 VmaRecorder::VmaRecorder() :
14375  m_UseMutex(true),
14376  m_Flags(0),
14377  m_File(VMA_NULL),
14378  m_Freq(INT64_MAX),
14379  m_StartCounter(INT64_MAX)
14380 {
14381 }
14382 
14383 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
14384 {
14385  m_UseMutex = useMutex;
14386  m_Flags = settings.flags;
14387 
14388  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
14389  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
14390 
14391  // Open file for writing.
14392  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
14393  if(err != 0)
14394  {
14395  return VK_ERROR_INITIALIZATION_FAILED;
14396  }
14397 
14398  // Write header.
14399  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
14400  fprintf(m_File, "%s\n", "1,8");
14401 
14402  return VK_SUCCESS;
14403 }
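// For illustration, the beginning of a recording written by this class looks
// like the following CSV (the thread id and timestamp below are made up; the
// two header lines are exactly the ones printed above):
//
//   Vulkan Memory Allocator,Calls recording
//   1,8
//   12552,0.002,0,vmaCreateAllocator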
14404 
14405 VmaRecorder::~VmaRecorder()
14406 {
14407  if(m_File != VMA_NULL)
14408  {
14409  fclose(m_File);
14410  }
14411 }
14412 
14413 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
14414 {
14415  CallParams callParams;
14416  GetBasicParams(callParams);
14417 
14418  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14419  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
14420  Flush();
14421 }
14422 
14423 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14424 {
14425  CallParams callParams;
14426  GetBasicParams(callParams);
14427 
14428  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14429  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14430  Flush();
14431 }
14432 
14433 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14434 {
14435  CallParams callParams;
14436  GetBasicParams(callParams);
14437 
14438  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14439  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14440  createInfo.memoryTypeIndex,
14441  createInfo.flags,
14442  createInfo.blockSize,
14443  (uint64_t)createInfo.minBlockCount,
14444  (uint64_t)createInfo.maxBlockCount,
14445  createInfo.frameInUseCount,
14446  pool);
14447  Flush();
14448 }
14449 
14450 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14451 {
14452  CallParams callParams;
14453  GetBasicParams(callParams);
14454 
14455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14456  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14457  pool);
14458  Flush();
14459 }
14460 
14461 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14462  const VkMemoryRequirements& vkMemReq,
14463  const VmaAllocationCreateInfo& createInfo,
14464  VmaAllocation allocation)
14465 {
14466  CallParams callParams;
14467  GetBasicParams(callParams);
14468 
14469  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14470  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14471  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14472  vkMemReq.size,
14473  vkMemReq.alignment,
14474  vkMemReq.memoryTypeBits,
14475  createInfo.flags,
14476  createInfo.usage,
14477  createInfo.requiredFlags,
14478  createInfo.preferredFlags,
14479  createInfo.memoryTypeBits,
14480  createInfo.pool,
14481  allocation,
14482  userDataStr.GetString());
14483  Flush();
14484 }
14485 
14486 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14487  const VkMemoryRequirements& vkMemReq,
14488  const VmaAllocationCreateInfo& createInfo,
14489  uint64_t allocationCount,
14490  const VmaAllocation* pAllocations)
14491 {
14492  CallParams callParams;
14493  GetBasicParams(callParams);
14494 
14495  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14496  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14497  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14498  vkMemReq.size,
14499  vkMemReq.alignment,
14500  vkMemReq.memoryTypeBits,
14501  createInfo.flags,
14502  createInfo.usage,
14503  createInfo.requiredFlags,
14504  createInfo.preferredFlags,
14505  createInfo.memoryTypeBits,
14506  createInfo.pool);
14507  PrintPointerList(allocationCount, pAllocations);
14508  fprintf(m_File, ",%s\n", userDataStr.GetString());
14509  Flush();
14510 }
14511 
14512 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14513  const VkMemoryRequirements& vkMemReq,
14514  bool requiresDedicatedAllocation,
14515  bool prefersDedicatedAllocation,
14516  const VmaAllocationCreateInfo& createInfo,
14517  VmaAllocation allocation)
14518 {
14519  CallParams callParams;
14520  GetBasicParams(callParams);
14521 
14522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14523  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14524  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14525  vkMemReq.size,
14526  vkMemReq.alignment,
14527  vkMemReq.memoryTypeBits,
14528  requiresDedicatedAllocation ? 1 : 0,
14529  prefersDedicatedAllocation ? 1 : 0,
14530  createInfo.flags,
14531  createInfo.usage,
14532  createInfo.requiredFlags,
14533  createInfo.preferredFlags,
14534  createInfo.memoryTypeBits,
14535  createInfo.pool,
14536  allocation,
14537  userDataStr.GetString());
14538  Flush();
14539 }
14540 
14541 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14542  const VkMemoryRequirements& vkMemReq,
14543  bool requiresDedicatedAllocation,
14544  bool prefersDedicatedAllocation,
14545  const VmaAllocationCreateInfo& createInfo,
14546  VmaAllocation allocation)
14547 {
14548  CallParams callParams;
14549  GetBasicParams(callParams);
14550 
14551  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14552  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14553  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14554  vkMemReq.size,
14555  vkMemReq.alignment,
14556  vkMemReq.memoryTypeBits,
14557  requiresDedicatedAllocation ? 1 : 0,
14558  prefersDedicatedAllocation ? 1 : 0,
14559  createInfo.flags,
14560  createInfo.usage,
14561  createInfo.requiredFlags,
14562  createInfo.preferredFlags,
14563  createInfo.memoryTypeBits,
14564  createInfo.pool,
14565  allocation,
14566  userDataStr.GetString());
14567  Flush();
14568 }
14569 
14570 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14571  VmaAllocation allocation)
14572 {
14573  CallParams callParams;
14574  GetBasicParams(callParams);
14575 
14576  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14577  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14578  allocation);
14579  Flush();
14580 }
14581 
14582 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14583  uint64_t allocationCount,
14584  const VmaAllocation* pAllocations)
14585 {
14586  CallParams callParams;
14587  GetBasicParams(callParams);
14588 
14589  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14590  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14591  PrintPointerList(allocationCount, pAllocations);
14592  fprintf(m_File, "\n");
14593  Flush();
14594 }
14595 
14596 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14597  VmaAllocation allocation,
14598  const void* pUserData)
14599 {
14600  CallParams callParams;
14601  GetBasicParams(callParams);
14602 
14603  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14604  UserDataString userDataStr(
14605  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14606  pUserData);
14607  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14608  allocation,
14609  userDataStr.GetString());
14610  Flush();
14611 }
14612 
14613 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14614  VmaAllocation allocation)
14615 {
14616  CallParams callParams;
14617  GetBasicParams(callParams);
14618 
14619  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14620  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14621  allocation);
14622  Flush();
14623 }
14624 
14625 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14626  VmaAllocation allocation)
14627 {
14628  CallParams callParams;
14629  GetBasicParams(callParams);
14630 
14631  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14632  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14633  allocation);
14634  Flush();
14635 }
14636 
14637 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14638  VmaAllocation allocation)
14639 {
14640  CallParams callParams;
14641  GetBasicParams(callParams);
14642 
14643  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14644  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14645  allocation);
14646  Flush();
14647 }
14648 
14649 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14650  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14651 {
14652  CallParams callParams;
14653  GetBasicParams(callParams);
14654 
14655  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14656  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14657  allocation,
14658  offset,
14659  size);
14660  Flush();
14661 }
14662 
14663 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14664  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14665 {
14666  CallParams callParams;
14667  GetBasicParams(callParams);
14668 
14669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14670  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14671  allocation,
14672  offset,
14673  size);
14674  Flush();
14675 }
14676 
14677 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14678  const VkBufferCreateInfo& bufCreateInfo,
14679  const VmaAllocationCreateInfo& allocCreateInfo,
14680  VmaAllocation allocation)
14681 {
14682  CallParams callParams;
14683  GetBasicParams(callParams);
14684 
14685  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14686  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14687  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14688  bufCreateInfo.flags,
14689  bufCreateInfo.size,
14690  bufCreateInfo.usage,
14691  bufCreateInfo.sharingMode,
14692  allocCreateInfo.flags,
14693  allocCreateInfo.usage,
14694  allocCreateInfo.requiredFlags,
14695  allocCreateInfo.preferredFlags,
14696  allocCreateInfo.memoryTypeBits,
14697  allocCreateInfo.pool,
14698  allocation,
14699  userDataStr.GetString());
14700  Flush();
14701 }
14702 
14703 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14704  const VkImageCreateInfo& imageCreateInfo,
14705  const VmaAllocationCreateInfo& allocCreateInfo,
14706  VmaAllocation allocation)
14707 {
14708  CallParams callParams;
14709  GetBasicParams(callParams);
14710 
14711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14712  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14713  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14714  imageCreateInfo.flags,
14715  imageCreateInfo.imageType,
14716  imageCreateInfo.format,
14717  imageCreateInfo.extent.width,
14718  imageCreateInfo.extent.height,
14719  imageCreateInfo.extent.depth,
14720  imageCreateInfo.mipLevels,
14721  imageCreateInfo.arrayLayers,
14722  imageCreateInfo.samples,
14723  imageCreateInfo.tiling,
14724  imageCreateInfo.usage,
14725  imageCreateInfo.sharingMode,
14726  imageCreateInfo.initialLayout,
14727  allocCreateInfo.flags,
14728  allocCreateInfo.usage,
14729  allocCreateInfo.requiredFlags,
14730  allocCreateInfo.preferredFlags,
14731  allocCreateInfo.memoryTypeBits,
14732  allocCreateInfo.pool,
14733  allocation,
14734  userDataStr.GetString());
14735  Flush();
14736 }
14737 
14738 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14739  VmaAllocation allocation)
14740 {
14741  CallParams callParams;
14742  GetBasicParams(callParams);
14743 
14744  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14745  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14746  allocation);
14747  Flush();
14748 }
14749 
14750 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14751  VmaAllocation allocation)
14752 {
14753  CallParams callParams;
14754  GetBasicParams(callParams);
14755 
14756  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14757  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14758  allocation);
14759  Flush();
14760 }
14761 
14762 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14763  VmaAllocation allocation)
14764 {
14765  CallParams callParams;
14766  GetBasicParams(callParams);
14767 
14768  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14769  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14770  allocation);
14771  Flush();
14772 }
14773 
14774 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14775  VmaAllocation allocation)
14776 {
14777  CallParams callParams;
14778  GetBasicParams(callParams);
14779 
14780  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14781  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14782  allocation);
14783  Flush();
14784 }
14785 
14786 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14787  VmaPool pool)
14788 {
14789  CallParams callParams;
14790  GetBasicParams(callParams);
14791 
14792  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14793  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14794  pool);
14795  Flush();
14796 }
14797 
14798 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14799  const VmaDefragmentationInfo2& info,
14800  VmaDefragmentationContext ctx)
14801 {
14802  CallParams callParams;
14803  GetBasicParams(callParams);
14804 
14805  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14806  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14807  info.flags);
14808  PrintPointerList(info.allocationCount, info.pAllocations);
14809  fprintf(m_File, ",");
14810  PrintPointerList(info.poolCount, info.pPools);
14811  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14812  info.maxCpuBytesToMove,
14813  info.maxCpuAllocationsToMove,
14814  info.maxGpuBytesToMove,
14815  info.maxGpuAllocationsToMove,
14816  info.commandBuffer,
14817  ctx);
14818  Flush();
14819 }
14820 
14821 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14822  VmaDefragmentationContext ctx)
14823 {
14824  CallParams callParams;
14825  GetBasicParams(callParams);
14826 
14827  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14828  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14829  ctx);
14830  Flush();
14831 }
14832 
14833 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14834  VmaPool pool,
14835  const char* name)
14836 {
14837  CallParams callParams;
14838  GetBasicParams(callParams);
14839 
14840  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14841  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14842  pool, name != VMA_NULL ? name : "");
14843  Flush();
14844 }
14845 
14846 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14847 {
14848  if(pUserData != VMA_NULL)
14849  {
14850  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14851  {
14852  m_Str = (const char*)pUserData;
14853  }
14854  else
14855  {
14856  sprintf_s(m_PtrStr, "%p", pUserData);
14857  m_Str = m_PtrStr;
14858  }
14859  }
14860  else
14861  {
14862  m_Str = "";
14863  }
14864 }
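// An example of the two modes (values are hypothetical): with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set, pUserData pointing at
// "tex_diffuse" is recorded as the string "tex_diffuse"; without the flag, the
// same pointer is recorded as its address, e.g. "0000023A4F1E82C0".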
14865 
14866 void VmaRecorder::WriteConfiguration(
14867  const VkPhysicalDeviceProperties& devProps,
14868  const VkPhysicalDeviceMemoryProperties& memProps,
14869  uint32_t vulkanApiVersion,
14870  bool dedicatedAllocationExtensionEnabled,
14871  bool bindMemory2ExtensionEnabled,
14872  bool memoryBudgetExtensionEnabled,
14873  bool deviceCoherentMemoryExtensionEnabled)
14874 {
14875  fprintf(m_File, "Config,Begin\n");
14876 
14877  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
14878 
14879  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14880  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14881  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14882  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14883  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14884  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14885 
14886  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14887  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14888  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14889 
14890  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14891  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14892  {
14893  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14894  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14895  }
14896  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14897  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14898  {
14899  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14900  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14901  }
14902 
14903  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14904  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14905  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
14906  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
14907 
14908  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14909  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14910  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14911  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14912  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14913  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14914  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14915  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14916  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14917 
14918  fprintf(m_File, "Config,End\n");
14919 }
14920 
14921 void VmaRecorder::GetBasicParams(CallParams& outParams)
14922 {
14923  outParams.threadId = GetCurrentThreadId();
14924 
14925  LARGE_INTEGER counter;
14926  QueryPerformanceCounter(&counter);
14927  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14928 }
14929 
14930 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14931 {
14932  if(count)
14933  {
14934  fprintf(m_File, "%p", pItems[0]);
14935  for(uint64_t i = 1; i < count; ++i)
14936  {
14937  fprintf(m_File, " %p", pItems[i]);
14938  }
14939  }
14940 }
14941 
14942 void VmaRecorder::Flush()
14943 {
14944  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14945  {
14946  fflush(m_File);
14947  }
14948 }
14949 
14950 #endif // #if VMA_RECORDING_ENABLED
14951 
14952 ////////////////////////////////////////////////////////////////////////////////
14953 // VmaAllocationObjectAllocator
14954 
14955 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14956  m_Allocator(pAllocationCallbacks, 1024)
14957 {
14958 }
14959 
14960 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
14961 {
14962  VmaMutexLock mutexLock(m_Mutex);
14963  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
14964 }
14965 
14966 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14967 {
14968  VmaMutexLock mutexLock(m_Mutex);
14969  m_Allocator.Free(hAlloc);
14970 }
14971 
14972 ////////////////////////////////////////////////////////////////////////////////
14973 // VmaAllocator_T
14974 
14975 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14976  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14977  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
14978  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14979  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14980  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14981  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
14982  m_hDevice(pCreateInfo->device),
14983  m_hInstance(pCreateInfo->instance),
14984  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14985  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14986  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14987  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14988  m_HeapSizeLimitMask(0),
14989  m_PreferredLargeHeapBlockSize(0),
14990  m_PhysicalDevice(pCreateInfo->physicalDevice),
14991  m_CurrentFrameIndex(0),
14992  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14993  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14994  m_NextPoolId(0),
14995  m_GlobalMemoryTypeBits(UINT32_MAX)
14996 #if VMA_RECORDING_ENABLED
14997  ,m_pRecorder(VMA_NULL)
14998 #endif
14999 {
15000  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15001  {
15002  m_UseKhrDedicatedAllocation = false;
15003  m_UseKhrBindMemory2 = false;
15004  }
15005 
15006  if(VMA_DEBUG_DETECT_CORRUPTION)
15007  {
15008  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15009  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15010  }
15011 
15012  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
15013 
15014  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15015  {
15016 #if !(VMA_DEDICATED_ALLOCATION)
15017  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15018  {
15019  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15020  }
15021 #endif
15022 #if !(VMA_BIND_MEMORY2)
15023  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15024  {
15025  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15026  }
15027 #endif
15028  }
15029 #if !(VMA_MEMORY_BUDGET)
15030  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15031  {
15032  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15033  }
15034 #endif
15035 #if VMA_VULKAN_VERSION < 1001000
15036  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15037  {
15038  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15039  }
15040 #endif
15041 
15042  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
15043  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15044  memset(&m_MemProps, 0, sizeof(m_MemProps));
15045 
15046  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15047  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15048  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15049 
15050  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15051  {
15052  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15053  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15054  }
15055 
15056  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15057 
15058  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15059  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15060 
15061  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15062  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15063  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15064  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15065 
15066  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15067  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15068 
15069  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15070 
15071  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15072  {
15073  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15074  {
15075  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15076  if(limit != VK_WHOLE_SIZE)
15077  {
15078  m_HeapSizeLimitMask |= 1u << heapIndex;
15079  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15080  {
15081  m_MemProps.memoryHeaps[heapIndex].size = limit;
15082  }
15083  }
15084  }
15085  }
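// A minimal usage sketch for the heap limits handled above (a sketch under
// assumptions: a device with two memory heaps; the 256 MiB figure is
// illustrative). VK_WHOLE_SIZE leaves a heap unlimited:
//
//   VkDeviceSize heapLimits[2] = { 256ull * 1024 * 1024, VK_WHOLE_SIZE };
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.instance = instance;
//   allocatorInfo.pHeapSizeLimit = heapLimits; // One entry per memory heap.
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);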
15086 
15087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15088  {
15089  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15090 
15091  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15092  this,
15093  VK_NULL_HANDLE, // hParentPool
15094  memTypeIndex,
15095  preferredBlockSize,
15096  0,
15097  SIZE_MAX,
15098  GetBufferImageGranularity(),
15099  pCreateInfo->frameInUseCount,
15100  false, // explicitBlockSize
15101  false); // linearAlgorithm
15102  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15103  // because minBlockCount is 0.
15104  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15105 
15106  }
15107 }
15108 
15109 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15110 {
15111  VkResult res = VK_SUCCESS;
15112 
15113  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15114  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15115  {
15116 #if VMA_RECORDING_ENABLED
15117  m_pRecorder = vma_new(this, VmaRecorder)();
15118  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15119  if(res != VK_SUCCESS)
15120  {
15121  return res;
15122  }
15123  m_pRecorder->WriteConfiguration(
15124  m_PhysicalDeviceProperties,
15125  m_MemProps,
15126  m_VulkanApiVersion,
15127  m_UseKhrDedicatedAllocation,
15128  m_UseKhrBindMemory2,
15129  m_UseExtMemoryBudget,
15130  m_UseAmdDeviceCoherentMemory);
15131  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15132 #else
15133  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15134  return VK_ERROR_FEATURE_NOT_PRESENT;
15135 #endif
15136  }
15137 
15138 #if VMA_MEMORY_BUDGET
15139  if(m_UseExtMemoryBudget)
15140  {
15141  UpdateVulkanBudget();
15142  }
15143 #endif // #if VMA_MEMORY_BUDGET
15144 
15145  return res;
15146 }
15147 
15148 VmaAllocator_T::~VmaAllocator_T()
15149 {
15150 #if VMA_RECORDING_ENABLED
15151  if(m_pRecorder != VMA_NULL)
15152  {
15153  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15154  vma_delete(this, m_pRecorder);
15155  }
15156 #endif
15157 
15158  VMA_ASSERT(m_Pools.empty());
15159 
15160  for(size_t i = GetMemoryTypeCount(); i--; )
15161  {
15162  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15163  {
15164  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15165  }
15166 
15167  vma_delete(this, m_pDedicatedAllocations[i]);
15168  vma_delete(this, m_pBlockVectors[i]);
15169  }
15170 }
15171 
15172 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15173 {
15174 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15175  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15176  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15177  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15178  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15179  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15180  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15181  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15182  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15183  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15184  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15185  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15186  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15187  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15188  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15189  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15190  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15191  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15192 #if VMA_VULKAN_VERSION >= 1001000
15193  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15194  {
15195  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
15196  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
15197  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
15198  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
15199  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
15200  m_VulkanFunctions.vkBindBufferMemory2KHR =
15201  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
15202  m_VulkanFunctions.vkBindImageMemory2KHR =
15203  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
15204  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
15205  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
15206  }
15207 #endif
15208 #if VMA_DEDICATED_ALLOCATION
15209  if(m_UseKhrDedicatedAllocation)
15210  {
15211  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
15212  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
15213  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
15214  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
15215  }
15216 #endif
15217 #if VMA_BIND_MEMORY2
15218  if(m_UseKhrBindMemory2)
15219  {
15220  m_VulkanFunctions.vkBindBufferMemory2KHR =
15221  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
15222  m_VulkanFunctions.vkBindImageMemory2KHR =
15223  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
15224  }
15225 #endif // #if VMA_BIND_MEMORY2
15226 #if VMA_MEMORY_BUDGET
15227  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15228  {
15229  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
15230  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
15231  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
15232  }
15233 #endif // #if VMA_MEMORY_BUDGET
15234 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15235 
15236 #define VMA_COPY_IF_NOT_NULL(funcName) \
15237  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15238 
15239  if(pVulkanFunctions != VMA_NULL)
15240  {
15241  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15242  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15243  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15244  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15245  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15246  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15247  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15248  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15249  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15250  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15251  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15252  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15253  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15254  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15255  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15256  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15257  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15258 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15259  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15260  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15261 #endif
15262 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15263  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15264  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15265 #endif
15266 #if VMA_MEMORY_BUDGET
15267  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15268 #endif
15269  }
15270 
15271 #undef VMA_COPY_IF_NOT_NULL
15272 
15273  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
15274  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
15275  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
15276  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
15277  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
15278  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
15279  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
15280  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
15281  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
15282  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
15283  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
15284  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
15285  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
15286  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
15287  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
15288  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
15289  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
15290  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
15291  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
15292 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15293  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
15294  {
15295  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
15296  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
15297  }
15298 #endif
15299 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15300  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
15301  {
15302  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
15303  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
15304  }
15305 #endif
15306 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
15307  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15308  {
15309  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
15310  }
15311 #endif
15312 }
15313 
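// Illustrative example, not part of the library: when VMA is compiled with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the application supplies the
// pointers consumed by ImportVulkanFunctions() above through
// VmaAllocatorCreateInfo::pVulkanFunctions. The helper below is hypothetical;
// it assumes the application links the Vulkan loader statically, so the
// global entry points are available.
static void ExampleFillVulkanFunctions(VmaVulkanFunctions* pDst)
{
    pDst->vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    pDst->vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    pDst->vkAllocateMemory = vkAllocateMemory;
    pDst->vkFreeMemory = vkFreeMemory;
    pDst->vkMapMemory = vkMapMemory;
    pDst->vkUnmapMemory = vkUnmapMemory;
    pDst->vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
    pDst->vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
    pDst->vkBindBufferMemory = vkBindBufferMemory;
    pDst->vkBindImageMemory = vkBindImageMemory;
    pDst->vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
    pDst->vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
    pDst->vkCreateBuffer = vkCreateBuffer;
    pDst->vkDestroyBuffer = vkDestroyBuffer;
    pDst->vkCreateImage = vkCreateImage;
    pDst->vkDestroyImage = vkDestroyImage;
    pDst->vkCmdCopyBuffer = vkCmdCopyBuffer;
}
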
15314 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
15315 {
15316  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15317  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15318  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
15319  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
15320 }
15321 
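// Worked example of the heuristic above, assuming the library defaults
// VMA_SMALL_HEAP_MAX_SIZE = 1 GiB and preferredLargeHeapBlockSize = 256 MiB:
// a 512 MiB heap counts as small, so its preferred block size is
// 512 MiB / 8 = 64 MiB, while an 8 GiB heap uses the full 256 MiB.
// The VmaAlignUp call only rounds the result up to a multiple of 32 bytes.
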
15322 VkResult VmaAllocator_T::AllocateMemoryOfType(
15323  VkDeviceSize size,
15324  VkDeviceSize alignment,
15325  bool dedicatedAllocation,
15326  VkBuffer dedicatedBuffer,
15327  VkImage dedicatedImage,
15328  const VmaAllocationCreateInfo& createInfo,
15329  uint32_t memTypeIndex,
15330  VmaSuballocationType suballocType,
15331  size_t allocationCount,
15332  VmaAllocation* pAllocations)
15333 {
15334  VMA_ASSERT(pAllocations != VMA_NULL);
15335  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
15336 
15337  VmaAllocationCreateInfo finalCreateInfo = createInfo;
15338 
15339  // If memory type is not HOST_VISIBLE, disable MAPPED.
15340  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15341  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15342  {
15343  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15344  }
15345  // If memory is lazily allocated, it should always be dedicated.
15346  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
15347  {
15348  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15349  }
15350 
15351  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
15352  VMA_ASSERT(blockVector);
15353 
15354  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
15355  bool preferDedicatedMemory =
15356  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
15357  dedicatedAllocation ||
15358  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
15359  size > preferredBlockSize / 2;
15360 
15361  if(preferDedicatedMemory &&
15362  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
15363  finalCreateInfo.pool == VK_NULL_HANDLE)
15364  {
15365  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
15366  }
15367 
15368  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
15369  {
15370  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15371  {
15372  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15373  }
15374  else
15375  {
15376  return AllocateDedicatedMemory(
15377  size,
15378  suballocType,
15379  memTypeIndex,
15380  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15381  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15382  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15383  finalCreateInfo.pUserData,
15384  dedicatedBuffer,
15385  dedicatedImage,
15386  allocationCount,
15387  pAllocations);
15388  }
15389  }
15390  else
15391  {
15392  VkResult res = blockVector->Allocate(
15393  m_CurrentFrameIndex.load(),
15394  size,
15395  alignment,
15396  finalCreateInfo,
15397  suballocType,
15398  allocationCount,
15399  pAllocations);
15400  if(res == VK_SUCCESS)
15401  {
15402  return res;
15403  }
15404 
15405  // Try dedicated memory.
15406  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15407  {
15408  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15409  }
15410  else
15411  {
15412  res = AllocateDedicatedMemory(
15413  size,
15414  suballocType,
15415  memTypeIndex,
15416  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
15417  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
15418  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
15419  finalCreateInfo.pUserData,
15420  dedicatedBuffer,
15421  dedicatedImage,
15422  allocationCount,
15423  pAllocations);
15424  if(res == VK_SUCCESS)
15425  {
15426  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
15427  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
15428  return VK_SUCCESS;
15429  }
15430  else
15431  {
15432  // Everything failed: Return error code.
15433  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15434  return res;
15435  }
15436  }
15437  }
15438 }
15439 
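// Illustrative example, not part of the library: the heuristic above makes
// any request larger than half the preferred block size dedicated
// automatically; an application can also force the dedicated path explicitly.
// The helper name and sizes below are hypothetical.
static VkResult ExampleCreateDedicatedBuffer(VmaAllocator allocator, VkBuffer* pBuf, VmaAllocation* pAlloc)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 64ull * 1024 * 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Skips the block vector entirely and goes straight to AllocateDedicatedMemory().
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuf, pAlloc, VMA_NULL);
}
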
15440 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15441  VkDeviceSize size,
15442  VmaSuballocationType suballocType,
15443  uint32_t memTypeIndex,
15444  bool withinBudget,
15445  bool map,
15446  bool isUserDataString,
15447  void* pUserData,
15448  VkBuffer dedicatedBuffer,
15449  VkImage dedicatedImage,
15450  size_t allocationCount,
15451  VmaAllocation* pAllocations)
15452 {
15453  VMA_ASSERT(allocationCount > 0 && pAllocations);
15454 
15455  if(withinBudget)
15456  {
15457  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15458  VmaBudget heapBudget = {};
15459  GetBudget(&heapBudget, heapIndex, 1);
15460  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
15461  {
15462  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15463  }
15464  }
15465 
15466  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
15467  allocInfo.memoryTypeIndex = memTypeIndex;
15468  allocInfo.allocationSize = size;
15469 
15470 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15471  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
15472  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15473  {
15474  if(dedicatedBuffer != VK_NULL_HANDLE)
15475  {
15476  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
15477  dedicatedAllocInfo.buffer = dedicatedBuffer;
15478  allocInfo.pNext = &dedicatedAllocInfo;
15479  }
15480  else if(dedicatedImage != VK_NULL_HANDLE)
15481  {
15482  dedicatedAllocInfo.image = dedicatedImage;
15483  allocInfo.pNext = &dedicatedAllocInfo;
15484  }
15485  }
15486 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15487 
15488  size_t allocIndex;
15489  VkResult res = VK_SUCCESS;
15490  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15491  {
15492  res = AllocateDedicatedMemoryPage(
15493  size,
15494  suballocType,
15495  memTypeIndex,
15496  allocInfo,
15497  map,
15498  isUserDataString,
15499  pUserData,
15500  pAllocations + allocIndex);
15501  if(res != VK_SUCCESS)
15502  {
15503  break;
15504  }
15505  }
15506 
15507  if(res == VK_SUCCESS)
15508  {
15509  // Register them in m_pDedicatedAllocations.
15510  {
15511  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15512  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15513  VMA_ASSERT(pDedicatedAllocations);
15514  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15515  {
15516  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
15517  }
15518  }
15519 
15520  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
15521  }
15522  else
15523  {
15524  // Free all already created allocations.
15525  while(allocIndex--)
15526  {
15527  VmaAllocation currAlloc = pAllocations[allocIndex];
15528  VkDeviceMemory hMemory = currAlloc->GetMemory();
15529 
15530  /*
15531  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15532  before vkFreeMemory.
15533 
15534  if(currAlloc->GetMappedData() != VMA_NULL)
15535  {
15536  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15537  }
15538  */
15539 
15540  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
15541  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
15542  currAlloc->SetUserData(this, VMA_NULL);
15543  m_AllocationObjectAllocator.Free(currAlloc);
15544  }
15545 
15546  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15547  }
15548 
15549  return res;
15550 }
15551 
15552 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15553  VkDeviceSize size,
15554  VmaSuballocationType suballocType,
15555  uint32_t memTypeIndex,
15556  const VkMemoryAllocateInfo& allocInfo,
15557  bool map,
15558  bool isUserDataString,
15559  void* pUserData,
15560  VmaAllocation* pAllocation)
15561 {
15562  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15563  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15564  if(res < 0)
15565  {
15566  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15567  return res;
15568  }
15569 
15570  void* pMappedData = VMA_NULL;
15571  if(map)
15572  {
15573  res = (*m_VulkanFunctions.vkMapMemory)(
15574  m_hDevice,
15575  hMemory,
15576  0,
15577  VK_WHOLE_SIZE,
15578  0,
15579  &pMappedData);
15580  if(res < 0)
15581  {
15582  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15583  FreeVulkanMemory(memTypeIndex, size, hMemory);
15584  return res;
15585  }
15586  }
15587 
15588  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
15589  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15590  (*pAllocation)->SetUserData(this, pUserData);
15591  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15592  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15593  {
15594  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15595  }
15596 
15597  return VK_SUCCESS;
15598 }
15599 
15600 void VmaAllocator_T::GetBufferMemoryRequirements(
15601  VkBuffer hBuffer,
15602  VkMemoryRequirements& memReq,
15603  bool& requiresDedicatedAllocation,
15604  bool& prefersDedicatedAllocation) const
15605 {
15606 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15607  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15608  {
15609  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15610  memReqInfo.buffer = hBuffer;
15611 
15612  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15613 
15614  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15615  memReq2.pNext = &memDedicatedReq;
15616 
15617  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15618 
15619  memReq = memReq2.memoryRequirements;
15620  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15621  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15622  }
15623  else
15624 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15625  {
15626  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15627  requiresDedicatedAllocation = false;
15628  prefersDedicatedAllocation = false;
15629  }
15630 }
15631 
15632 void VmaAllocator_T::GetImageMemoryRequirements(
15633  VkImage hImage,
15634  VkMemoryRequirements& memReq,
15635  bool& requiresDedicatedAllocation,
15636  bool& prefersDedicatedAllocation) const
15637 {
15638 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15639  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15640  {
15641  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15642  memReqInfo.image = hImage;
15643 
15644  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15645 
15646  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15647  memReq2.pNext = &memDedicatedReq;
15648 
15649  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15650 
15651  memReq = memReq2.memoryRequirements;
15652  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15653  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15654  }
15655  else
15656 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15657  {
15658  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15659  requiresDedicatedAllocation = false;
15660  prefersDedicatedAllocation = false;
15661  }
15662 }
15663 
15664 VkResult VmaAllocator_T::AllocateMemory(
15665  const VkMemoryRequirements& vkMemReq,
15666  bool requiresDedicatedAllocation,
15667  bool prefersDedicatedAllocation,
15668  VkBuffer dedicatedBuffer,
15669  VkImage dedicatedImage,
15670  const VmaAllocationCreateInfo& createInfo,
15671  VmaSuballocationType suballocType,
15672  size_t allocationCount,
15673  VmaAllocation* pAllocations)
15674 {
15675  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15676 
15677  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15678 
15679  if(vkMemReq.size == 0)
15680  {
15681  return VK_ERROR_VALIDATION_FAILED_EXT;
15682  }
15683  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15684  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15685  {
15686  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15687  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15688  }
15689  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15690  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
15691  {
15692  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15693  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15694  }
15695  if(requiresDedicatedAllocation)
15696  {
15697  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15698  {
15699  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15700  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15701  }
15702  if(createInfo.pool != VK_NULL_HANDLE)
15703  {
15704  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15705  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15706  }
15707  }
15708  if((createInfo.pool != VK_NULL_HANDLE) &&
15709  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15710  {
15711  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15712  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15713  }
15714 
15715  if(createInfo.pool != VK_NULL_HANDLE)
15716  {
15717  const VkDeviceSize alignmentForPool = VMA_MAX(
15718  vkMemReq.alignment,
15719  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15720 
15721  VmaAllocationCreateInfo createInfoForPool = createInfo;
15722  // If memory type is not HOST_VISIBLE, disable MAPPED.
15723  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15724  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15725  {
15726  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15727  }
15728 
15729  return createInfo.pool->m_BlockVector.Allocate(
15730  m_CurrentFrameIndex.load(),
15731  vkMemReq.size,
15732  alignmentForPool,
15733  createInfoForPool,
15734  suballocType,
15735  allocationCount,
15736  pAllocations);
15737  }
15738  else
15739  {
15740  // Bit mask of Vulkan memory types acceptable for this allocation.
15741  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15742  uint32_t memTypeIndex = UINT32_MAX;
15743  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15744  if(res == VK_SUCCESS)
15745  {
15746  VkDeviceSize alignmentForMemType = VMA_MAX(
15747  vkMemReq.alignment,
15748  GetMemoryTypeMinAlignment(memTypeIndex));
15749 
15750  res = AllocateMemoryOfType(
15751  vkMemReq.size,
15752  alignmentForMemType,
15753  requiresDedicatedAllocation || prefersDedicatedAllocation,
15754  dedicatedBuffer,
15755  dedicatedImage,
15756  createInfo,
15757  memTypeIndex,
15758  suballocType,
15759  allocationCount,
15760  pAllocations);
15761  // Succeeded on first try.
15762  if(res == VK_SUCCESS)
15763  {
15764  return res;
15765  }
15766  // Allocation from this memory type failed. Try other compatible memory types.
15767  else
15768  {
15769  for(;;)
15770  {
15771  // Remove old memTypeIndex from list of possibilities.
15772  memoryTypeBits &= ~(1u << memTypeIndex);
15773  // Find alternative memTypeIndex.
15774  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15775  if(res == VK_SUCCESS)
15776  {
15777  alignmentForMemType = VMA_MAX(
15778  vkMemReq.alignment,
15779  GetMemoryTypeMinAlignment(memTypeIndex));
15780 
15781  res = AllocateMemoryOfType(
15782  vkMemReq.size,
15783  alignmentForMemType,
15784  requiresDedicatedAllocation || prefersDedicatedAllocation,
15785  dedicatedBuffer,
15786  dedicatedImage,
15787  createInfo,
15788  memTypeIndex,
15789  suballocType,
15790  allocationCount,
15791  pAllocations);
15792  // Allocation from this alternative memory type succeeded.
15793  if(res == VK_SUCCESS)
15794  {
15795  return res;
15796  }
15797  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15798  }
15799  // No other matching memory type index could be found.
15800  else
15801  {
15802  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15803  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15804  }
15805  }
15806  }
15807  }
15808  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15809  else
15810  return res;
15811  }
15812 }
15813 
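// Illustrative example, not part of the library: vmaFindMemoryTypeIndex() is
// the same entry point the fallback loop above uses to pick and then exclude
// memory types. The helper name below is hypothetical.
static VkResult ExampleFindStagingMemoryType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    // UINT32_MAX allows every memory type, like an unrestricted vkMemReq.memoryTypeBits.
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}
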
15814 void VmaAllocator_T::FreeMemory(
15815  size_t allocationCount,
15816  const VmaAllocation* pAllocations)
15817 {
15818  VMA_ASSERT(pAllocations);
15819 
15820  for(size_t allocIndex = allocationCount; allocIndex--; )
15821  {
15822  VmaAllocation allocation = pAllocations[allocIndex];
15823 
15824  if(allocation != VK_NULL_HANDLE)
15825  {
15826  if(TouchAllocation(allocation))
15827  {
15828  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15829  {
15830  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15831  }
15832 
15833  switch(allocation->GetType())
15834  {
15835  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15836  {
15837  VmaBlockVector* pBlockVector = VMA_NULL;
15838  VmaPool hPool = allocation->GetBlock()->GetParentPool();
15839  if(hPool != VK_NULL_HANDLE)
15840  {
15841  pBlockVector = &hPool->m_BlockVector;
15842  }
15843  else
15844  {
15845  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15846  pBlockVector = m_pBlockVectors[memTypeIndex];
15847  }
15848  pBlockVector->Free(allocation);
15849  }
15850  break;
15851  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15852  FreeDedicatedMemory(allocation);
15853  break;
15854  default:
15855  VMA_ASSERT(0);
15856  }
15857  }
15858 
15859  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
15860  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
15861  allocation->SetUserData(this, VMA_NULL);
15862  m_AllocationObjectAllocator.Free(allocation);
15863  }
15864  }
15865 }
15866 
15867 VkResult VmaAllocator_T::ResizeAllocation(
15868  const VmaAllocation alloc,
15869  VkDeviceSize newSize)
15870 {
15871  // This function is deprecated, so it does nothing. It's kept for backward compatibility.
15872  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
15873  {
15874  return VK_ERROR_VALIDATION_FAILED_EXT;
15875  }
15876  if(newSize == alloc->GetSize())
15877  {
15878  return VK_SUCCESS;
15879  }
15880  return VK_ERROR_OUT_OF_POOL_MEMORY;
15881 }
15882 
15883 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
15884 {
15885  // Initialize.
15886  InitStatInfo(pStats->total);
15887  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15888  InitStatInfo(pStats->memoryType[i]);
15889  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15890  InitStatInfo(pStats->memoryHeap[i]);
15891 
15892  // Process default pools.
15893  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15894  {
15895  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15896  VMA_ASSERT(pBlockVector);
15897  pBlockVector->AddStats(pStats);
15898  }
15899 
15900  // Process custom pools.
15901  {
15902  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15903  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15904  {
15905  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15906  }
15907  }
15908 
15909  // Process dedicated allocations.
15910  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15911  {
15912  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15913  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15914  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15915  VMA_ASSERT(pDedicatedAllocVector);
15916  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15917  {
15918  VmaStatInfo allocationStatInfo;
15919  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15920  VmaAddStatInfo(pStats->total, allocationStatInfo);
15921  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15922  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15923  }
15924  }
15925 
15926  // Postprocess.
15927  VmaPostprocessCalcStatInfo(pStats->total);
15928  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15929  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15930  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15931  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15932 }
15933 
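// Illustrative example, not part of the library: the public vmaCalculateStats()
// forwards to CalculateStats() above, aggregating default pools, custom pools
// and dedicated allocations. The helper name below is hypothetical.
static VkDeviceSize ExampleTotalUsedBytes(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    return stats.total.usedBytes;
}
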
15934 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
15935 {
15936 #if VMA_MEMORY_BUDGET
15937  if(m_UseExtMemoryBudget)
15938  {
15939  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
15940  {
15941  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
15942  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15943  {
15944  const uint32_t heapIndex = firstHeap + i;
15945 
15946  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15947  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15948 
15949  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
15950  {
15951  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
15952  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
15953  }
15954  else
15955  {
15956  outBudget->usage = 0;
15957  }
15958 
15959  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
15960  outBudget->budget = VMA_MIN(
15961  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
15962  }
15963  }
15964  else
15965  {
15966  UpdateVulkanBudget(); // Outside of mutex lock
15967  GetBudget(outBudget, firstHeap, heapCount); // Recursion
15968  }
15969  }
15970  else
15971 #endif
15972  {
15973  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15974  {
15975  const uint32_t heapIndex = firstHeap + i;
15976 
15977  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15978  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15979 
15980  outBudget->usage = outBudget->blockBytes;
15981  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
15982  }
15983  }
15984 }
15985 
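// Illustrative example, not part of the library: the public vmaGetBudget()
// fills one VmaBudget per memory heap using GetBudget() above. The helper
// name and the 90% threshold below are hypothetical.
static bool ExampleHeapIsNearBudget(VmaAllocator allocator, uint32_t heapIndex)
{
    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);
    // Without VK_EXT_memory_budget the budget falls back to the 80% heuristic above.
    return budgets[heapIndex].usage >= budgets[heapIndex].budget * 9 / 10;
}
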
15986 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15987 
15988 VkResult VmaAllocator_T::DefragmentationBegin(
15989  const VmaDefragmentationInfo2& info,
15990  VmaDefragmentationStats* pStats,
15991  VmaDefragmentationContext* pContext)
15992 {
15993  if(info.pAllocationsChanged != VMA_NULL)
15994  {
15995  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15996  }
15997 
15998  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15999  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16000 
16001  (*pContext)->AddPools(info.poolCount, info.pPools);
16002  (*pContext)->AddAllocations(
16003  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16004 
16005  VkResult res = (*pContext)->Defragment(
16006  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16007  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16008  info.commandBuffer, pStats, info.flags);
16009 
16010  if(res != VK_NOT_READY)
16011  {
16012  vma_delete(this, *pContext);
16013  *pContext = VMA_NULL;
16014  }
16015 
16016  return res;
16017 }
16018 
16019 VkResult VmaAllocator_T::DefragmentationEnd(
16020  VmaDefragmentationContext context)
16021 {
16022  vma_delete(this, context);
16023  return VK_SUCCESS;
16024 }
16025 
16026 VkResult VmaAllocator_T::DefragmentationPassBegin(
16027  VmaDefragmentationPassInfo* pInfo,
16028  VmaDefragmentationContext context)
16029 {
16030  return context->DefragmentPassBegin(pInfo);
16031 }
16032 VkResult VmaAllocator_T::DefragmentationPassEnd(
16033  VmaDefragmentationContext context)
16034 {
16035  return context->DefragmentPassEnd();
16036 
16037 }
16038 
16039 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16040 {
16041  if(hAllocation->CanBecomeLost())
16042  {
16043  /*
16044  Warning: This is a carefully designed algorithm.
16045  Do not modify unless you really know what you're doing :)
16046  */
16047  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16048  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16049  for(;;)
16050  {
16051  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16052  {
16053  pAllocationInfo->memoryType = UINT32_MAX;
16054  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16055  pAllocationInfo->offset = 0;
16056  pAllocationInfo->size = hAllocation->GetSize();
16057  pAllocationInfo->pMappedData = VMA_NULL;
16058  pAllocationInfo->pUserData = hAllocation->GetUserData();
16059  return;
16060  }
16061  else if(localLastUseFrameIndex == localCurrFrameIndex)
16062  {
16063  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16064  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16065  pAllocationInfo->offset = hAllocation->GetOffset();
16066  pAllocationInfo->size = hAllocation->GetSize();
16067  pAllocationInfo->pMappedData = VMA_NULL;
16068  pAllocationInfo->pUserData = hAllocation->GetUserData();
16069  return;
16070  }
16071  else // Last use time earlier than current time.
16072  {
16073  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16074  {
16075  localLastUseFrameIndex = localCurrFrameIndex;
16076  }
16077  }
16078  }
16079  }
16080  else
16081  {
16082 #if VMA_STATS_STRING_ENABLED
16083  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16084  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16085  for(;;)
16086  {
16087  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16088  if(localLastUseFrameIndex == localCurrFrameIndex)
16089  {
16090  break;
16091  }
16092  else // Last use time earlier than current time.
16093  {
16094  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16095  {
16096  localLastUseFrameIndex = localCurrFrameIndex;
16097  }
16098  }
16099  }
16100 #endif
16101 
16102  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16103  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16104  pAllocationInfo->offset = hAllocation->GetOffset();
16105  pAllocationInfo->size = hAllocation->GetSize();
16106  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16107  pAllocationInfo->pUserData = hAllocation->GetUserData();
16108  }
16109 }
16110 
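// Illustrative example, not part of the library: for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, GetAllocationInfo() above reports
// deviceMemory == VK_NULL_HANDLE once the allocation is lost. The helper name
// below is hypothetical.
static bool ExampleAllocationIsLost(VmaAllocator allocator, VmaAllocation alloc)
{
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, alloc, &allocInfo);
    return allocInfo.deviceMemory == VK_NULL_HANDLE;
}
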
16111 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16112 {
16113  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16114  if(hAllocation->CanBecomeLost())
16115  {
16116  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16117  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16118  for(;;)
16119  {
16120  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16121  {
16122  return false;
16123  }
16124  else if(localLastUseFrameIndex == localCurrFrameIndex)
16125  {
16126  return true;
16127  }
16128  else // Last use time earlier than current time.
16129  {
16130  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16131  {
16132  localLastUseFrameIndex = localCurrFrameIndex;
16133  }
16134  }
16135  }
16136  }
16137  else
16138  {
16139 #if VMA_STATS_STRING_ENABLED
16140  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16141  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16142  for(;;)
16143  {
16144  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16145  if(localLastUseFrameIndex == localCurrFrameIndex)
16146  {
16147  break;
16148  }
16149  else // Last use time earlier than current time.
16150  {
16151  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16152  {
16153  localLastUseFrameIndex = localCurrFrameIndex;
16154  }
16155  }
16156  }
16157 #endif
16158 
16159  return true;
16160  }
16161 }
16162 
16163 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16164 {
16165  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16166 
16167  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16168 
16169  if(newCreateInfo.maxBlockCount == 0)
16170  {
16171  newCreateInfo.maxBlockCount = SIZE_MAX;
16172  }
16173  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16174  {
16175  return VK_ERROR_INITIALIZATION_FAILED;
16176  }
16177  // Memory type index out of range or forbidden.
16178  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16179  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16180  {
16181  return VK_ERROR_FEATURE_NOT_PRESENT;
16182  }
16183 
16184  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16185 
16186  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16187 
16188  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16189  if(res != VK_SUCCESS)
16190  {
16191  vma_delete(this, *pPool);
16192  *pPool = VMA_NULL;
16193  return res;
16194  }
16195 
16196  // Add to m_Pools.
16197  {
16198  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16199  (*pPool)->SetId(m_NextPoolId++);
16200  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16201  }
16202 
16203  return VK_SUCCESS;
16204 }
16205 
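// Illustrative example, not part of the library: a custom pool passing the
// checks in CreatePool() above. The helper name and block sizes are
// hypothetical; memTypeIndex would typically come from vmaFindMemoryTypeIndex().
static VkResult ExampleCreateCustomPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Fixed 64 MiB blocks.
    poolCreateInfo.minBlockCount = 1; // Preallocated by CreateMinBlocks() above.
    poolCreateInfo.maxBlockCount = 4; // 0 would mean unlimited (SIZE_MAX, as set above).
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}
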
16206 void VmaAllocator_T::DestroyPool(VmaPool pool)
16207 {
16208  // Remove from m_Pools.
16209  {
16210  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16211  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16212  VMA_ASSERT(success && "Pool not found in Allocator.");
16213  }
16214 
16215  vma_delete(this, pool);
16216 }
16217 
16218 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16219 {
16220  pool->m_BlockVector.GetPoolStats(pPoolStats);
16221 }
16222 
16223 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16224 {
16225  m_CurrentFrameIndex.store(frameIndex);
16226 
16227 #if VMA_MEMORY_BUDGET
16228  if(m_UseExtMemoryBudget)
16229  {
16230  UpdateVulkanBudget();
16231  }
16232 #endif // #if VMA_MEMORY_BUDGET
16233 }
16234 
16235 void VmaAllocator_T::MakePoolAllocationsLost(
16236  VmaPool hPool,
16237  size_t* pLostAllocationCount)
16238 {
16239  hPool->m_BlockVector.MakePoolAllocationsLost(
16240  m_CurrentFrameIndex.load(),
16241  pLostAllocationCount);
16242 }
16243 
16244 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
16245 {
16246  return hPool->m_BlockVector.CheckCorruption();
16247 }
16248 
16249 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
16250 {
16251  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
16252 
16253  // Process default pools.
16254  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16255  {
16256  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
16257  {
16258  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16259  VMA_ASSERT(pBlockVector);
16260  VkResult localRes = pBlockVector->CheckCorruption();
16261  switch(localRes)
16262  {
16263  case VK_ERROR_FEATURE_NOT_PRESENT:
16264  break;
16265  case VK_SUCCESS:
16266  finalRes = VK_SUCCESS;
16267  break;
16268  default:
16269  return localRes;
16270  }
16271  }
16272  }
16273 
16274  // Process custom pools.
16275  {
16276  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16277  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16278  {
16279  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
16280  {
16281  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
16282  switch(localRes)
16283  {
16284  case VK_ERROR_FEATURE_NOT_PRESENT:
16285  break;
16286  case VK_SUCCESS:
16287  finalRes = VK_SUCCESS;
16288  break;
16289  default:
16290  return localRes;
16291  }
16292  }
16293  }
16294  }
16295 
16296  return finalRes;
16297 }
16298 
16299 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
16300 {
16301  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
16302  (*pAllocation)->InitLost();
16303 }
16304 
16305 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
16306 {
16307  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
16308 
16309  // HeapSizeLimit is in effect for this heap.
16310  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
16311  {
16312  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16313  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
16314  for(;;)
16315  {
16316  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
16317  if(blockBytesAfterAllocation > heapSize)
16318  {
16319  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16320  }
16321  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
16322  {
16323  break;
16324  }
16325  }
16326  }
16327  else
16328  {
16329  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
16330  }
16331 
16332  // VULKAN CALL vkAllocateMemory.
16333  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
16334 
16335  if(res == VK_SUCCESS)
16336  {
16337 #if VMA_MEMORY_BUDGET
16338  ++m_Budget.m_OperationsSinceBudgetFetch;
16339 #endif
16340 
16341  // Informative callback.
16342  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
16343  {
16344  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
16345  }
16346  }
16347  else
16348  {
16349  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
16350  }
16351 
16352  return res;
16353 }
16354 
16355 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
16356 {
16357  // Informative callback.
16358  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
16359  {
16360  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
16361  }
16362 
16363  // VULKAN CALL vkFreeMemory.
16364  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
16365 
16366  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
16367 }
16368 
16369 VkResult VmaAllocator_T::BindVulkanBuffer(
16370  VkDeviceMemory memory,
16371  VkDeviceSize memoryOffset,
16372  VkBuffer buffer,
16373  const void* pNext)
16374 {
16375  if(pNext != VMA_NULL)
16376  {
16377 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16378  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16379  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
16380  {
16381  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
16382  bindBufferMemoryInfo.pNext = pNext;
16383  bindBufferMemoryInfo.buffer = buffer;
16384  bindBufferMemoryInfo.memory = memory;
16385  bindBufferMemoryInfo.memoryOffset = memoryOffset;
16386  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
16387  }
16388  else
16389 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16390  {
16391  return VK_ERROR_EXTENSION_NOT_PRESENT;
16392  }
16393  }
16394  else
16395  {
16396  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
16397  }
16398 }
16399 
16400 VkResult VmaAllocator_T::BindVulkanImage(
16401  VkDeviceMemory memory,
16402  VkDeviceSize memoryOffset,
16403  VkImage image,
16404  const void* pNext)
16405 {
16406  if(pNext != VMA_NULL)
16407  {
16408 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16409  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
16410  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
16411  {
16412  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
16413  bindImageMemoryInfo.pNext = pNext;
16414  bindImageMemoryInfo.image = image;
16415  bindImageMemoryInfo.memory = memory;
16416  bindImageMemoryInfo.memoryOffset = memoryOffset;
16417  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
16418  }
16419  else
16420 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
16421  {
16422  return VK_ERROR_EXTENSION_NOT_PRESENT;
16423  }
16424  }
16425  else
16426  {
16427  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
16428  }
16429 }
16430 
16431 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
16432 {
16433  if(hAllocation->CanBecomeLost())
16434  {
16435  return VK_ERROR_MEMORY_MAP_FAILED;
16436  }
16437 
16438  switch(hAllocation->GetType())
16439  {
16440  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16441  {
16442  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16443  char *pBytes = VMA_NULL;
16444  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
16445  if(res == VK_SUCCESS)
16446  {
16447  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
16448  hAllocation->BlockAllocMap();
16449  }
16450  return res;
16451  }
16452  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16453  return hAllocation->DedicatedAllocMap(this, ppData);
16454  default:
16455  VMA_ASSERT(0);
16456  return VK_ERROR_MEMORY_MAP_FAILED;
16457  }
16458 }
16459 
16460 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16461 {
16462  switch(hAllocation->GetType())
16463  {
16464  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16465  {
16466  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16467  hAllocation->BlockAllocUnmap();
16468  pBlock->Unmap(this, 1);
16469  }
16470  break;
16471  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16472  hAllocation->DedicatedAllocUnmap(this);
16473  break;
16474  default:
16475  VMA_ASSERT(0);
16476  }
16477 }
16478 
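// Illustrative example, not part of the library: the public
// vmaMapMemory()/vmaUnmapMemory() pair forwards to Map()/Unmap() above; for a
// block allocation the returned pointer is already offset to the allocation's
// start within the block. The helper name is hypothetical and memcpy assumes
// <cstring> is available.
static VkResult ExampleUploadData(VmaAllocator allocator, VmaAllocation alloc, const void* pSrc, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(pData, pSrc, size);
    vmaUnmapMemory(allocator, alloc);
    return VK_SUCCESS;
}
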
16479 VkResult VmaAllocator_T::BindBufferMemory(
16480  VmaAllocation hAllocation,
16481  VkDeviceSize allocationLocalOffset,
16482  VkBuffer hBuffer,
16483  const void* pNext)
16484 {
16485  VkResult res = VK_SUCCESS;
16486  switch(hAllocation->GetType())
16487  {
16488  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16489  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16490  break;
16491  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16492  {
16493  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16494  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16495  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16496  break;
16497  }
16498  default:
16499  VMA_ASSERT(0);
16500  }
16501  return res;
16502 }
16503 
16504 VkResult VmaAllocator_T::BindImageMemory(
16505  VmaAllocation hAllocation,
16506  VkDeviceSize allocationLocalOffset,
16507  VkImage hImage,
16508  const void* pNext)
16509 {
16510  VkResult res = VK_SUCCESS;
16511  switch(hAllocation->GetType())
16512  {
16513  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16514  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16515  break;
16516  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16517  {
16518  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16519  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16520  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16521  break;
16522  }
16523  default:
16524  VMA_ASSERT(0);
16525  }
16526  return res;
16527 }
16528 
16529 void VmaAllocator_T::FlushOrInvalidateAllocation(
16530  VmaAllocation hAllocation,
16531  VkDeviceSize offset, VkDeviceSize size,
16532  VMA_CACHE_OPERATION op)
16533 {
16534  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
16535  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
16536  {
16537  const VkDeviceSize allocationSize = hAllocation->GetSize();
16538  VMA_ASSERT(offset <= allocationSize);
16539 
16540  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
16541 
16542  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
16543  memRange.memory = hAllocation->GetMemory();
16544 
16545  switch(hAllocation->GetType())
16546  {
16547  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16548  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16549  if(size == VK_WHOLE_SIZE)
16550  {
16551  memRange.size = allocationSize - memRange.offset;
16552  }
16553  else
16554  {
16555  VMA_ASSERT(offset + size <= allocationSize);
16556  memRange.size = VMA_MIN(
16557  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
16558  allocationSize - memRange.offset);
16559  }
16560  break;
16561 
16562  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16563  {
16564  // 1. Still within this allocation.
16565  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16566  if(size == VK_WHOLE_SIZE)
16567  {
16568  size = allocationSize - offset;
16569  }
16570  else
16571  {
16572  VMA_ASSERT(offset + size <= allocationSize);
16573  }
16574  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
16575 
16576  // 2. Adjust to whole block.
16577  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
16578  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
16579  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
16580  memRange.offset += allocationOffset;
16581  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
16582 
16583  break;
16584  }
16585 
16586  default:
16587  VMA_ASSERT(0);
16588  }
16589 
16590  switch(op)
16591  {
16592  case VMA_CACHE_FLUSH:
16593  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
16594  break;
16595  case VMA_CACHE_INVALIDATE:
16596  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
16597  break;
16598  default:
16599  VMA_ASSERT(0);
16600  }
16601  }
16602  // else: Just ignore this call.
16603 }
16604 
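// Illustrative example, not part of the library: writes to HOST_VISIBLE but
// non-HOST_COHERENT memory must be flushed. The public vmaFlushAllocation()
// forwards to FlushOrInvalidateAllocation() above, which is a no-op for
// coherent memory types. The helper name is hypothetical.
static void ExampleFlushAfterWrite(VmaAllocator allocator, VmaAllocation alloc)
{
    // Offset and size are rounded to nonCoherentAtomSize internally, so
    // 0 / VK_WHOLE_SIZE is always a valid range.
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
}
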
16605 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16606 {
16607  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16608 
16609  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16610  {
16611  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16612  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16613  VMA_ASSERT(pDedicatedAllocations);
16614  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16615  VMA_ASSERT(success);
16616  }
16617 
16618  VkDeviceMemory hMemory = allocation->GetMemory();
16619 
16620  /*
16621  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16622  before vkFreeMemory.
16623 
16624  if(allocation->GetMappedData() != VMA_NULL)
16625  {
16626  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16627  }
16628  */
16629 
16630  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16631 
16632  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16633 }
16634 
16635 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16636 {
16637  VkBufferCreateInfo dummyBufCreateInfo;
16638  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16639 
16640  uint32_t memoryTypeBits = 0;
16641 
16642  // Create buffer.
16643  VkBuffer buf = VK_NULL_HANDLE;
16644  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16645  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16646  if(res == VK_SUCCESS)
16647  {
16648  // Query for supported memory types.
16649  VkMemoryRequirements memReq;
16650  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16651  memoryTypeBits = memReq.memoryTypeBits;
16652 
16653  // Destroy buffer.
16654  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16655  }
16656 
16657  return memoryTypeBits;
16658 }
16659 
16660 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
16661 {
16662  // Make sure memory information is already fetched.
16663  VMA_ASSERT(GetMemoryTypeCount() > 0);
16664 
16665  uint32_t memoryTypeBits = UINT32_MAX;
16666 
16667  if(!m_UseAmdDeviceCoherentMemory)
16668  {
16669  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
16670  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16671  {
16672  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
16673  {
16674  memoryTypeBits &= ~(1u << memTypeIndex);
16675  }
16676  }
16677  }
16678 
16679  return memoryTypeBits;
16680 }
16681 
16682 #if VMA_MEMORY_BUDGET
16683 
16684 void VmaAllocator_T::UpdateVulkanBudget()
16685 {
16686  VMA_ASSERT(m_UseExtMemoryBudget);
16687 
16688  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16689 
16690  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16691  memProps.pNext = &budgetProps;
16692 
16693  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16694 
16695  {
16696  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16697 
16698  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16699  {
16700  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16701  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16702  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16703  }
16704  m_Budget.m_OperationsSinceBudgetFetch = 0;
16705  }
16706 }
16707 
16708 #endif // #if VMA_MEMORY_BUDGET
16709 
16710 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16711 {
16712  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16713  !hAllocation->CanBecomeLost() &&
16714  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16715  {
16716  void* pData = VMA_NULL;
16717  VkResult res = Map(hAllocation, &pData);
16718  if(res == VK_SUCCESS)
16719  {
16720  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16721  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16722  Unmap(hAllocation);
16723  }
16724  else
16725  {
16726  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16727  }
16728  }
16729 }
16730 
16731 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16732 {
16733  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16734  if(memoryTypeBits == UINT32_MAX)
16735  {
16736  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16737  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16738  }
16739  return memoryTypeBits;
16740 }
16741 
16742 #if VMA_STATS_STRING_ENABLED
16743 
16744 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16745 {
16746  bool dedicatedAllocationsStarted = false;
16747  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16748  {
16749  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16750  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16751  VMA_ASSERT(pDedicatedAllocVector);
16752  if(pDedicatedAllocVector->empty() == false)
16753  {
16754  if(dedicatedAllocationsStarted == false)
16755  {
16756  dedicatedAllocationsStarted = true;
16757  json.WriteString("DedicatedAllocations");
16758  json.BeginObject();
16759  }
16760 
16761  json.BeginString("Type ");
16762  json.ContinueString(memTypeIndex);
16763  json.EndString();
16764 
16765  json.BeginArray();
16766 
16767  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
16768  {
16769  json.BeginObject(true);
16770  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
16771  hAlloc->PrintParameters(json);
16772  json.EndObject();
16773  }
16774 
16775  json.EndArray();
16776  }
16777  }
16778  if(dedicatedAllocationsStarted)
16779  {
16780  json.EndObject();
16781  }
16782 
16783  {
16784  bool allocationsStarted = false;
16785  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16786  {
16787  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
16788  {
16789  if(allocationsStarted == false)
16790  {
16791  allocationsStarted = true;
16792  json.WriteString("DefaultPools");
16793  json.BeginObject();
16794  }
16795 
16796  json.BeginString("Type ");
16797  json.ContinueString(memTypeIndex);
16798  json.EndString();
16799 
16800  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
16801  }
16802  }
16803  if(allocationsStarted)
16804  {
16805  json.EndObject();
16806  }
16807  }
16808 
16809  // Custom pools
16810  {
16811  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16812  const size_t poolCount = m_Pools.size();
16813  if(poolCount > 0)
16814  {
16815  json.WriteString("Pools");
16816  json.BeginObject();
16817  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
16818  {
16819  json.BeginString();
16820  json.ContinueString(m_Pools[poolIndex]->GetId());
16821  json.EndString();
16822 
16823  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
16824  }
16825  json.EndObject();
16826  }
16827  }
16828 }
16829 
16830 #endif // #if VMA_STATS_STRING_ENABLED
16831 
16832 ////////////////////////////////////////////////////////////////////////////////
16833 // Public interface
16834 
16835 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
16836  const VmaAllocatorCreateInfo* pCreateInfo,
16837  VmaAllocator* pAllocator)
16838 {
16839  VMA_ASSERT(pCreateInfo && pAllocator);
16840  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
16841  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
16842  VMA_DEBUG_LOG("vmaCreateAllocator");
16843  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
16844  return (*pAllocator)->Init(pCreateInfo);
16845 }
16846 
16847 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
16848  VmaAllocator allocator)
16849 {
16850  if(allocator != VK_NULL_HANDLE)
16851  {
16852  VMA_DEBUG_LOG("vmaDestroyAllocator");
16853  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
16854  vma_delete(&allocationCallbacks, allocator);
16855  }
16856 }
16857 
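// Illustrative sketch, not part of the library source: the typical allocator
// lifecycle. `instance`, `physicalDevice` and `device` are assumed to be valid
// Vulkan handles created by the application.
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.instance = instance;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator); // Safe to call with VK_NULL_HANDLE; it is ignored.
*/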
16858 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
16859 {
16860  VMA_ASSERT(allocator && pAllocatorInfo);
16861  pAllocatorInfo->instance = allocator->m_hInstance;
16862  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
16863  pAllocatorInfo->device = allocator->m_hDevice;
16864 }
16865 
16866 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
16867  VmaAllocator allocator,
16868  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
16869 {
16870  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
16871  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
16872 }
16873 
16874 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
16875  VmaAllocator allocator,
16876  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
16877 {
16878  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
16879  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
16880 }
16881 
16882 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
16883  VmaAllocator allocator,
16884  uint32_t memoryTypeIndex,
16885  VkMemoryPropertyFlags* pFlags)
16886 {
16887  VMA_ASSERT(allocator && pFlags);
16888  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
16889  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
16890 }
16891 
16892 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
16893  VmaAllocator allocator,
16894  uint32_t frameIndex)
16895 {
16896  VMA_ASSERT(allocator);
16897  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
16898 
16899  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16900 
16901  allocator->SetCurrentFrameIndex(frameIndex);
16902 }
16903 
16904 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
16905  VmaAllocator allocator,
16906  VmaStats* pStats)
16907 {
16908  VMA_ASSERT(allocator && pStats);
16909  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16910  allocator->CalculateStats(pStats);
16911 }
16912 
16913 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
16914  VmaAllocator allocator,
16915  VmaBudget* pBudget)
16916 {
16917  VMA_ASSERT(allocator && pBudget);
16918  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16919  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
16920 }
16921 
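// Illustrative sketch, not part of the library source: querying the current
// budget. Note that pBudget must point to an array with one element per memory
// heap, as vmaGetBudget fills all heaps.
/*
VmaBudget budget[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(allocator, budget);

const VkPhysicalDeviceMemoryProperties* memProps = VMA_NULL;
vmaGetMemoryProperties(allocator, &memProps);
for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
{
    printf("Heap %u: usage %llu / budget %llu bytes\n",
        heapIndex,
        (unsigned long long)budget[heapIndex].usage,
        (unsigned long long)budget[heapIndex].budget);
}
*/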
16922 #if VMA_STATS_STRING_ENABLED
16923 
16924 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
16925  VmaAllocator allocator,
16926  char** ppStatsString,
16927  VkBool32 detailedMap)
16928 {
16929  VMA_ASSERT(allocator && ppStatsString);
16930  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16931 
16932  VmaStringBuilder sb(allocator);
16933  {
16934  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
16935  json.BeginObject();
16936 
16937  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
16938  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
16939 
16940  VmaStats stats;
16941  allocator->CalculateStats(&stats);
16942 
16943  json.WriteString("Total");
16944  VmaPrintStatInfo(json, stats.total);
16945 
16946  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
16947  {
16948  json.BeginString("Heap ");
16949  json.ContinueString(heapIndex);
16950  json.EndString();
16951  json.BeginObject();
16952 
16953  json.WriteString("Size");
16954  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
16955 
16956  json.WriteString("Flags");
16957  json.BeginArray(true);
16958  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
16959  {
16960  json.WriteString("DEVICE_LOCAL");
16961  }
16962  json.EndArray();
16963 
16964  json.WriteString("Budget");
16965  json.BeginObject();
16966  {
16967  json.WriteString("BlockBytes");
16968  json.WriteNumber(budget[heapIndex].blockBytes);
16969  json.WriteString("AllocationBytes");
16970  json.WriteNumber(budget[heapIndex].allocationBytes);
16971  json.WriteString("Usage");
16972  json.WriteNumber(budget[heapIndex].usage);
16973  json.WriteString("Budget");
16974  json.WriteNumber(budget[heapIndex].budget);
16975  }
16976  json.EndObject();
16977 
16978  if(stats.memoryHeap[heapIndex].blockCount > 0)
16979  {
16980  json.WriteString("Stats");
16981  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
16982  }
16983 
16984  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
16985  {
16986  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
16987  {
16988  json.BeginString("Type ");
16989  json.ContinueString(typeIndex);
16990  json.EndString();
16991 
16992  json.BeginObject();
16993 
16994  json.WriteString("Flags");
16995  json.BeginArray(true);
16996  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
16997  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
16998  {
16999  json.WriteString("DEVICE_LOCAL");
17000  }
17001  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17002  {
17003  json.WriteString("HOST_VISIBLE");
17004  }
17005  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17006  {
17007  json.WriteString("HOST_COHERENT");
17008  }
17009  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17010  {
17011  json.WriteString("HOST_CACHED");
17012  }
17013  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17014  {
17015  json.WriteString("LAZILY_ALLOCATED");
17016  }
17017  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17018  {
17019  json.WriteString(" PROTECTED");
17020  }
17021  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17022  {
17023  json.WriteString(" DEVICE_COHERENT");
17024  }
17025  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17026  {
17027  json.WriteString(" DEVICE_UNCACHED");
17028  }
17029  json.EndArray();
17030 
17031  if(stats.memoryType[typeIndex].blockCount > 0)
17032  {
17033  json.WriteString("Stats");
17034  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17035  }
17036 
17037  json.EndObject();
17038  }
17039  }
17040 
17041  json.EndObject();
17042  }
17043  if(detailedMap == VK_TRUE)
17044  {
17045  allocator->PrintDetailedMap(json);
17046  }
17047 
17048  json.EndObject();
17049  }
17050 
17051  const size_t len = sb.GetLength();
17052  char* const pChars = vma_new_array(allocator, char, len + 1);
17053  if(len > 0)
17054  {
17055  memcpy(pChars, sb.GetData(), len);
17056  }
17057  pChars[len] = '\0';
17058  *ppStatsString = pChars;
17059 }
17060 
17061 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17062  VmaAllocator allocator,
17063  char* pStatsString)
17064 {
17065  if(pStatsString != VMA_NULL)
17066  {
17067  VMA_ASSERT(allocator);
17068  size_t len = strlen(pStatsString);
17069  vma_delete_array(allocator, pStatsString, len + 1);
17070  }
17071 }
17072 
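// Illustrative sketch, not part of the library source: dumping the full JSON
// statistics, e.g. while hunting a memory leak.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/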
17073 #endif // #if VMA_STATS_STRING_ENABLED
17074 
17075 /*
17076 This function is not protected by any mutex because it just reads immutable data.
17077 */
17078 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17079  VmaAllocator allocator,
17080  uint32_t memoryTypeBits,
17081  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17082  uint32_t* pMemoryTypeIndex)
17083 {
17084  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17085  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17086  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17087 
17088  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17089 
17090  if(pAllocationCreateInfo->memoryTypeBits != 0)
17091  {
17092  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17093  }
17094 
17095  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17096  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17097  uint32_t notPreferredFlags = 0;
17098 
17099  // Convert usage to requiredFlags and preferredFlags.
17100  switch(pAllocationCreateInfo->usage)
17101  {
17102  case VMA_MEMORY_USAGE_UNKNOWN:
17103  break;
17104  case VMA_MEMORY_USAGE_GPU_ONLY:
17105  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17106  {
17107  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17108  }
17109  break;
17110  case VMA_MEMORY_USAGE_CPU_ONLY:
17111  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17112  break;
17113  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17114  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17115  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17116  {
17117  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17118  }
17119  break;
17120  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17121  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17122  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17123  break;
17124  case VMA_MEMORY_USAGE_CPU_COPY:
17125  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17126  break;
17127  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17128  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17129  break;
17130  default:
17131  VMA_ASSERT(0);
17132  break;
17133  }
17134 
17135  // Avoid DEVICE_COHERENT unless explicitly requested.
17136  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17137  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17138  {
17139  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17140  }
17141 
17142  *pMemoryTypeIndex = UINT32_MAX;
17143  uint32_t minCost = UINT32_MAX;
17144  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17145  memTypeIndex < allocator->GetMemoryTypeCount();
17146  ++memTypeIndex, memTypeBit <<= 1)
17147  {
17148  // This memory type is acceptable according to memoryTypeBits bitmask.
17149  if((memTypeBit & memoryTypeBits) != 0)
17150  {
17151  const VkMemoryPropertyFlags currFlags =
17152  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17153  // This memory type contains requiredFlags.
17154  if((requiredFlags & ~currFlags) == 0)
17155  {
17156  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17157  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17158  VmaCountBitsSet(currFlags & notPreferredFlags);
17159  // Remember memory type with lowest cost.
17160  if(currCost < minCost)
17161  {
17162  *pMemoryTypeIndex = memTypeIndex;
17163  if(currCost == 0)
17164  {
17165  return VK_SUCCESS;
17166  }
17167  minCost = currCost;
17168  }
17169  }
17170  }
17171  }
17172  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
17173 }
17174 
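// Illustrative sketch, not part of the library source: picking a memory type
// for a staging buffer. `memoryTypeBits` is assumed to come from
// vkGetBufferMemoryRequirements for the buffer in question.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(
    allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
// res == VK_ERROR_FEATURE_NOT_PRESENT if no memory type satisfies requiredFlags.
*/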
17175 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
17176  VmaAllocator allocator,
17177  const VkBufferCreateInfo* pBufferCreateInfo,
17178  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17179  uint32_t* pMemoryTypeIndex)
17180 {
17181  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17182  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
17183  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17184  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17185 
17186  const VkDevice hDev = allocator->m_hDevice;
17187  VkBuffer hBuffer = VK_NULL_HANDLE;
17188  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
17189  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
17190  if(res == VK_SUCCESS)
17191  {
17192  VkMemoryRequirements memReq = {};
17193  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
17194  hDev, hBuffer, &memReq);
17195 
17196  res = vmaFindMemoryTypeIndex(
17197  allocator,
17198  memReq.memoryTypeBits,
17199  pAllocationCreateInfo,
17200  pMemoryTypeIndex);
17201 
17202  allocator->GetVulkanFunctions().vkDestroyBuffer(
17203  hDev, hBuffer, allocator->GetAllocationCallbacks());
17204  }
17205  return res;
17206 }
17207 
17208 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
17209  VmaAllocator allocator,
17210  const VkImageCreateInfo* pImageCreateInfo,
17211  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17212  uint32_t* pMemoryTypeIndex)
17213 {
17214  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17215  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
17216  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17217  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17218 
17219  const VkDevice hDev = allocator->m_hDevice;
17220  VkImage hImage = VK_NULL_HANDLE;
17221  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
17222  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
17223  if(res == VK_SUCCESS)
17224  {
17225  VkMemoryRequirements memReq = {};
17226  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
17227  hDev, hImage, &memReq);
17228 
17229  res = vmaFindMemoryTypeIndex(
17230  allocator,
17231  memReq.memoryTypeBits,
17232  pAllocationCreateInfo,
17233  pMemoryTypeIndex);
17234 
17235  allocator->GetVulkanFunctions().vkDestroyImage(
17236  hDev, hImage, allocator->GetAllocationCallbacks());
17237  }
17238  return res;
17239 }
17240 
17241 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
17242  VmaAllocator allocator,
17243  const VmaPoolCreateInfo* pCreateInfo,
17244  VmaPool* pPool)
17245 {
17246  VMA_ASSERT(allocator && pCreateInfo && pPool);
17247 
17248  VMA_DEBUG_LOG("vmaCreatePool");
17249 
17250  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17251 
17252  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
17253 
17254 #if VMA_RECORDING_ENABLED
17255  if(allocator->GetRecorder() != VMA_NULL)
17256  {
17257  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
17258  }
17259 #endif
17260 
17261  return res;
17262 }
17263 
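// Illustrative sketch, not part of the library source: creating a custom pool
// for buffers of a known usage, using vmaFindMemoryTypeIndexForBufferInfo
// (defined above) to choose the memory type.
/*
VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCreateInfo.size = 1024; // Arbitrary representative size.
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Optional: fixed 64 MiB blocks.

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
*/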
17264 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
17265  VmaAllocator allocator,
17266  VmaPool pool)
17267 {
17268  VMA_ASSERT(allocator);
17269 
17270  if(pool == VK_NULL_HANDLE)
17271  {
17272  return;
17273  }
17274 
17275  VMA_DEBUG_LOG("vmaDestroyPool");
17276 
17277  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17278 
17279 #if VMA_RECORDING_ENABLED
17280  if(allocator->GetRecorder() != VMA_NULL)
17281  {
17282  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
17283  }
17284 #endif
17285 
17286  allocator->DestroyPool(pool);
17287 }
17288 
17289 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
17290  VmaAllocator allocator,
17291  VmaPool pool,
17292  VmaPoolStats* pPoolStats)
17293 {
17294  VMA_ASSERT(allocator && pool && pPoolStats);
17295 
17296  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17297 
17298  allocator->GetPoolStats(pool, pPoolStats);
17299 }
17300 
17301 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
17302  VmaAllocator allocator,
17303  VmaPool pool,
17304  size_t* pLostAllocationCount)
17305 {
17306  VMA_ASSERT(allocator && pool);
17307 
17308  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17309 
17310 #if VMA_RECORDING_ENABLED
17311  if(allocator->GetRecorder() != VMA_NULL)
17312  {
17313  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
17314  }
17315 #endif
17316 
17317  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
17318 }
17319 
17320 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
17321 {
17322  VMA_ASSERT(allocator && pool);
17323 
17324  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17325 
17326  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
17327 
17328  return allocator->CheckPoolCorruption(pool);
17329 }
17330 
17331 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
17332  VmaAllocator allocator,
17333  VmaPool pool,
17334  const char** ppName)
17335 {
17336  VMA_ASSERT(allocator && pool);
17337 
17338  VMA_DEBUG_LOG("vmaGetPoolName");
17339 
17340  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17341 
17342  *ppName = pool->GetName();
17343 }
17344 
17345 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
17346  VmaAllocator allocator,
17347  VmaPool pool,
17348  const char* pName)
17349 {
17350  VMA_ASSERT(allocator && pool);
17351 
17352  VMA_DEBUG_LOG("vmaSetPoolName");
17353 
17354  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17355 
17356  pool->SetName(pName);
17357 
17358 #if VMA_RECORDING_ENABLED
17359  if(allocator->GetRecorder() != VMA_NULL)
17360  {
17361  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
17362  }
17363 #endif
17364 }
17365 
17366 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
17367  VmaAllocator allocator,
17368  const VkMemoryRequirements* pVkMemoryRequirements,
17369  const VmaAllocationCreateInfo* pCreateInfo,
17370  VmaAllocation* pAllocation,
17371  VmaAllocationInfo* pAllocationInfo)
17372 {
17373  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
17374 
17375  VMA_DEBUG_LOG("vmaAllocateMemory");
17376 
17377  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17378 
17379  VkResult result = allocator->AllocateMemory(
17380  *pVkMemoryRequirements,
17381  false, // requiresDedicatedAllocation
17382  false, // prefersDedicatedAllocation
17383  VK_NULL_HANDLE, // dedicatedBuffer
17384  VK_NULL_HANDLE, // dedicatedImage
17385  *pCreateInfo,
17386  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17387  1, // allocationCount
17388  pAllocation);
17389 
17390 #if VMA_RECORDING_ENABLED
17391  if(allocator->GetRecorder() != VMA_NULL)
17392  {
17393  allocator->GetRecorder()->RecordAllocateMemory(
17394  allocator->GetCurrentFrameIndex(),
17395  *pVkMemoryRequirements,
17396  *pCreateInfo,
17397  *pAllocation);
17398  }
17399 #endif
17400 
17401  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17402  {
17403  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17404  }
17405 
17406  return result;
17407 }
17408 
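// Illustrative sketch, not part of the library source: allocating memory for a
// buffer created manually, then binding it. `device` and `buffer` are assumed
// to exist in the caller's code. Prefer vmaCreateBuffer, which performs all
// three steps in one call.
/*
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, VMA_NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, allocation, buffer);
}
*/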
17409 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
17410  VmaAllocator allocator,
17411  const VkMemoryRequirements* pVkMemoryRequirements,
17412  const VmaAllocationCreateInfo* pCreateInfo,
17413  size_t allocationCount,
17414  VmaAllocation* pAllocations,
17415  VmaAllocationInfo* pAllocationInfo)
17416 {
17417  if(allocationCount == 0)
17418  {
17419  return VK_SUCCESS;
17420  }
17421 
17422  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
17423 
17424  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
17425 
17426  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17427 
17428  VkResult result = allocator->AllocateMemory(
17429  *pVkMemoryRequirements,
17430  false, // requiresDedicatedAllocation
17431  false, // prefersDedicatedAllocation
17432  VK_NULL_HANDLE, // dedicatedBuffer
17433  VK_NULL_HANDLE, // dedicatedImage
17434  *pCreateInfo,
17435  VMA_SUBALLOCATION_TYPE_UNKNOWN,
17436  allocationCount,
17437  pAllocations);
17438 
17439 #if VMA_RECORDING_ENABLED
17440  if(allocator->GetRecorder() != VMA_NULL)
17441  {
17442  allocator->GetRecorder()->RecordAllocateMemoryPages(
17443  allocator->GetCurrentFrameIndex(),
17444  *pVkMemoryRequirements,
17445  *pCreateInfo,
17446  (uint64_t)allocationCount,
17447  pAllocations);
17448  }
17449 #endif
17450 
17451  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
17452  {
17453  for(size_t i = 0; i < allocationCount; ++i)
17454  {
17455  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
17456  }
17457  }
17458 
17459  return result;
17460 }
17461 
17462 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
17463  VmaAllocator allocator,
17464  VkBuffer buffer,
17465  const VmaAllocationCreateInfo* pCreateInfo,
17466  VmaAllocation* pAllocation,
17467  VmaAllocationInfo* pAllocationInfo)
17468 {
17469  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17470 
17471  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
17472 
17473  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17474 
17475  VkMemoryRequirements vkMemReq = {};
17476  bool requiresDedicatedAllocation = false;
17477  bool prefersDedicatedAllocation = false;
17478  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
17479  requiresDedicatedAllocation,
17480  prefersDedicatedAllocation);
17481 
17482  VkResult result = allocator->AllocateMemory(
17483  vkMemReq,
17484  requiresDedicatedAllocation,
17485  prefersDedicatedAllocation,
17486  buffer, // dedicatedBuffer
17487  VK_NULL_HANDLE, // dedicatedImage
17488  *pCreateInfo,
17489  VMA_SUBALLOCATION_TYPE_BUFFER,
17490  1, // allocationCount
17491  pAllocation);
17492 
17493 #if VMA_RECORDING_ENABLED
17494  if(allocator->GetRecorder() != VMA_NULL)
17495  {
17496  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
17497  allocator->GetCurrentFrameIndex(),
17498  vkMemReq,
17499  requiresDedicatedAllocation,
17500  prefersDedicatedAllocation,
17501  *pCreateInfo,
17502  *pAllocation);
17503  }
17504 #endif
17505 
17506  if(pAllocationInfo && result == VK_SUCCESS)
17507  {
17508  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17509  }
17510 
17511  return result;
17512 }
17513 
17514 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17515  VmaAllocator allocator,
17516  VkImage image,
17517  const VmaAllocationCreateInfo* pCreateInfo,
17518  VmaAllocation* pAllocation,
17519  VmaAllocationInfo* pAllocationInfo)
17520 {
17521  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17522 
17523  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17524 
17525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17526 
17527  VkMemoryRequirements vkMemReq = {};
17528  bool requiresDedicatedAllocation = false;
17529  bool prefersDedicatedAllocation = false;
17530  allocator->GetImageMemoryRequirements(image, vkMemReq,
17531  requiresDedicatedAllocation, prefersDedicatedAllocation);
17532 
17533  VkResult result = allocator->AllocateMemory(
17534  vkMemReq,
17535  requiresDedicatedAllocation,
17536  prefersDedicatedAllocation,
17537  VK_NULL_HANDLE, // dedicatedBuffer
17538  image, // dedicatedImage
17539  *pCreateInfo,
17540  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17541  1, // allocationCount
17542  pAllocation);
17543 
17544 #if VMA_RECORDING_ENABLED
17545  if(allocator->GetRecorder() != VMA_NULL)
17546  {
17547  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17548  allocator->GetCurrentFrameIndex(),
17549  vkMemReq,
17550  requiresDedicatedAllocation,
17551  prefersDedicatedAllocation,
17552  *pCreateInfo,
17553  *pAllocation);
17554  }
17555 #endif
17556 
17557  if(pAllocationInfo && result == VK_SUCCESS)
17558  {
17559  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17560  }
17561 
17562  return result;
17563 }
17564 
17565 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17566  VmaAllocator allocator,
17567  VmaAllocation allocation)
17568 {
17569  VMA_ASSERT(allocator);
17570 
17571  if(allocation == VK_NULL_HANDLE)
17572  {
17573  return;
17574  }
17575 
17576  VMA_DEBUG_LOG("vmaFreeMemory");
17577 
17578  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17579 
17580 #if VMA_RECORDING_ENABLED
17581  if(allocator->GetRecorder() != VMA_NULL)
17582  {
17583  allocator->GetRecorder()->RecordFreeMemory(
17584  allocator->GetCurrentFrameIndex(),
17585  allocation);
17586  }
17587 #endif
17588 
17589  allocator->FreeMemory(
17590  1, // allocationCount
17591  &allocation);
17592 }
17593 
17594 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17595  VmaAllocator allocator,
17596  size_t allocationCount,
17597  VmaAllocation* pAllocations)
17598 {
17599  if(allocationCount == 0)
17600  {
17601  return;
17602  }
17603 
17604  VMA_ASSERT(allocator);
17605 
17606  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17607 
17608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17609 
17610 #if VMA_RECORDING_ENABLED
17611  if(allocator->GetRecorder() != VMA_NULL)
17612  {
17613  allocator->GetRecorder()->RecordFreeMemoryPages(
17614  allocator->GetCurrentFrameIndex(),
17615  (uint64_t)allocationCount,
17616  pAllocations);
17617  }
17618 #endif
17619 
17620  allocator->FreeMemory(allocationCount, pAllocations);
17621 }
17622 
17623 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17624  VmaAllocator allocator,
17625  VmaAllocation allocation,
17626  VkDeviceSize newSize)
17627 {
17628  VMA_ASSERT(allocator && allocation);
17629 
17630  VMA_DEBUG_LOG("vmaResizeAllocation");
17631 
17632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17633 
17634  return allocator->ResizeAllocation(allocation, newSize);
17635 }
17636 
17637 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17638  VmaAllocator allocator,
17639  VmaAllocation allocation,
17640  VmaAllocationInfo* pAllocationInfo)
17641 {
17642  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17643 
17644  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17645 
17646 #if VMA_RECORDING_ENABLED
17647  if(allocator->GetRecorder() != VMA_NULL)
17648  {
17649  allocator->GetRecorder()->RecordGetAllocationInfo(
17650  allocator->GetCurrentFrameIndex(),
17651  allocation);
17652  }
17653 #endif
17654 
17655  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17656 }
17657 
17658 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17659  VmaAllocator allocator,
17660  VmaAllocation allocation)
17661 {
17662  VMA_ASSERT(allocator && allocation);
17663 
17664  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17665 
17666 #if VMA_RECORDING_ENABLED
17667  if(allocator->GetRecorder() != VMA_NULL)
17668  {
17669  allocator->GetRecorder()->RecordTouchAllocation(
17670  allocator->GetCurrentFrameIndex(),
17671  allocation);
17672  }
17673 #endif
17674 
17675  return allocator->TouchAllocation(allocation);
17676 }
17677 
17678 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17679  VmaAllocator allocator,
17680  VmaAllocation allocation,
17681  void* pUserData)
17682 {
17683  VMA_ASSERT(allocator && allocation);
17684 
17685  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17686 
17687  allocation->SetUserData(allocator, pUserData);
17688 
17689 #if VMA_RECORDING_ENABLED
17690  if(allocator->GetRecorder() != VMA_NULL)
17691  {
17692  allocator->GetRecorder()->RecordSetAllocationUserData(
17693  allocator->GetCurrentFrameIndex(),
17694  allocation,
17695  pUserData);
17696  }
17697 #endif
17698 }
17699 
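// Illustrative sketch, not part of the library source: attaching a debug name
// to an allocation. When the allocation was created with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, pUserData is treated as a
// null-terminated string and an internal copy is made.
/*
vmaSetAllocationUserData(allocator, allocation, (void*)"Character texture");

VmaAllocationInfo allocInfo = {};
vmaGetAllocationInfo(allocator, allocation, &allocInfo);
const char* name = (const char*)allocInfo.pUserData;
*/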
17700 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17701  VmaAllocator allocator,
17702  VmaAllocation* pAllocation)
17703 {
17704  VMA_ASSERT(allocator && pAllocation);
17705 
17706  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17707 
17708  allocator->CreateLostAllocation(pAllocation);
17709 
17710 #if VMA_RECORDING_ENABLED
17711  if(allocator->GetRecorder() != VMA_NULL)
17712  {
17713  allocator->GetRecorder()->RecordCreateLostAllocation(
17714  allocator->GetCurrentFrameIndex(),
17715  *pAllocation);
17716  }
17717 #endif
17718 }
17719 
17720 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17721  VmaAllocator allocator,
17722  VmaAllocation allocation,
17723  void** ppData)
17724 {
17725  VMA_ASSERT(allocator && allocation && ppData);
17726 
17727  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17728 
17729  VkResult res = allocator->Map(allocation, ppData);
17730 
17731 #if VMA_RECORDING_ENABLED
17732  if(allocator->GetRecorder() != VMA_NULL)
17733  {
17734  allocator->GetRecorder()->RecordMapMemory(
17735  allocator->GetCurrentFrameIndex(),
17736  allocation);
17737  }
17738 #endif
17739 
17740  return res;
17741 }
17742 
17743 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17744  VmaAllocator allocator,
17745  VmaAllocation allocation)
17746 {
17747  VMA_ASSERT(allocator && allocation);
17748 
17749  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17750 
17751 #if VMA_RECORDING_ENABLED
17752  if(allocator->GetRecorder() != VMA_NULL)
17753  {
17754  allocator->GetRecorder()->RecordUnmapMemory(
17755  allocator->GetCurrentFrameIndex(),
17756  allocation);
17757  }
17758 #endif
17759 
17760  allocator->Unmap(allocation);
17761 }
17762 
17763 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17764 {
17765  VMA_ASSERT(allocator && allocation);
17766 
17767  VMA_DEBUG_LOG("vmaFlushAllocation");
17768 
17769  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17770 
17771  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17772 
17773 #if VMA_RECORDING_ENABLED
17774  if(allocator->GetRecorder() != VMA_NULL)
17775  {
17776  allocator->GetRecorder()->RecordFlushAllocation(
17777  allocator->GetCurrentFrameIndex(),
17778  allocation, offset, size);
17779  }
17780 #endif
17781 }
17782 
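// Illustrative sketch, not part of the library source: uploading data through
// a host-visible allocation. `srcData` and `srcDataSize` are assumed to exist
// in the caller's code. The flush is skipped internally for HOST_COHERENT
// memory types, so it is always safe to call.
/*
void* mappedData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
*/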
17783 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17784 {
17785  VMA_ASSERT(allocator && allocation);
17786 
17787  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17788 
17789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17790 
17791  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17792 
17793 #if VMA_RECORDING_ENABLED
17794  if(allocator->GetRecorder() != VMA_NULL)
17795  {
17796  allocator->GetRecorder()->RecordInvalidateAllocation(
17797  allocator->GetCurrentFrameIndex(),
17798  allocation, offset, size);
17799  }
17800 #endif
17801 }
17802 
17803 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17804 {
17805  VMA_ASSERT(allocator);
17806 
17807  VMA_DEBUG_LOG("vmaCheckCorruption");
17808 
17809  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17810 
17811  return allocator->CheckCorruption(memoryTypeBits);
17812 }
17813 
17814 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17815  VmaAllocator allocator,
17816  VmaAllocation* pAllocations,
17817  size_t allocationCount,
17818  VkBool32* pAllocationsChanged,
17819  const VmaDefragmentationInfo *pDefragmentationInfo,
17820  VmaDefragmentationStats* pDefragmentationStats)
17821 {
17822  // Deprecated interface, reimplemented using new one.
17823 
17824  VmaDefragmentationInfo2 info2 = {};
17825  info2.allocationCount = (uint32_t)allocationCount;
17826  info2.pAllocations = pAllocations;
17827  info2.pAllocationsChanged = pAllocationsChanged;
17828  if(pDefragmentationInfo != VMA_NULL)
17829  {
17830  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17831  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17832  }
17833  else
17834  {
17835  info2.maxCpuAllocationsToMove = UINT32_MAX;
17836  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
17837  }
17838  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
17839 
17840  VmaDefragmentationContext ctx;
17841  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
17842  if(res == VK_NOT_READY)
17843  {
17844  res = vmaDefragmentationEnd(allocator, ctx);
17845  }
17846  return res;
17847 }
17848 
17849 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
17850  VmaAllocator allocator,
17851  const VmaDefragmentationInfo2* pInfo,
17852  VmaDefragmentationStats* pStats,
17853  VmaDefragmentationContext *pContext)
17854 {
17855  VMA_ASSERT(allocator && pInfo && pContext);
17856 
17857  // Degenerate case: Nothing to defragment.
17858  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
17859  {
17860  return VK_SUCCESS;
17861  }
17862 
17863  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
17864  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
17865  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
17866  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17867 
17868  VMA_DEBUG_LOG("vmaDefragmentationBegin");
17869 
17870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17871 
17872  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
17873 
17874 #if VMA_RECORDING_ENABLED
17875  if(allocator->GetRecorder() != VMA_NULL)
17876  {
17877  allocator->GetRecorder()->RecordDefragmentationBegin(
17878  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
17879  }
17880 #endif
17881 
17882  return res;
17883 }
17884 
17885 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
17886  VmaAllocator allocator,
17887  VmaDefragmentationContext context)
17888 {
17889  VMA_ASSERT(allocator);
17890 
17891  VMA_DEBUG_LOG("vmaDefragmentationEnd");
17892 
17893  if(context != VK_NULL_HANDLE)
17894  {
17895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17896 
17897 #if VMA_RECORDING_ENABLED
17898  if(allocator->GetRecorder() != VMA_NULL)
17899  {
17900  allocator->GetRecorder()->RecordDefragmentationEnd(
17901  allocator->GetCurrentFrameIndex(), context);
17902  }
17903 #endif
17904 
17905  return allocator->DefragmentationEnd(context);
17906  }
17907  else
17908  {
17909  return VK_SUCCESS;
17910  }
17911 }
17912 
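// Illustrative sketch, not part of the library source: CPU-side
// defragmentation of a set of allocations, mirroring the deprecated
// vmaDefragment wrapper above. `allocations`, `allocsChanged` and `allocCount`
// are assumed to exist in the caller's code.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocations;          // VmaAllocation[allocCount]
defragInfo.pAllocationsChanged = allocsChanged; // VkBool32[allocCount], optional
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
// Buffers/images bound to moved allocations must be recreated and rebound.
*/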
17913 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
17914  VmaAllocator allocator,
17915  VmaDefragmentationContext context,
17916  VmaDefragmentationPassInfo* pInfo
17917  )
17918 {
17919  VMA_ASSERT(allocator);
17920  VMA_ASSERT(pInfo);
17921  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
17922 
17923  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
17924 
17925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17926 
17927  if(context == VK_NULL_HANDLE)
17928  {
17929  pInfo->moveCount = 0;
17930  return VK_SUCCESS;
17931  }
17932 
17933  return allocator->DefragmentationPassBegin(pInfo, context);
17934 }
17935 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
17936  VmaAllocator allocator,
17937  VmaDefragmentationContext context)
17938 {
17939  VMA_ASSERT(allocator);
17940 
17941  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
17942  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17943 
17944  if(context == VK_NULL_HANDLE)
17945  return VK_SUCCESS;
17946 
17947  return allocator->DefragmentationPassEnd(context);
17948 }
17949 
17950 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
17951  VmaAllocator allocator,
17952  VmaAllocation allocation,
17953  VkBuffer buffer)
17954 {
17955  VMA_ASSERT(allocator && allocation && buffer);
17956 
17957  VMA_DEBUG_LOG("vmaBindBufferMemory");
17958 
17959  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17960 
17961  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
17962 }
17963 
17964 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
17965  VmaAllocator allocator,
17966  VmaAllocation allocation,
17967  VkDeviceSize allocationLocalOffset,
17968  VkBuffer buffer,
17969  const void* pNext)
17970 {
17971  VMA_ASSERT(allocator && allocation && buffer);
17972 
17973  VMA_DEBUG_LOG("vmaBindBufferMemory2");
17974 
17975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17976 
17977  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
17978 }
17979 
17980 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
17981  VmaAllocator allocator,
17982  VmaAllocation allocation,
17983  VkImage image)
17984 {
17985  VMA_ASSERT(allocator && allocation && image);
17986 
17987  VMA_DEBUG_LOG("vmaBindImageMemory");
17988 
17989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17990 
17991  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
17992 }
17993 
17994 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
17995  VmaAllocator allocator,
17996  VmaAllocation allocation,
17997  VkDeviceSize allocationLocalOffset,
17998  VkImage image,
17999  const void* pNext)
18000 {
18001  VMA_ASSERT(allocator && allocation && image);
18002 
18003  VMA_DEBUG_LOG("vmaBindImageMemory2");
18004 
18005  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18006 
18007  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18008 }
18009 
18010 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18011  VmaAllocator allocator,
18012  const VkBufferCreateInfo* pBufferCreateInfo,
18013  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18014  VkBuffer* pBuffer,
18015  VmaAllocation* pAllocation,
18016  VmaAllocationInfo* pAllocationInfo)
18017 {
18018  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18019 
18020  if(pBufferCreateInfo->size == 0)
18021  {
18022  return VK_ERROR_VALIDATION_FAILED_EXT;
18023  }
18024 
18025  VMA_DEBUG_LOG("vmaCreateBuffer");
18026 
18027  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18028 
18029  *pBuffer = VK_NULL_HANDLE;
18030  *pAllocation = VK_NULL_HANDLE;
18031 
18032  // 1. Create VkBuffer.
18033  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18034  allocator->m_hDevice,
18035  pBufferCreateInfo,
18036  allocator->GetAllocationCallbacks(),
18037  pBuffer);
18038  if(res >= 0)
18039  {
18040  // 2. vkGetBufferMemoryRequirements.
18041  VkMemoryRequirements vkMemReq = {};
18042  bool requiresDedicatedAllocation = false;
18043  bool prefersDedicatedAllocation = false;
18044  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18045  requiresDedicatedAllocation, prefersDedicatedAllocation);
18046 
18047  // Make sure alignment requirements for specific buffer usages reported
18048  // in Physical Device Properties are included in alignment reported by memory requirements.
18049  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
18050  {
18051  VMA_ASSERT(vkMemReq.alignment %
18052  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
18053  }
18054  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
18055  {
18056  VMA_ASSERT(vkMemReq.alignment %
18057  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
18058  }
18059  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
18060  {
18061  VMA_ASSERT(vkMemReq.alignment %
18062  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
18063  }
18064 
18065  // 3. Allocate memory using allocator.
18066  res = allocator->AllocateMemory(
18067  vkMemReq,
18068  requiresDedicatedAllocation,
18069  prefersDedicatedAllocation,
18070  *pBuffer, // dedicatedBuffer
18071  VK_NULL_HANDLE, // dedicatedImage
18072  *pAllocationCreateInfo,
18073  VMA_SUBALLOCATION_TYPE_BUFFER,
18074  1, // allocationCount
18075  pAllocation);
18076 
18077 #if VMA_RECORDING_ENABLED
18078  if(allocator->GetRecorder() != VMA_NULL)
18079  {
18080  allocator->GetRecorder()->RecordCreateBuffer(
18081  allocator->GetCurrentFrameIndex(),
18082  *pBufferCreateInfo,
18083  *pAllocationCreateInfo,
18084  *pAllocation);
18085  }
18086 #endif
18087 
18088  if(res >= 0)
18089  {
18090  // 4. Bind buffer with memory.
18091  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18092  {
18093  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18094  }
18095  if(res >= 0)
18096  {
18097  // All steps succeeded.
18098  #if VMA_STATS_STRING_ENABLED
18099  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
18100  #endif
18101  if(pAllocationInfo != VMA_NULL)
18102  {
18103  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18104  }
18105 
18106  return VK_SUCCESS;
18107  }
18108  allocator->FreeMemory(
18109  1, // allocationCount
18110  pAllocation);
18111  *pAllocation = VK_NULL_HANDLE;
18112  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18113  *pBuffer = VK_NULL_HANDLE;
18114  return res;
18115  }
18116  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
18117  *pBuffer = VK_NULL_HANDLE;
18118  return res;
18119  }
18120  return res;
18121 }
18122 
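// Illustrative sketch, not part of the library source: the common path that
// vmaCreateBuffer wraps - creating a device-local vertex buffer in one call.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
// ...
vmaDestroyBuffer(allocator, buf, alloc);
*/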
18123 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
18124  VmaAllocator allocator,
18125  VkBuffer buffer,
18126  VmaAllocation allocation)
18127 {
18128  VMA_ASSERT(allocator);
18129 
18130  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18131  {
18132  return;
18133  }
18134 
18135  VMA_DEBUG_LOG("vmaDestroyBuffer");
18136 
18137  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18138 
18139 #if VMA_RECORDING_ENABLED
18140  if(allocator->GetRecorder() != VMA_NULL)
18141  {
18142  allocator->GetRecorder()->RecordDestroyBuffer(
18143  allocator->GetCurrentFrameIndex(),
18144  allocation);
18145  }
18146 #endif
18147 
18148  if(buffer != VK_NULL_HANDLE)
18149  {
18150  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
18151  }
18152 
18153  if(allocation != VK_NULL_HANDLE)
18154  {
18155  allocator->FreeMemory(
18156  1, // allocationCount
18157  &allocation);
18158  }
18159 }
18160 
18161 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
18162  VmaAllocator allocator,
18163  const VkImageCreateInfo* pImageCreateInfo,
18164  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18165  VkImage* pImage,
18166  VmaAllocation* pAllocation,
18167  VmaAllocationInfo* pAllocationInfo)
18168 {
18169  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
18170 
18171  if(pImageCreateInfo->extent.width == 0 ||
18172  pImageCreateInfo->extent.height == 0 ||
18173  pImageCreateInfo->extent.depth == 0 ||
18174  pImageCreateInfo->mipLevels == 0 ||
18175  pImageCreateInfo->arrayLayers == 0)
18176  {
18177  return VK_ERROR_VALIDATION_FAILED_EXT;
18178  }
18179 
18180  VMA_DEBUG_LOG("vmaCreateImage");
18181 
18182  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18183 
18184  *pImage = VK_NULL_HANDLE;
18185  *pAllocation = VK_NULL_HANDLE;
18186 
18187  // 1. Create VkImage.
18188  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
18189  allocator->m_hDevice,
18190  pImageCreateInfo,
18191  allocator->GetAllocationCallbacks(),
18192  pImage);
18193  if(res >= 0)
18194  {
18195  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
18196  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
18197  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
18198 
18199  // 2. Allocate memory using allocator.
18200  VkMemoryRequirements vkMemReq = {};
18201  bool requiresDedicatedAllocation = false;
18202  bool prefersDedicatedAllocation = false;
18203  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
18204  requiresDedicatedAllocation, prefersDedicatedAllocation);
18205 
18206  res = allocator->AllocateMemory(
18207  vkMemReq,
18208  requiresDedicatedAllocation,
18209  prefersDedicatedAllocation,
18210  VK_NULL_HANDLE, // dedicatedBuffer
18211  *pImage, // dedicatedImage
18212  *pAllocationCreateInfo,
18213  suballocType,
18214  1, // allocationCount
18215  pAllocation);
18216 
18217 #if VMA_RECORDING_ENABLED
18218  if(allocator->GetRecorder() != VMA_NULL)
18219  {
18220  allocator->GetRecorder()->RecordCreateImage(
18221  allocator->GetCurrentFrameIndex(),
18222  *pImageCreateInfo,
18223  *pAllocationCreateInfo,
18224  *pAllocation);
18225  }
18226 #endif
18227 
18228  if(res >= 0)
18229  {
18230  // 3. Bind image with memory.
18231  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18232  {
18233  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
18234  }
18235  if(res >= 0)
18236  {
18237  // All steps succeeded.
18238  #if VMA_STATS_STRING_ENABLED
18239  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
18240  #endif
18241  if(pAllocationInfo != VMA_NULL)
18242  {
18243  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18244  }
18245 
18246  return VK_SUCCESS;
18247  }
18248  allocator->FreeMemory(
18249  1, // allocationCount
18250  pAllocation);
18251  *pAllocation = VK_NULL_HANDLE;
18252  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18253  *pImage = VK_NULL_HANDLE;
18254  return res;
18255  }
18256  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
18257  *pImage = VK_NULL_HANDLE;
18258  return res;
18259  }
18260  return res;
18261 }
18262 
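// Illustrative sketch, not part of the library source: creating a sampled 2D
// texture with memory allocated and bound in one call.
/*
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, VMA_NULL);
// ...
vmaDestroyImage(allocator, image, alloc);
*/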
18263 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
18264  VmaAllocator allocator,
18265  VkImage image,
18266  VmaAllocation allocation)
18267 {
18268  VMA_ASSERT(allocator);
18269 
18270  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
18271  {
18272  return;
18273  }
18274 
18275  VMA_DEBUG_LOG("vmaDestroyImage");
18276 
18277  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18278 
18279 #if VMA_RECORDING_ENABLED
18280  if(allocator->GetRecorder() != VMA_NULL)
18281  {
18282  allocator->GetRecorder()->RecordDestroyImage(
18283  allocator->GetCurrentFrameIndex(),
18284  allocation);
18285  }
18286 #endif
18287 
18288  if(image != VK_NULL_HANDLE)
18289  {
18290  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
18291  }
18292  if(allocation != VK_NULL_HANDLE)
18293  {
18294  allocator->FreeMemory(
18295  1, // allocationCount
18296  &allocation);
18297  }
18298 }
18299 
18300 #endif // #ifdef VMA_IMPLEMENTATION
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2058
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2016
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1924
VMA_RECORD_FLAG_BITS_MAX_ENUM
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2053
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2014
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1901
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2079
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2683
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3234
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2105
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:1985
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2284
VmaDefragmentationPassMoveInfo::memory
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3302
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2429
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2755
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2512
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1933
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2761
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2492
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2045
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1920
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2479
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2085
VMA_RECORD_FLUSH_AFTER_CALL_BIT
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2051
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2019
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2898
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2252
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2586
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2258
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2018
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1827
VmaDefragmentationPassMoveInfo::offset
VkDeviceSize offset
Definition: vk_mem_alloc.h:3303
VmaDefragmentationPassInfo::pMoves
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3312
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2523
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaAllocatorInfo::instance
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2184
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2309
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2030
VmaAllocatorCreateInfo
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:2073
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2453
VmaAllocatorInfo::device
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2194
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3220
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2774
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2516
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1958
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3329
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2507
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2367
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2248
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2711
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2723
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3252
VmaDefragmentationPassMoveInfo
Definition: vk_mem_alloc.h:3300
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2764
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2007
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2005
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3218
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2903
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2435
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2015
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2704
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2579
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2266
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2442
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:1973
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3231
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2023
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to commandBuffer.
Definition: vk_mem_alloc.h:3283
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3333
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2758
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2017
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2055
VMA_MEMORY_USAGE_CPU_ONLY
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2399
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3268
VmaAllocation
Represents a single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2421
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
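
A one-line sketch; myTag is a hypothetical, caller-owned pointer (with the default allocation flags VMA stores the pointer verbatim and never dereferences it):

    // Attach an application-side tag; read it back later via
    // vmaGetAllocationInfo() as VmaAllocationInfo::pUserData.
    vmaSetAllocationUserData(allocator, allocation, myTag);
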
VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3219
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2149
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2022
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2024
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3297
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1907
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2262
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2728
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2164
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2245
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3337
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationPassInfo::moveCount
uint32_t moveCount
Definition: vk_mem_alloc.h:3311
VMA_MEMORY_USAGE_GPU_ONLY
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2389
vmaBeginDefragmentationPass
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
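
A minimal sketch, assuming memReq came from vkGetBufferMemoryRequirements() or vkGetImageMemoryRequirements() for the resource in question:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // On success, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
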
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
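
A sketch of creating a custom pool, assuming memTypeIndex was found with one of the vmaFindMemoryTypeIndex* helpers; the block size and count below are illustrative, not recommendations:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block
    poolCreateInfo.maxBlockCount = 4;               // at most 4 such blocks

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations are routed to it via VmaAllocationCreateInfo::pool.
    // When done: vmaDestroyPool(allocator, pool);
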
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2256
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
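
All allocations made in one call share the same parameters; a sketch with illustrative memory requirements (real values would come from your resources), assuming allocator exists:

    VkMemoryRequirements memReq = {};
    memReq.size = 65536;
    memReq.alignment = 256;
    memReq.memoryTypeBits = UINT32_MAX; // accept any memory type here

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocations[8] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo, 8, allocations, nullptr);
    // ... use the allocations ...
    vmaFreeMemoryPages(allocator, 8, allocations);
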
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2254
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2088
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1928
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2736
VmaPoolCreateInfo
Describes parameters of created VmaPool.
Definition: vk_mem_alloc.h:2708
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1922
VmaPool
Represents custom memory pool.
VMA_MEMORY_USAGE_GPU_TO_CPU
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2415
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2486
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2714
VMA_MEMORY_USAGE_MAX_ENUM
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2431
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:2250
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2021
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3228
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3324
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2288
VmaAllocatorInfo
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2178
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2702
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2560
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2533
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2257
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2091
VMA_ALLOCATION_CREATE_STRATEGY_MASK
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2537
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2082
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
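
A sketch of a temporary map, assuming the allocation was made in host-visible memory and srcData/srcSize are the caller's; every successful vmaMapMemory() must be balanced with vmaUnmapMemory():

    void* pData = nullptr;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize); // write through the returned pointer
        vmaUnmapMemory(allocator, allocation);
        // For non-HOST_COHERENT memory, follow up with vmaFlushAllocation().
    }
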
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
Definition: vk_mem_alloc.h:2130
VmaDefragmentationPassMoveInfo::allocation
VmaAllocation allocation
Definition: vk_mem_alloc.h:3301
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2320
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameters of existing VmaPool.
VmaDefragmentationPassInfo
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2013
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2917
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2076
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3222
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2028
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2258
VMA_MEMORY_USAGE_CPU_TO_GPU
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2406
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2530
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2527
VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2003
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2555
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2257
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2908
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2061
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2020
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:2922
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now.
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool, so Buffer-Image Granularity can be ignored.
Definition: vk_mem_alloc.h:2666
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
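
The typical pattern creates the buffer, allocates memory for it and binds the two in a single call; a sketch with illustrative size and usage flags, assuming allocator exists:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                          VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);
    // Later, one call destroys the buffer and frees its memory:
    // vmaDestroyBuffer(allocator, buffer, allocation);
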
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2265
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
Definition: vk_mem_alloc.h:2142
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2777
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
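
A minimal sketch, assuming instance, physicalDevice and device are valid Vulkan handles created by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... make allocations ...
    vmaDestroyAllocator(allocator);
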
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3310
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2544
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2264
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2155
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
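
Flushing is needed only when the allocation's memory type lacks VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; a sketch flushing everything written through a mapped pointer:

    // offset and size are relative to the beginning of the allocation;
    // VK_WHOLE_SIZE covers from offset to the end of the allocation.
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
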
VMA_MEMORY_USAGE_UNKNOWN
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2372
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to commandBuffer.
Definition: vk_mem_alloc.h:3288
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2027
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2750
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2029
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like memcpy(), memmove().
Definition: vk_mem_alloc.h:3273
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of created VmaPool.
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2889
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:2466
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
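
A sketch of reading the aggregate numbers; per-heap and per-type breakdowns sit next to the total:

    VmaStats stats = {};
    vmaCalculateStats(allocator, &stats);
    VkDeviceSize used   = stats.total.usedBytes;   // bytes occupied by allocations
    VkDeviceSize unused = stats.total.unusedBytes; // bytes in unused ranges
    // stats.memoryHeap[i] and stats.memoryType[i] hold the same data per heap/type.
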
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2026
vmaGetAllocatorInfo
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2767
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2648
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of a VmaAllocation object that can be retrieved using function vmaGetAllocationInfo().
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3335
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2258
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2497
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaDefragmentationPassMoveInfo
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2549
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2025
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
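
The output parameter is expected to point to an array with one element per memory heap; a sketch using the Vulkan-defined upper bound:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets); // fills one entry per memory heap
    // For heap i, comparing budgets[i].usage against budgets[i].budget shows the
    // remaining headroom; blockBytes/allocationBytes break down VMA's own usage.
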
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2546
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2565
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
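
A CPU-side sketch, assuming allocations is a caller-provided array of allocCount candidate allocations not currently in use by the GPU; the GPU path through commandBuffer is omitted here:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no move-count limit

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    // VK_SUCCESS means the CPU-only work completed; always close the context.
    // Buffers/images bound to allocations that moved must be recreated and rebound.
    vmaDefragmentationEnd(allocator, defragCtx);
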
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
vmaEndDefragmentationPass
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
Definition: vk_mem_alloc.h:3249
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3339
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2573
VmaAllocatorInfo::physicalDevice
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2189
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3341
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2069
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2257
VmaAllocationInfo
Parameters of a VmaAllocation object that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2884
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2694
VmaAllocatorInfo
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2299
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2542
VmaDefragmentationContext
Opaque object that represents a started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3243
VMA_POOL_CREATE_ALGORITHM_MASK
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2698
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side, like memcpy(), memmove().
Definition: vk_mem_alloc.h:3278
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3319
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2503
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.