// Vulkan Memory Allocator
// vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1760 /*
1761 Define this macro to 0/1 to disable/enable support for recording functionality,
1762 available through VmaAllocatorCreateInfo::pRecordSettings.
1763 */
1764 #ifndef VMA_RECORDING_ENABLED
1765  #define VMA_RECORDING_ENABLED 0
1766 #endif
1767 
1768 #ifndef NOMINMAX
1769  #define NOMINMAX // For windows.h
1770 #endif
1771 
1772 #ifndef VULKAN_H_
1773  #include <vulkan/vulkan.h>
1774 #endif
1775 
1776 #if VMA_RECORDING_ENABLED
1777  #include <windows.h>
1778 #endif
1779 
1780 // Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
1781 // where AAA = major, BBB = minor, CCC = patch.
1782 // If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1783 #if !defined(VMA_VULKAN_VERSION)
1784  #if defined(VK_VERSION_1_1)
1785  #define VMA_VULKAN_VERSION 1001000
1786  #else
1787  #define VMA_VULKAN_VERSION 1000000
1788  #endif
1789 #endif
1790 
1791 #if !defined(VMA_DEDICATED_ALLOCATION)
1792  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1793  #define VMA_DEDICATED_ALLOCATION 1
1794  #else
1795  #define VMA_DEDICATED_ALLOCATION 0
1796  #endif
1797 #endif
1798 
1799 #if !defined(VMA_BIND_MEMORY2)
1800  #if VK_KHR_bind_memory2
1801  #define VMA_BIND_MEMORY2 1
1802  #else
1803  #define VMA_BIND_MEMORY2 0
1804  #endif
1805 #endif
1806 
1807 #if !defined(VMA_MEMORY_BUDGET)
1808  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1809  #define VMA_MEMORY_BUDGET 1
1810  #else
1811  #define VMA_MEMORY_BUDGET 0
1812  #endif
1813 #endif
1814 
1815 // Define these macros to decorate all public functions with additional code,
1816 // before and after returned type, appropriately. This may be useful for
1817 // exporting the functions when compiling VMA as a separate library. Example:
1818 // #define VMA_CALL_PRE __declspec(dllexport)
1819 // #define VMA_CALL_POST __cdecl
1820 #ifndef VMA_CALL_PRE
1821  #define VMA_CALL_PRE
1822 #endif
1823 #ifndef VMA_CALL_POST
1824  #define VMA_CALL_POST
1825 #endif
1826 
1836 VK_DEFINE_HANDLE(VmaAllocator)
1837 
1838 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1840  VmaAllocator allocator,
1841  uint32_t memoryType,
1842  VkDeviceMemory memory,
1843  VkDeviceSize size);
1845 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1846  VmaAllocator allocator,
1847  uint32_t memoryType,
1848  VkDeviceMemory memory,
1849  VkDeviceSize size);
1850 
1864 
1924 
1927 typedef VkFlags VmaAllocatorCreateFlags;
1928 
1933 typedef struct VmaVulkanFunctions {
1934  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1935  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1936  PFN_vkAllocateMemory vkAllocateMemory;
1937  PFN_vkFreeMemory vkFreeMemory;
1938  PFN_vkMapMemory vkMapMemory;
1939  PFN_vkUnmapMemory vkUnmapMemory;
1940  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1941  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1942  PFN_vkBindBufferMemory vkBindBufferMemory;
1943  PFN_vkBindImageMemory vkBindImageMemory;
1944  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1945  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1946  PFN_vkCreateBuffer vkCreateBuffer;
1947  PFN_vkDestroyBuffer vkDestroyBuffer;
1948  PFN_vkCreateImage vkCreateImage;
1949  PFN_vkDestroyImage vkDestroyImage;
1950  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1951 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
1952  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1953  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1954 #endif
1955 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
1956  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
1957  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
1958 #endif
1959 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
1960  PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
1961 #endif
1963 
1965 typedef enum VmaRecordFlagBits {
1972 
1975 typedef VkFlags VmaRecordFlags;
1976 
1978 typedef struct VmaRecordSettings
1979 {
1989  const char* pFilePath;
1991 
1994 {
1998 
1999  VkPhysicalDevice physicalDevice;
2001 
2002  VkDevice device;
2004 
2007 
2008  const VkAllocationCallbacks* pAllocationCallbacks;
2010 
2050  const VkDeviceSize* pHeapSizeLimit;
2075  VkInstance instance;
2086 
2088 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2089  const VmaAllocatorCreateInfo* pCreateInfo,
2090  VmaAllocator* pAllocator);
2091 
2093 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2094  VmaAllocator allocator);
2095 
2100 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2101  VmaAllocator allocator,
2102  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
2103 
2108 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2109  VmaAllocator allocator,
2110  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
2111 
2118 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2119  VmaAllocator allocator,
2120  uint32_t memoryTypeIndex,
2121  VkMemoryPropertyFlags* pFlags);
2122 
2131 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2132  VmaAllocator allocator,
2133  uint32_t frameIndex);
2134 
2137 typedef struct VmaStatInfo
2138 {
2140  uint32_t blockCount;
2146  VkDeviceSize usedBytes;
2148  VkDeviceSize unusedBytes;
2151 } VmaStatInfo;
2152 
2154 typedef struct VmaStats
2155 {
2156  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2157  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2159 } VmaStats;
2160 
2170 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2171  VmaAllocator allocator,
2172  VmaStats* pStats);
2173 
2176 typedef struct VmaBudget
2177 {
2180  VkDeviceSize blockBytes;
2181 
2191  VkDeviceSize allocationBytes;
2192 
2201  VkDeviceSize usage;
2202 
2212  VkDeviceSize budget;
2213 } VmaBudget;
2214 
2225 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2226  VmaAllocator allocator,
2227  VmaBudget* pBudget);
2228 
2229 #ifndef VMA_STATS_STRING_ENABLED
2230 #define VMA_STATS_STRING_ENABLED 1
2231 #endif
2232 
2233 #if VMA_STATS_STRING_ENABLED
2234 
2236 
2238 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2239  VmaAllocator allocator,
2240  char** ppStatsString,
2241  VkBool32 detailedMap);
2242 
2243 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2244  VmaAllocator allocator,
2245  char* pStatsString);
2246 
2247 #endif // #if VMA_STATS_STRING_ENABLED
2248 
2257 VK_DEFINE_HANDLE(VmaPool)
2258 
2259 typedef enum VmaMemoryUsage
2260 {
2322 
2324 } VmaMemoryUsage;
2325 
2335 
2400 
2416 
2426 
2433 
2437 
2439 {
2452  VkMemoryPropertyFlags requiredFlags;
2457  VkMemoryPropertyFlags preferredFlags;
2465  uint32_t memoryTypeBits;
2478  void* pUserData;
2480 
2497 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2498  VmaAllocator allocator,
2499  uint32_t memoryTypeBits,
2500  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2501  uint32_t* pMemoryTypeIndex);
2502 
2515 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2516  VmaAllocator allocator,
2517  const VkBufferCreateInfo* pBufferCreateInfo,
2518  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2519  uint32_t* pMemoryTypeIndex);
2520 
2533 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2534  VmaAllocator allocator,
2535  const VkImageCreateInfo* pImageCreateInfo,
2536  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2537  uint32_t* pMemoryTypeIndex);
2538 
2559 
2576 
2587 
2593 
2596 typedef VkFlags VmaPoolCreateFlags;
2597 
2600 typedef struct VmaPoolCreateInfo {
2615  VkDeviceSize blockSize;
2644 
2647 typedef struct VmaPoolStats {
2650  VkDeviceSize size;
2653  VkDeviceSize unusedSize;
2666  VkDeviceSize unusedRangeSizeMax;
2669  size_t blockCount;
2670 } VmaPoolStats;
2671 
2678 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2679  VmaAllocator allocator,
2680  const VmaPoolCreateInfo* pCreateInfo,
2681  VmaPool* pPool);
2682 
2685 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2686  VmaAllocator allocator,
2687  VmaPool pool);
2688 
2695 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2696  VmaAllocator allocator,
2697  VmaPool pool,
2698  VmaPoolStats* pPoolStats);
2699 
2706 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
2707  VmaAllocator allocator,
2708  VmaPool pool,
2709  size_t* pLostAllocationCount);
2710 
2725 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2726 
2733 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
2734  VmaAllocator allocator,
2735  VmaPool pool,
2736  const char** ppName);
2737 
2743 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
2744  VmaAllocator allocator,
2745  VmaPool pool,
2746  const char* pName);
2747 
2772 VK_DEFINE_HANDLE(VmaAllocation)
2773 
2774 
2776 typedef struct VmaAllocationInfo {
2781  uint32_t memoryType;
2790  VkDeviceMemory deviceMemory;
2795  VkDeviceSize offset;
2800  VkDeviceSize size;
2814  void* pUserData;
2816 
2827 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
2828  VmaAllocator allocator,
2829  const VkMemoryRequirements* pVkMemoryRequirements,
2830  const VmaAllocationCreateInfo* pCreateInfo,
2831  VmaAllocation* pAllocation,
2832  VmaAllocationInfo* pAllocationInfo);
2833 
2853 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
2854  VmaAllocator allocator,
2855  const VkMemoryRequirements* pVkMemoryRequirements,
2856  const VmaAllocationCreateInfo* pCreateInfo,
2857  size_t allocationCount,
2858  VmaAllocation* pAllocations,
2859  VmaAllocationInfo* pAllocationInfo);
2860 
2867 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
2868  VmaAllocator allocator,
2869  VkBuffer buffer,
2870  const VmaAllocationCreateInfo* pCreateInfo,
2871  VmaAllocation* pAllocation,
2872  VmaAllocationInfo* pAllocationInfo);
2873 
2875 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
2876  VmaAllocator allocator,
2877  VkImage image,
2878  const VmaAllocationCreateInfo* pCreateInfo,
2879  VmaAllocation* pAllocation,
2880  VmaAllocationInfo* pAllocationInfo);
2881 
2886 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
2887  VmaAllocator allocator,
2888  VmaAllocation allocation);
2889 
2900 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
2901  VmaAllocator allocator,
2902  size_t allocationCount,
2903  VmaAllocation* pAllocations);
2904 
2911 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
2912  VmaAllocator allocator,
2913  VmaAllocation allocation,
2914  VkDeviceSize newSize);
2915 
2932 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
2933  VmaAllocator allocator,
2934  VmaAllocation allocation,
2935  VmaAllocationInfo* pAllocationInfo);
2936 
2951 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
2952  VmaAllocator allocator,
2953  VmaAllocation allocation);
2954 
2968 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
2969  VmaAllocator allocator,
2970  VmaAllocation allocation,
2971  void* pUserData);
2972 
2983 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
2984  VmaAllocator allocator,
2985  VmaAllocation* pAllocation);
2986 
3021 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3022  VmaAllocator allocator,
3023  VmaAllocation allocation,
3024  void** ppData);
3025 
3030 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3031  VmaAllocator allocator,
3032  VmaAllocation allocation);
3033 
3050 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3051 
3068 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3069 
3086 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
3087 
3094 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3095 
3096 typedef enum VmaDefragmentationFlagBits {
3100 typedef VkFlags VmaDefragmentationFlags;
3101 
3106 typedef struct VmaDefragmentationInfo2 {
3130  uint32_t poolCount;
3151  VkDeviceSize maxCpuBytesToMove;
3161  VkDeviceSize maxGpuBytesToMove;
3175  VkCommandBuffer commandBuffer;
3177 
3182 typedef struct VmaDefragmentationInfo {
3187  VkDeviceSize maxBytesToMove;
3194 
3196 typedef struct VmaDefragmentationStats {
3198  VkDeviceSize bytesMoved;
3200  VkDeviceSize bytesFreed;
3206 
3236 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3237  VmaAllocator allocator,
3238  const VmaDefragmentationInfo2* pInfo,
3239  VmaDefragmentationStats* pStats,
3240  VmaDefragmentationContext *pContext);
3241 
3247 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3248  VmaAllocator allocator,
3249  VmaDefragmentationContext context);
3250 
3291 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3292  VmaAllocator allocator,
3293  VmaAllocation* pAllocations,
3294  size_t allocationCount,
3295  VkBool32* pAllocationsChanged,
3296  const VmaDefragmentationInfo *pDefragmentationInfo,
3297  VmaDefragmentationStats* pDefragmentationStats);
3298 
3311 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3312  VmaAllocator allocator,
3313  VmaAllocation allocation,
3314  VkBuffer buffer);
3315 
3326 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3327  VmaAllocator allocator,
3328  VmaAllocation allocation,
3329  VkDeviceSize allocationLocalOffset,
3330  VkBuffer buffer,
3331  const void* pNext);
3332 
3345 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3346  VmaAllocator allocator,
3347  VmaAllocation allocation,
3348  VkImage image);
3349 
3360 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3361  VmaAllocator allocator,
3362  VmaAllocation allocation,
3363  VkDeviceSize allocationLocalOffset,
3364  VkImage image,
3365  const void* pNext);
3366 
3393 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3394  VmaAllocator allocator,
3395  const VkBufferCreateInfo* pBufferCreateInfo,
3396  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3397  VkBuffer* pBuffer,
3398  VmaAllocation* pAllocation,
3399  VmaAllocationInfo* pAllocationInfo);
3400 
3412 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3413  VmaAllocator allocator,
3414  VkBuffer buffer,
3415  VmaAllocation allocation);
3416 
3418 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3419  VmaAllocator allocator,
3420  const VkImageCreateInfo* pImageCreateInfo,
3421  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3422  VkImage* pImage,
3423  VmaAllocation* pAllocation,
3424  VmaAllocationInfo* pAllocationInfo);
3425 
3437 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3438  VmaAllocator allocator,
3439  VkImage image,
3440  VmaAllocation allocation);
3441 
3442 #ifdef __cplusplus
3443 }
3444 #endif
3445 
3446 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3447 
3448 // For Visual Studio IntelliSense.
3449 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3450 #define VMA_IMPLEMENTATION
3451 #endif
3452 
3453 #ifdef VMA_IMPLEMENTATION
3454 #undef VMA_IMPLEMENTATION
3455 
3456 #include <cstdint>
3457 #include <cstdlib>
3458 #include <cstring>
3459 
3460 /*******************************************************************************
3461 CONFIGURATION SECTION
3462 
3463 Define some of these macros before each #include of this header or change them
3464 here if you need other than default behavior depending on your environment.
3465 */
3466 
3467 /*
3468 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3469 internally, like:
3470 
3471  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3472 
3473 Define to 0 if you are going to provide you own pointers to Vulkan functions via
3474 VmaAllocatorCreateInfo::pVulkanFunctions.
3475 */
3476 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3477 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3478 #endif
3479 
3480 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3481 //#define VMA_USE_STL_CONTAINERS 1
3482 
3483 /* Set this macro to 1 to make the library including and using STL containers:
3484 std::pair, std::vector, std::list, std::unordered_map.
3485 
3486 Set it to 0 or undefined to make the library using its own implementation of
3487 the containers.
3488 */
3489 #if VMA_USE_STL_CONTAINERS
3490  #define VMA_USE_STL_VECTOR 1
3491  #define VMA_USE_STL_UNORDERED_MAP 1
3492  #define VMA_USE_STL_LIST 1
3493 #endif
3494 
3495 #ifndef VMA_USE_STL_SHARED_MUTEX
3496  // Compiler conforms to C++17.
3497  #if __cplusplus >= 201703L
3498  #define VMA_USE_STL_SHARED_MUTEX 1
3499  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3500  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3501  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3502  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3503  #define VMA_USE_STL_SHARED_MUTEX 1
3504  #else
3505  #define VMA_USE_STL_SHARED_MUTEX 0
3506  #endif
3507 #endif
3508 
3509 /*
3510 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3511 Library has its own container implementation.
3512 */
3513 #if VMA_USE_STL_VECTOR
3514  #include <vector>
3515 #endif
3516 
3517 #if VMA_USE_STL_UNORDERED_MAP
3518  #include <unordered_map>
3519 #endif
3520 
3521 #if VMA_USE_STL_LIST
3522  #include <list>
3523 #endif
3524 
3525 /*
3526 Following headers are used in this CONFIGURATION section only, so feel free to
3527 remove them if not needed.
3528 */
3529 #include <cassert> // for assert
3530 #include <algorithm> // for min, max
3531 #include <mutex>
3532 
3533 #ifndef VMA_NULL
3534  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3535  #define VMA_NULL nullptr
3536 #endif
3537 
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
// Old Android NDK levels lack aligned_alloc(); emulate it with memalign().
void *aligned_alloc(size_t alignment, size_t size)
{
    // memalign() requires the alignment to be at least sizeof(void*).
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }
    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>
// Platforms without C11 aligned_alloc(): emulate it with posix_memalign().
void *aligned_alloc(size_t alignment, size_t size)
{
    // posix_memalign() requires the alignment to be at least sizeof(void*).
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }
    void *ptr = VMA_NULL;
    if(posix_memalign(&ptr, alignment, size) == 0)
    {
        return ptr;
    }
    return VMA_NULL;
}
#endif
3566 
3567 // If your compiler is not compatible with C++11 and definition of
3568 // aligned_alloc() function is missing, uncommenting the following line may help:
3569 
3570 //#include <malloc.h>
3571 
3572 // Normal assert to check for programmer's errors, especially in Debug configuration.
3573 #ifndef VMA_ASSERT
3574  #ifdef _DEBUG
3575  #define VMA_ASSERT(expr) assert(expr)
3576  #else
3577  #define VMA_ASSERT(expr)
3578  #endif
3579 #endif
3580 
3581 // Assert that will be called very often, like inside data structures e.g. operator[].
3582 // Making it non-empty can make program slow.
3583 #ifndef VMA_HEAVY_ASSERT
3584  #ifdef _DEBUG
3585  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3586  #else
3587  #define VMA_HEAVY_ASSERT(expr)
3588  #endif
3589 #endif
3590 
3591 #ifndef VMA_ALIGN_OF
3592  #define VMA_ALIGN_OF(type) (__alignof(type))
3593 #endif
3594 
3595 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3596  #if defined(_WIN32)
3597  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3598  #else
3599  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3600  #endif
3601 #endif
3602 
3603 #ifndef VMA_SYSTEM_FREE
3604  #if defined(_WIN32)
3605  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3606  #else
3607  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3608  #endif
3609 #endif
3610 
3611 #ifndef VMA_MIN
3612  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3613 #endif
3614 
3615 #ifndef VMA_MAX
3616  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3617 #endif
3618 
3619 #ifndef VMA_SWAP
3620  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3621 #endif
3622 
3623 #ifndef VMA_SORT
3624  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3625 #endif
3626 
3627 #ifndef VMA_DEBUG_LOG
3628  #define VMA_DEBUG_LOG(format, ...)
3629  /*
3630  #define VMA_DEBUG_LOG(format, ...) do { \
3631  printf(format, __VA_ARGS__); \
3632  printf("\n"); \
3633  } while(false)
3634  */
3635 #endif
3636 
3637 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3638 #if VMA_STATS_STRING_ENABLED
// Writes the decimal representation of (num) into outStr.
// At most strLen bytes are written; the result is NUL-terminated when strLen > 0.
static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
{
    const unsigned int value = static_cast<unsigned int>(num);
    snprintf(outStr, strLen, "%u", value);
}
// Writes the decimal representation of (num) into outStr.
// At most strLen bytes are written; the result is NUL-terminated when strLen > 0.
static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
{
    const unsigned long long value = static_cast<unsigned long long>(num);
    snprintf(outStr, strLen, "%llu", value);
}
// Writes the textual representation of pointer (ptr) into outStr.
// Format is the implementation-defined "%p" form; NUL-terminated when strLen > 0.
static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
{
    const void* const address = ptr;
    snprintf(outStr, strLen, "%p", address);
}
3651 #endif
3652 
#ifndef VMA_MUTEX
    // Minimal exclusive-lock wrapper around std::mutex.
    // Users may override by defining VMA_MUTEX to their own class with Lock()/Unlock().
    class VmaMutex
    {
    public:
        void Lock() { m_Mtx.lock(); }
        void Unlock() { m_Mtx.unlock(); }
    private:
        std::mutex m_Mtx;
    };
    #define VMA_MUTEX VmaMutex
#endif
3664 
3665 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3666 #ifndef VMA_RW_MUTEX
3667  #if VMA_USE_STL_SHARED_MUTEX
3668  // Use std::shared_mutex from C++17.
3669  #include <shared_mutex>
3670  class VmaRWMutex
3671  {
3672  public:
3673  void LockRead() { m_Mutex.lock_shared(); }
3674  void UnlockRead() { m_Mutex.unlock_shared(); }
3675  void LockWrite() { m_Mutex.lock(); }
3676  void UnlockWrite() { m_Mutex.unlock(); }
3677  private:
3678  std::shared_mutex m_Mutex;
3679  };
3680  #define VMA_RW_MUTEX VmaRWMutex
3681  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3682  // Use SRWLOCK from WinAPI.
3683  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3684  class VmaRWMutex
3685  {
3686  public:
3687  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3688  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3689  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3690  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3691  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3692  private:
3693  SRWLOCK m_Lock;
3694  };
3695  #define VMA_RW_MUTEX VmaRWMutex
3696  #else
3697  // Less efficient fallback: Use normal mutex.
3698  class VmaRWMutex
3699  {
3700  public:
3701  void LockRead() { m_Mutex.Lock(); }
3702  void UnlockRead() { m_Mutex.Unlock(); }
3703  void LockWrite() { m_Mutex.Lock(); }
3704  void UnlockWrite() { m_Mutex.Unlock(); }
3705  private:
3706  VMA_MUTEX m_Mutex;
3707  };
3708  #define VMA_RW_MUTEX VmaRWMutex
3709  #endif // #if VMA_USE_STL_SHARED_MUTEX
3710 #endif // #ifndef VMA_RW_MUTEX
3711 
3712 /*
3713 If providing your own implementation, you need to implement a subset of std::atomic.
3714 */
3715 #ifndef VMA_ATOMIC_UINT32
3716  #include <atomic>
3717  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3718 #endif
3719 
3720 #ifndef VMA_ATOMIC_UINT64
3721  #include <atomic>
3722  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3723 #endif
3724 
3725 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3726 
3730  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3731 #endif
3732 
3733 #ifndef VMA_DEBUG_ALIGNMENT
3734 
3738  #define VMA_DEBUG_ALIGNMENT (1)
3739 #endif
3740 
3741 #ifndef VMA_DEBUG_MARGIN
3742 
3746  #define VMA_DEBUG_MARGIN (0)
3747 #endif
3748 
3749 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3750 
3754  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3755 #endif
3756 
3757 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3758 
3763  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3764 #endif
3765 
3766 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3767 
3771  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3772 #endif
3773 
3774 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3775 
3779  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3780 #endif
3781 
3782 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3783  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3785 #endif
3786 
3787 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3788  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3790 #endif
3791 
3792 #ifndef VMA_CLASS_NO_COPY
3793  #define VMA_CLASS_NO_COPY(className) \
3794  private: \
3795  className(const className&) = delete; \
3796  className& operator=(const className&) = delete;
3797 #endif
3798 
3799 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3800 
3801 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3802 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3803 
3804 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3805 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3806 
3807 /*******************************************************************************
3808 END OF CONFIGURATION
3809 */
3810 
3811 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3812 
// Allocation-callbacks struct with every function pointer set to null.
// NOTE(review): presumably used as the default when the user supplies no
// custom VkAllocationCallbacks — verify at call sites (not visible in this chunk).
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3815 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Parallel (SWAR) bit count: sum bits in progressively wider fields.
    uint32_t c = v - ((v >> 1) & 0x55555555u);        // 2-bit partial sums
    c = (c & 0x33333333u) + ((c >> 2) & 0x33333333u); // 4-bit partial sums
    c = (c + (c >> 4)) & 0x0F0F0F0Fu;                 // 8-bit partial sums
    c = (c + (c >> 8)) & 0x00FF00FFu;                 // 16-bit partial sums
    c = (c + (c >> 16)) & 0x0000FFFFu;                // final total
    return c;
}
3826 
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Bias the value up to the next multiple, then strip the remainder.
    const T biased = val + align - 1;
    return biased - biased % align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// (Fixed comment: it previously said "VmaAlignUp", copied from the function above.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
3841 
// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    const T half = y / (T)2;
    return (x + half) / y;
}
3848 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // x & (x - 1) clears the lowest set bit; a power of two has exactly one bit set.
    const T lowerBits = x & (x - 1);
    return lowerBits == 0;
}
3859 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Decrement, smear the highest set bit into every lower position, increment.
    // Note: for v == 0 this wraps around and returns 0, same as the unrolled trick.
    --v;
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    ++v;
    return v;
}
// Returns smallest power of 2 greater or equal to v (64-bit overload).
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // Decrement, smear the highest set bit into every lower position, increment.
    --v;
    for(uint32_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    ++v;
    return v;
}
3884 
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the highest set bit downward, then keep only the highest bit.
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    v = v ^ (v >> 1);
    return v;
}
// Returns largest power of 2 less or equal to v (64-bit overload).
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Smear the highest set bit downward, then keep only the highest bit.
    for(uint32_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    v = v ^ (v >> 1);
    return v;
}
3907 
3908 static inline bool VmaStrIsEmpty(const char* pStr)
3909 {
3910  return pStr == VMA_NULL || *pStr == '\0';
3911 }
3912 
3913 #if VMA_STATS_STRING_ENABLED
3914 
3915 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3916 {
3917  switch(algorithm)
3918  {
3920  return "Linear";
3922  return "Buddy";
3923  case 0:
3924  return "Default";
3925  default:
3926  VMA_ASSERT(0);
3927  return "";
3928  }
3929 }
3930 
3931 #endif // #if VMA_STATS_STRING_ENABLED
3932 
3933 #ifndef VMA_SORT
3934 
// Lomuto-style partition used by VmaQuickSort: the last element of [beg, end)
// is the pivot. Returns an iterator to the pivot's final position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator pivot = end;
    --pivot;
    Iterator store = beg; // Boundary of the "less than pivot" prefix.
    for(Iterator it = beg; it < pivot; ++it)
    {
        if(cmp(*it, *pivot))
        {
            if(store != it)
            {
                VMA_SWAP(*it, *store);
            }
            ++store;
        }
    }
    // Move the pivot between the two partitions.
    if(store != pivot)
    {
        VMA_SWAP(*store, *pivot);
    }
    return store;
}
3957 
3958 template<typename Iterator, typename Compare>
3959 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3960 {
3961  if(beg < end)
3962  {
3963  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3964  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3965  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3966  }
3967 }
3968 
3969 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3970 
3971 #endif // #ifndef VMA_SORT
3972 
3973 /*
3974 Returns true if two memory blocks occupy overlapping pages.
3975 ResourceA must be in less memory offset than ResourceB.
3976 
3977 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3978 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3979 */
3980 static inline bool VmaBlocksOnSamePage(
3981  VkDeviceSize resourceAOffset,
3982  VkDeviceSize resourceASize,
3983  VkDeviceSize resourceBOffset,
3984  VkDeviceSize pageSize)
3985 {
3986  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3987  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3988  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3989  VkDeviceSize resourceBStart = resourceBOffset;
3990  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3991  return resourceAEndPage == resourceBStartPage;
3992 }
3993 
// Type of content stored in a suballocation. The numeric order matters:
// VmaIsBufferImageGranularityConflict() swaps its two arguments so that the
// smaller enum value comes first before classifying the pair.
enum VmaSuballocationType
{
    // Unused range of a memory block.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Content unknown - treated conservatively as conflicting with everything non-free.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image with unknown tiling - also treated conservatively.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Standard Vulkan-style sentinel forcing a 32-bit underlying type.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
4004 
4005 /*
4006 Returns true if given suballocation types could conflict and must respect
4007 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4008 or linear image and another one is optimal image. If type is unknown, behave
4009 conservatively.
4010 */
4011 static inline bool VmaIsBufferImageGranularityConflict(
4012  VmaSuballocationType suballocType1,
4013  VmaSuballocationType suballocType2)
4014 {
4015  if(suballocType1 > suballocType2)
4016  {
4017  VMA_SWAP(suballocType1, suballocType2);
4018  }
4019 
4020  switch(suballocType1)
4021  {
4022  case VMA_SUBALLOCATION_TYPE_FREE:
4023  return false;
4024  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4025  return true;
4026  case VMA_SUBALLOCATION_TYPE_BUFFER:
4027  return
4028  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4029  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4030  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4031  return
4032  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4033  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4034  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4035  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4036  return
4037  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4038  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4039  return false;
4040  default:
4041  VMA_ASSERT(0);
4042  return true;
4043  }
4044 }
4045 
4046 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
4047 {
4048 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4049  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
4050  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4051  for(size_t i = 0; i < numberCount; ++i, ++pDst)
4052  {
4053  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
4054  }
4055 #else
4056  // no-op
4057 #endif
4058 }
4059 
4060 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
4061 {
4062 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4063  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
4064  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4065  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4066  {
4067  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4068  {
4069  return false;
4070  }
4071  }
4072 #endif
4073  return true;
4074 }
4075 
4076 /*
4077 Fills structure with parameters of an example buffer to be used for transfers
4078 during GPU memory defragmentation.
4079 */
4080 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4081 {
4082  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4083  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4084  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4085  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4086 }
4087 
4088 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4089 struct VmaMutexLock
4090 {
4091  VMA_CLASS_NO_COPY(VmaMutexLock)
4092 public:
4093  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4094  m_pMutex(useMutex ? &mutex : VMA_NULL)
4095  { if(m_pMutex) { m_pMutex->Lock(); } }
4096  ~VmaMutexLock()
4097  { if(m_pMutex) { m_pMutex->Unlock(); } }
4098 private:
4099  VMA_MUTEX* m_pMutex;
4100 };
4101 
4102 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4103 struct VmaMutexLockRead
4104 {
4105  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4106 public:
4107  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4108  m_pMutex(useMutex ? &mutex : VMA_NULL)
4109  { if(m_pMutex) { m_pMutex->LockRead(); } }
4110  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4111 private:
4112  VMA_RW_MUTEX* m_pMutex;
4113 };
4114 
4115 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4116 struct VmaMutexLockWrite
4117 {
4118  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4119 public:
4120  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4121  m_pMutex(useMutex ? &mutex : VMA_NULL)
4122  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4123  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4124 private:
4125  VMA_RW_MUTEX* m_pMutex;
4126 };
4127 
4128 #if VMA_DEBUG_GLOBAL_MUTEX
4129  static VMA_MUTEX gDebugGlobalMutex;
4130  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4131 #else
4132  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4133 #endif
4134 
4135 // Minimum size of a free suballocation to register it in the free suballocation collection.
4136 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4137 
4138 /*
4139 Performs binary search and returns iterator to first element that is greater or
4140 equal to (key), according to comparison (cmp).
4141 
4142 Cmp should return true if first argument is less than second argument.
4143 
4144 Returned value is the found element, if present in the collection or place where
4145 new element with value (key) should be inserted.
4146 */
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    // Lower-bound binary search: returns the first element for which
    // cmp(element, key) is false (i.e. element >= key), or end if none exists.
    size_t remaining = (size_t)(end - beg);
    IterT first = beg;
    while(remaining > 0)
    {
        const size_t step = remaining / 2;
        IterT mid = first + step;
        if(cmp(*mid, key))
        {
            // Everything up to and including mid is < key: discard it.
            first = mid + 1;
            remaining -= step + 1;
        }
        else
        {
            remaining = step;
        }
    }
    return first;
}
4165 
4166 template<typename CmpLess, typename IterT, typename KeyT>
4167 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4168 {
4169  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4170  beg, end, value, cmp);
4171  if(it == end ||
4172  (!cmp(*it, value) && !cmp(value, *it)))
4173  {
4174  return it;
4175  }
4176  return end;
4177 }
4178 
4179 /*
4180 Returns true if all pointers in the array are not-null and unique.
4181 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4182 T must be pointer type, e.g. VmaAllocation, VmaPool.
4183 */
4184 template<typename T>
4185 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4186 {
4187  for(uint32_t i = 0; i < count; ++i)
4188  {
4189  const T iPtr = arr[i];
4190  if(iPtr == VMA_NULL)
4191  {
4192  return false;
4193  }
4194  for(uint32_t j = i + 1; j < count; ++j)
4195  {
4196  if(iPtr == arr[j])
4197  {
4198  return false;
4199  }
4200  }
4201  }
4202  return true;
4203 }
4204 
4206 // Memory allocation
4207 
4208 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4209 {
4210  if((pAllocationCallbacks != VMA_NULL) &&
4211  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4212  {
4213  return (*pAllocationCallbacks->pfnAllocation)(
4214  pAllocationCallbacks->pUserData,
4215  size,
4216  alignment,
4217  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4218  }
4219  else
4220  {
4221  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4222  }
4223 }
4224 
4225 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4226 {
4227  if((pAllocationCallbacks != VMA_NULL) &&
4228  (pAllocationCallbacks->pfnFree != VMA_NULL))
4229  {
4230  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4231  }
4232  else
4233  {
4234  VMA_SYSTEM_FREE(ptr);
4235  }
4236 }
4237 
4238 template<typename T>
4239 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4240 {
4241  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4242 }
4243 
4244 template<typename T>
4245 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4246 {
4247  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4248 }
4249 
// Allocates storage for one `type` via VmaAllocate and placement-constructs it.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocates storage for (count) elements of `type`. NOTE(review): the
// placement-new expression constructs only the first element - callers appear
// to rely on element types being trivially constructible; verify before using
// with non-POD types.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4253 
4254 template<typename T>
4255 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4256 {
4257  ptr->~T();
4258  VmaFree(pAllocationCallbacks, ptr);
4259 }
4260 
4261 template<typename T>
4262 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4263 {
4264  if(ptr != VMA_NULL)
4265  {
4266  for(size_t i = count; i--; )
4267  {
4268  ptr[i].~T();
4269  }
4270  VmaFree(pAllocationCallbacks, ptr);
4271  }
4272 }
4273 
4274 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4275 {
4276  if(srcStr != VMA_NULL)
4277  {
4278  const size_t len = strlen(srcStr);
4279  char* const result = vma_new_array(allocs, char, len + 1);
4280  memcpy(result, srcStr, len + 1);
4281  return result;
4282  }
4283  else
4284  {
4285  return VMA_NULL;
4286  }
4287 }
4288 
4289 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4290 {
4291  if(str != VMA_NULL)
4292  {
4293  const size_t len = strlen(str);
4294  vma_delete_array(allocs, str, len + 1);
4295  }
4296 }
4297 
4298 // STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    // Public so containers built on this allocator (e.g. VmaList's constructor)
    // can forward the callbacks to nested allocations.
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    // Wraps the given Vulkan allocation callbacks (may be null - system allocator is then used).
    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor, required by the STL allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    // Allocates raw storage for n objects of T (no construction).
    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    // Releases storage previously returned by allocate(); n is unused.
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
4325 
4326 #if VMA_USE_STL_VECTOR
4327 
4328 #define VmaVector std::vector
4329 
// Inserts item at the given index (std::vector backend).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
4335 
// Removes the element at the given index (std::vector backend).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
4341 
4342 #else // #if VMA_USE_STL_VECTOR
4343 
4344 /* Class with interface compatible with subset of std::vector.
4345 T must be POD because constructors and destructors are not called and memcpy is
4346 used for these objects. */
4347 template<typename T, typename AllocatorT>
4348 class VmaVector
4349 {
4350 public:
4351  typedef T value_type;
4352 
4353  VmaVector(const AllocatorT& allocator) :
4354  m_Allocator(allocator),
4355  m_pArray(VMA_NULL),
4356  m_Count(0),
4357  m_Capacity(0)
4358  {
4359  }
4360 
4361  VmaVector(size_t count, const AllocatorT& allocator) :
4362  m_Allocator(allocator),
4363  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4364  m_Count(count),
4365  m_Capacity(count)
4366  {
4367  }
4368 
4369  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4370  // value is unused.
4371  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4372  : VmaVector(count, allocator) {}
4373 
4374  VmaVector(const VmaVector<T, AllocatorT>& src) :
4375  m_Allocator(src.m_Allocator),
4376  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4377  m_Count(src.m_Count),
4378  m_Capacity(src.m_Count)
4379  {
4380  if(m_Count != 0)
4381  {
4382  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4383  }
4384  }
4385 
4386  ~VmaVector()
4387  {
4388  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4389  }
4390 
4391  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4392  {
4393  if(&rhs != this)
4394  {
4395  resize(rhs.m_Count);
4396  if(m_Count != 0)
4397  {
4398  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4399  }
4400  }
4401  return *this;
4402  }
4403 
4404  bool empty() const { return m_Count == 0; }
4405  size_t size() const { return m_Count; }
4406  T* data() { return m_pArray; }
4407  const T* data() const { return m_pArray; }
4408 
4409  T& operator[](size_t index)
4410  {
4411  VMA_HEAVY_ASSERT(index < m_Count);
4412  return m_pArray[index];
4413  }
4414  const T& operator[](size_t index) const
4415  {
4416  VMA_HEAVY_ASSERT(index < m_Count);
4417  return m_pArray[index];
4418  }
4419 
4420  T& front()
4421  {
4422  VMA_HEAVY_ASSERT(m_Count > 0);
4423  return m_pArray[0];
4424  }
4425  const T& front() const
4426  {
4427  VMA_HEAVY_ASSERT(m_Count > 0);
4428  return m_pArray[0];
4429  }
4430  T& back()
4431  {
4432  VMA_HEAVY_ASSERT(m_Count > 0);
4433  return m_pArray[m_Count - 1];
4434  }
4435  const T& back() const
4436  {
4437  VMA_HEAVY_ASSERT(m_Count > 0);
4438  return m_pArray[m_Count - 1];
4439  }
4440 
4441  void reserve(size_t newCapacity, bool freeMemory = false)
4442  {
4443  newCapacity = VMA_MAX(newCapacity, m_Count);
4444 
4445  if((newCapacity < m_Capacity) && !freeMemory)
4446  {
4447  newCapacity = m_Capacity;
4448  }
4449 
4450  if(newCapacity != m_Capacity)
4451  {
4452  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4453  if(m_Count != 0)
4454  {
4455  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4456  }
4457  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4458  m_Capacity = newCapacity;
4459  m_pArray = newArray;
4460  }
4461  }
4462 
4463  void resize(size_t newCount, bool freeMemory = false)
4464  {
4465  size_t newCapacity = m_Capacity;
4466  if(newCount > m_Capacity)
4467  {
4468  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4469  }
4470  else if(freeMemory)
4471  {
4472  newCapacity = newCount;
4473  }
4474 
4475  if(newCapacity != m_Capacity)
4476  {
4477  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4478  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4479  if(elementsToCopy != 0)
4480  {
4481  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4482  }
4483  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4484  m_Capacity = newCapacity;
4485  m_pArray = newArray;
4486  }
4487 
4488  m_Count = newCount;
4489  }
4490 
4491  void clear(bool freeMemory = false)
4492  {
4493  resize(0, freeMemory);
4494  }
4495 
4496  void insert(size_t index, const T& src)
4497  {
4498  VMA_HEAVY_ASSERT(index <= m_Count);
4499  const size_t oldCount = size();
4500  resize(oldCount + 1);
4501  if(index < oldCount)
4502  {
4503  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4504  }
4505  m_pArray[index] = src;
4506  }
4507 
4508  void remove(size_t index)
4509  {
4510  VMA_HEAVY_ASSERT(index < m_Count);
4511  const size_t oldCount = size();
4512  if(index < oldCount - 1)
4513  {
4514  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4515  }
4516  resize(oldCount - 1);
4517  }
4518 
4519  void push_back(const T& src)
4520  {
4521  const size_t newIndex = size();
4522  resize(newIndex + 1);
4523  m_pArray[newIndex] = src;
4524  }
4525 
4526  void pop_back()
4527  {
4528  VMA_HEAVY_ASSERT(m_Count > 0);
4529  resize(size() - 1);
4530  }
4531 
4532  void push_front(const T& src)
4533  {
4534  insert(0, src);
4535  }
4536 
4537  void pop_front()
4538  {
4539  VMA_HEAVY_ASSERT(m_Count > 0);
4540  remove(0);
4541  }
4542 
4543  typedef T* iterator;
4544 
4545  iterator begin() { return m_pArray; }
4546  iterator end() { return m_pArray + m_Count; }
4547 
4548 private:
4549  AllocatorT m_Allocator;
4550  T* m_pArray;
4551  size_t m_Count;
4552  size_t m_Capacity;
4553 };
4554 
// Inserts item at the given index (VmaVector backend).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4560 
// Removes the element at the given index (VmaVector backend).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4566 
4567 #endif // #if VMA_USE_STL_VECTOR
4568 
// Inserts value into a vector kept sorted according to CmpLess.
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBeg = vector.data();
    typename VectorT::value_type* const pEnd = pBeg + vector.size();
    const size_t indexToInsert = (size_t)(
        VmaBinaryFindFirstNotLess(pBeg, pEnd, value, CmpLess()) - pBeg);
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
4580 
// Binary-searches a sorted vector for an element equivalent to value and
// removes it. Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end() || comparator(*it, value) || comparator(value, *it))
    {
        return false; // Not present.
    }
    VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    return true;
}
4598 
4600 // class VmaPoolAllocator
4601 
4602 /*
4603 Allocator for objects of type T using a list of arrays (pools) to speed up
4604 allocation. Number of elements that can be allocated is not bounded because
4605 allocator can create multiple blocks.
4606 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // pAllocationCallbacks may be null (system allocator is then used).
    // firstBlockCapacity is the item count of the first block; must be > 1.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Returns a default-constructed T from the pool.
    T* Alloc();
    // Destroys *ptr and returns its slot to the pool. ptr must come from Alloc().
    void Free(T* ptr);

private:
    // A slot is either a live T or a link in the block's intrusive free list.
    union Item
    {
        uint32_t NextFreeIndex;           // Next free slot; UINT32_MAX = end of list.
        alignas(T) char Value[sizeof(T)]; // Storage for a live object.
    };

    struct ItemBlock
    {
        Item* pItems;            // Array of Capacity slots.
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // Head of the free list; UINT32_MAX when full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4637 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // No block is created here - blocks are created lazily on first Alloc().
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
4646 
4647 template<typename T>
4648 VmaPoolAllocator<T>::~VmaPoolAllocator()
4649 {
4650  for(size_t i = m_ItemBlocks.size(); i--; )
4651  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4652  m_ItemBlocks.clear();
4653 }
4654 
4655 template<typename T>
4656 T* VmaPoolAllocator<T>::Alloc()
4657 {
4658  for(size_t i = m_ItemBlocks.size(); i--; )
4659  {
4660  ItemBlock& block = m_ItemBlocks[i];
4661  // This block has some free items: Use first one.
4662  if(block.FirstFreeIndex != UINT32_MAX)
4663  {
4664  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4665  block.FirstFreeIndex = pItem->NextFreeIndex;
4666  T* result = (T*)&pItem->Value;
4667  new(result)T(); // Explicit constructor call.
4668  return result;
4669  }
4670  }
4671 
4672  // No block has free item: Create new one and use it.
4673  ItemBlock& newBlock = CreateNewBlock();
4674  Item* const pItem = &newBlock.pItems[0];
4675  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4676  T* result = (T*)&pItem->Value;
4677  new(result)T(); // Explicit constructor call.
4678  return result;
4679 }
4680 
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of a pointer cast sidesteps strict-aliasing warnings;
        // Item and T share the same storage through the union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            // Push the freed slot onto the head of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
4705 
4706 template<typename T>
4707 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4708 {
4709  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4710  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4711 
4712  const ItemBlock newBlock = {
4713  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4714  newBlockCapacity,
4715  0 };
4716 
4717  m_ItemBlocks.push_back(newBlock);
4718 
4719  // Setup singly-linked list of all free items in this block.
4720  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4721  newBlock.pItems[i].NextFreeIndex = i + 1;
4722  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4723  return m_ItemBlocks.back();
4724 }
4725 
4727 // class VmaRawList, VmaList
4728 
4729 #if VMA_USE_STL_LIST
4730 
4731 #define VmaList std::list
4732 
4733 #else // #if VMA_USE_STL_LIST
4734 
// Node of VmaRawList: doubly linked, with the value stored inline.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
4742 
4743 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items and resets the list to empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // The parameterless Push* variants leave Value uninitialized for the
    // caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Items come from a pool, not individual heap allocations.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
4787 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 = item capacity of the pool's first block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
4797 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear: returning every item to m_ItemAllocator
    // would be wasted work, since the pool allocator releases all of its blocks
    // in its own destructor anyway.
}
4804 
4805 template<typename T>
4806 void VmaRawList<T>::Clear()
4807 {
4808  if(IsEmpty() == false)
4809  {
4810  ItemType* pItem = m_pBack;
4811  while(pItem != VMA_NULL)
4812  {
4813  ItemType* const pPrevItem = pItem->pPrev;
4814  m_ItemAllocator.Free(pItem);
4815  pItem = pPrevItem;
4816  }
4817  m_pFront = VMA_NULL;
4818  m_pBack = VMA_NULL;
4819  m_Count = 0;
4820  }
4821 }
4822 
4823 template<typename T>
4824 VmaListItem<T>* VmaRawList<T>::PushBack()
4825 {
4826  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4827  pNewItem->pNext = VMA_NULL;
4828  if(IsEmpty())
4829  {
4830  pNewItem->pPrev = VMA_NULL;
4831  m_pFront = pNewItem;
4832  m_pBack = pNewItem;
4833  m_Count = 1;
4834  }
4835  else
4836  {
4837  pNewItem->pPrev = m_pBack;
4838  m_pBack->pNext = pNewItem;
4839  m_pBack = pNewItem;
4840  ++m_Count;
4841  }
4842  return pNewItem;
4843 }
4844 
4845 template<typename T>
4846 VmaListItem<T>* VmaRawList<T>::PushFront()
4847 {
4848  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4849  pNewItem->pPrev = VMA_NULL;
4850  if(IsEmpty())
4851  {
4852  pNewItem->pNext = VMA_NULL;
4853  m_pFront = pNewItem;
4854  m_pBack = pNewItem;
4855  m_Count = 1;
4856  }
4857  else
4858  {
4859  pNewItem->pNext = m_pFront;
4860  m_pFront->pPrev = pNewItem;
4861  m_pFront = pNewItem;
4862  ++m_Count;
4863  }
4864  return pNewItem;
4865 }
4866 
4867 template<typename T>
4868 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4869 {
4870  ItemType* const pNewItem = PushBack();
4871  pNewItem->Value = value;
4872  return pNewItem;
4873 }
4874 
4875 template<typename T>
4876 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4877 {
4878  ItemType* const pNewItem = PushFront();
4879  pNewItem->Value = value;
4880  return pNewItem;
4881 }
4882 
4883 template<typename T>
4884 void VmaRawList<T>::PopBack()
4885 {
4886  VMA_HEAVY_ASSERT(m_Count > 0);
4887  ItemType* const pBackItem = m_pBack;
4888  ItemType* const pPrevItem = pBackItem->pPrev;
4889  if(pPrevItem != VMA_NULL)
4890  {
4891  pPrevItem->pNext = VMA_NULL;
4892  }
4893  m_pBack = pPrevItem;
4894  m_ItemAllocator.Free(pBackItem);
4895  --m_Count;
4896 }
4897 
4898 template<typename T>
4899 void VmaRawList<T>::PopFront()
4900 {
4901  VMA_HEAVY_ASSERT(m_Count > 0);
4902  ItemType* const pFrontItem = m_pFront;
4903  ItemType* const pNextItem = pFrontItem->pNext;
4904  if(pNextItem != VMA_NULL)
4905  {
4906  pNextItem->pPrev = VMA_NULL;
4907  }
4908  m_pFront = pNextItem;
4909  m_ItemAllocator.Free(pFrontItem);
4910  --m_Count;
4911 }
4912 
4913 template<typename T>
4914 void VmaRawList<T>::Remove(ItemType* pItem)
4915 {
4916  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4917  VMA_HEAVY_ASSERT(m_Count > 0);
4918 
4919  if(pItem->pPrev != VMA_NULL)
4920  {
4921  pItem->pPrev->pNext = pItem->pNext;
4922  }
4923  else
4924  {
4925  VMA_HEAVY_ASSERT(m_pFront == pItem);
4926  m_pFront = pItem->pNext;
4927  }
4928 
4929  if(pItem->pNext != VMA_NULL)
4930  {
4931  pItem->pNext->pPrev = pItem->pPrev;
4932  }
4933  else
4934  {
4935  VMA_HEAVY_ASSERT(m_pBack == pItem);
4936  m_pBack = pItem->pPrev;
4937  }
4938 
4939  m_ItemAllocator.Free(pItem);
4940  --m_Count;
4941 }
4942 
4943 template<typename T>
4944 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4945 {
4946  if(pItem != VMA_NULL)
4947  {
4948  ItemType* const prevItem = pItem->pPrev;
4949  ItemType* const newItem = m_ItemAllocator.Alloc();
4950  newItem->pPrev = prevItem;
4951  newItem->pNext = pItem;
4952  pItem->pPrev = newItem;
4953  if(prevItem != VMA_NULL)
4954  {
4955  prevItem->pNext = newItem;
4956  }
4957  else
4958  {
4959  VMA_HEAVY_ASSERT(m_pFront == pItem);
4960  m_pFront = newItem;
4961  }
4962  ++m_Count;
4963  return newItem;
4964  }
4965  else
4966  return PushBack();
4967 }
4968 
4969 template<typename T>
4970 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4971 {
4972  if(pItem != VMA_NULL)
4973  {
4974  ItemType* const nextItem = pItem->pNext;
4975  ItemType* const newItem = m_ItemAllocator.Alloc();
4976  newItem->pNext = nextItem;
4977  newItem->pPrev = pItem;
4978  pItem->pNext = newItem;
4979  if(nextItem != VMA_NULL)
4980  {
4981  nextItem->pPrev = newItem;
4982  }
4983  else
4984  {
4985  VMA_HEAVY_ASSERT(m_pBack == pItem);
4986  m_pBack = newItem;
4987  }
4988  ++m_Count;
4989  return newItem;
4990  }
4991  else
4992  return PushFront();
4993 }
4994 
4995 template<typename T>
4996 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4997 {
4998  ItemType* const newItem = InsertBefore(pItem);
4999  newItem->Value = value;
5000  return newItem;
5001 }
5002 
5003 template<typename T>
5004 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5005 {
5006  ItemType* const newItem = InsertAfter(pItem);
5007  newItem->Value = value;
5008  return newItem;
5009 }
5010 
5011 template<typename T, typename AllocatorT>
5012 class VmaList
5013 {
5014  VMA_CLASS_NO_COPY(VmaList)
5015 public:
5016  class iterator
5017  {
5018  public:
5019  iterator() :
5020  m_pList(VMA_NULL),
5021  m_pItem(VMA_NULL)
5022  {
5023  }
5024 
5025  T& operator*() const
5026  {
5027  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5028  return m_pItem->Value;
5029  }
5030  T* operator->() const
5031  {
5032  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5033  return &m_pItem->Value;
5034  }
5035 
5036  iterator& operator++()
5037  {
5038  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5039  m_pItem = m_pItem->pNext;
5040  return *this;
5041  }
5042  iterator& operator--()
5043  {
5044  if(m_pItem != VMA_NULL)
5045  {
5046  m_pItem = m_pItem->pPrev;
5047  }
5048  else
5049  {
5050  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5051  m_pItem = m_pList->Back();
5052  }
5053  return *this;
5054  }
5055 
5056  iterator operator++(int)
5057  {
5058  iterator result = *this;
5059  ++*this;
5060  return result;
5061  }
5062  iterator operator--(int)
5063  {
5064  iterator result = *this;
5065  --*this;
5066  return result;
5067  }
5068 
5069  bool operator==(const iterator& rhs) const
5070  {
5071  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5072  return m_pItem == rhs.m_pItem;
5073  }
5074  bool operator!=(const iterator& rhs) const
5075  {
5076  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5077  return m_pItem != rhs.m_pItem;
5078  }
5079 
5080  private:
5081  VmaRawList<T>* m_pList;
5082  VmaListItem<T>* m_pItem;
5083 
5084  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5085  m_pList(pList),
5086  m_pItem(pItem)
5087  {
5088  }
5089 
5090  friend class VmaList<T, AllocatorT>;
5091  };
5092 
5093  class const_iterator
5094  {
5095  public:
5096  const_iterator() :
5097  m_pList(VMA_NULL),
5098  m_pItem(VMA_NULL)
5099  {
5100  }
5101 
5102  const_iterator(const iterator& src) :
5103  m_pList(src.m_pList),
5104  m_pItem(src.m_pItem)
5105  {
5106  }
5107 
5108  const T& operator*() const
5109  {
5110  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5111  return m_pItem->Value;
5112  }
5113  const T* operator->() const
5114  {
5115  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5116  return &m_pItem->Value;
5117  }
5118 
5119  const_iterator& operator++()
5120  {
5121  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5122  m_pItem = m_pItem->pNext;
5123  return *this;
5124  }
5125  const_iterator& operator--()
5126  {
5127  if(m_pItem != VMA_NULL)
5128  {
5129  m_pItem = m_pItem->pPrev;
5130  }
5131  else
5132  {
5133  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5134  m_pItem = m_pList->Back();
5135  }
5136  return *this;
5137  }
5138 
5139  const_iterator operator++(int)
5140  {
5141  const_iterator result = *this;
5142  ++*this;
5143  return result;
5144  }
5145  const_iterator operator--(int)
5146  {
5147  const_iterator result = *this;
5148  --*this;
5149  return result;
5150  }
5151 
5152  bool operator==(const const_iterator& rhs) const
5153  {
5154  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5155  return m_pItem == rhs.m_pItem;
5156  }
5157  bool operator!=(const const_iterator& rhs) const
5158  {
5159  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5160  return m_pItem != rhs.m_pItem;
5161  }
5162 
5163  private:
5164  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5165  m_pList(pList),
5166  m_pItem(pItem)
5167  {
5168  }
5169 
5170  const VmaRawList<T>* m_pList;
5171  const VmaListItem<T>* m_pItem;
5172 
5173  friend class VmaList<T, AllocatorT>;
5174  };
5175 
    // Forwards the allocator's VkAllocationCallbacks to the underlying raw list.
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    // end() is represented by a null item pointer.
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Invalidates `it`; other iterators into the list remain valid.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before `it` and returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
5194 };
5195 
5196 #endif // #if VMA_USE_STL_LIST
5197 
5199 // class VmaMap
5200 
5201 // Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

// Minimal std::pair replacement used when the STL map is disabled.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key, searched with binary search.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

// Comparator ordering pairs (or a pair against a bare key) by `first`.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

// Inserts `pair` at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

// Binary-searches for `key`; returns end() when not found.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
5299 
5301 
class VmaDeviceMemoryBlock;

// Direction of a cache-maintenance operation on mapped memory.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5305 
// Internal state of a single allocation: either a suballocation within a
// VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a dedicated VkDeviceMemory
// object (ALLOCATION_TYPE_DEDICATED). Which union member is active is
// determined by m_Type.
struct VmaAllocation_T
{
private:
    // Bit set in m_MapCount when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData holds an owned string copy rather than an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Not initialized yet.
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    /*
    This struct is allocated using VmaPoolAllocator.
    */

    // Poor man's constructor (objects come from VmaPoolAllocator, so there is
    // no real ctor): resets all members to the "none" state.
    void Ctor(uint32_t currentFrameIndex, bool userDataString)
    {
        m_Alignment = 1;
        m_Size = 0;
        m_MemoryTypeIndex = 0;
        m_pUserData = VMA_NULL;
        m_LastUseFrameIndex = currentFrameIndex;
        m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
        m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
        m_MapCount = 0;
        m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;

#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    // Poor man's destructor: only validates that the allocation was unmapped
    // and its user data freed before being returned to the pool.
    void Dtor()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns a freshly Ctor'ed allocation into a suballocation of `block`.
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MemoryTypeIndex = memoryTypeIndex;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the allocation directly in the "lost" state: block-type with
    // a null block. Caller must have already set m_LastUseFrameIndex to
    // VMA_FRAME_INDEX_LOST (asserted below).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_MemoryTypeIndex = 0;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_MemoryTypeIndex = memoryTypeIndex;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index. Uses compare_exchange_weak, so
    // callers are expected to loop on spurious failure.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at the "empty" sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be called at most once (asserted): records the buffer/image usage
    // flags for stats reporting.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
5527 
5528 /*
5529 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5530 allocated memory block or free.
5531 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset from the beginning of the block, in bytes.
    VkDeviceSize size;   // Size of the region, in bytes.
    // NOTE(review): appears to be VMA_NULL for free/lost regions (see the
    // "hAllocation = null" counters in VmaBlockMetadata_Linear) — confirm.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
5539 
5540 // Comparator for offsets.
5541 struct VmaSuballocationOffsetLess
5542 {
5543  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5544  {
5545  return lhs.offset < rhs.offset;
5546  }
5547 };
5548 struct VmaSuballocationOffsetGreater
5549 {
5550  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5551  {
5552  return lhs.offset > rhs.offset;
5553  }
5554 };
5555 
// List of suballocations within a single device memory block, used by the
// VmaBlockMetadata implementations below.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5560 
// Placement strategy carried in VmaAllocationRequest::type.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
5569 
5570 /*
5571 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5572 
5573 If canMakeOtherLost was false:
5574 - item points to a FREE suballocation.
5575 - itemsToMakeLostCount is 0.
5576 
5577 If canMakeOtherLost was true:
5578 - item points to first of sequence of suballocations, which are either FREE,
5579  or point to VmaAllocations that can become lost.
5580 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5581  the requested allocation to succeed.
5582 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // NOTE(review): algorithm-specific payload; exact meaning depends on the
    // VmaBlockMetadata implementation that filled the request — confirm at use sites.
    void* customData;
    VmaAllocationRequestType type;

    // Cost of satisfying this request: bytes of allocations to be lost plus a
    // fixed per-lost-allocation penalty (VMA_LOST_ALLOCATION_COST).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
5598 
5599 /*
5600 Data structure used for bookkeeping of allocations and unused ranges of memory
5601 in a single VkDeviceMemory block.
5602 */
// Abstract base for the per-block allocation algorithms (Generic, Linear,
// Buddy below). Thread-safety is the caller's responsibility.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called right after construction. Overrides should call the base.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations referenced by pAllocationRequest so the
    // requested allocation can proceed.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // NOTE(review): return value appears to be the number of allocations made
    // lost — confirm against the implementations.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers shared by the PrintDetailedMap implementations of derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
5684 
// Helper for Validate() implementations: if `cond` fails, asserts and makes
// the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
5689 
// Default metadata algorithm: keeps every suballocation in a list
// (m_Suballocations), plus free suballocations above a size threshold in a
// separate vector sorted by size, ascending, for best-fit search.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Free suballocations are stored in the same list, hence the subtraction.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
5789 
5790 /*
5791 Allocations and their references in internal data structure look like this:
5792 
5793 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5794 
5795  0 +-------+
5796  | |
5797  | |
5798  | |
5799  +-------+
5800  | Alloc | 1st[m_1stNullItemsBeginCount]
5801  +-------+
5802  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5803  +-------+
5804  | ... |
5805  +-------+
5806  | Alloc | 1st[1st.size() - 1]
5807  +-------+
5808  | |
5809  | |
5810  | |
5811 GetSize() +-------+
5812 
5813 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5814 
5815  0 +-------+
5816  | Alloc | 2nd[0]
5817  +-------+
5818  | Alloc | 2nd[1]
5819  +-------+
5820  | ... |
5821  +-------+
5822  | Alloc | 2nd[2nd.size() - 1]
5823  +-------+
5824  | |
5825  | |
5826  | |
5827  +-------+
5828  | Alloc | 1st[m_1stNullItemsBeginCount]
5829  +-------+
5830  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5831  +-------+
5832  | ... |
5833  +-------+
5834  | Alloc | 1st[1st.size() - 1]
5835  +-------+
5836  | |
5837 GetSize() +-------+
5838 
5839 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5840 
5841  0 +-------+
5842  | |
5843  | |
5844  | |
5845  +-------+
5846  | Alloc | 1st[m_1stNullItemsBeginCount]
5847  +-------+
5848  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5849  +-------+
5850  | ... |
5851  +-------+
5852  | Alloc | 1st[1st.size() - 1]
5853  +-------+
5854  | |
5855  | |
5856  | |
5857  +-------+
5858  | Alloc | 2nd[2nd.size() - 1]
5859  +-------+
5860  | ... |
5861  +-------+
5862  | Alloc | 2nd[1]
5863  +-------+
5864  | Alloc | 2nd[0]
5865 GetSize() +-------+
5866 
5867 */
// Metadata algorithm for linear allocation, ring buffer, and double stack.
// The layout of the two suballocation vectors for each mode is shown in the
// diagram comment above this class.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
5986 
5987 /*
5988 - GetSize() is the original size of allocated memory block.
5989 - m_UsableSize is this size aligned down to a power of two.
5990  All allocations and calculations happen relative to m_UsableSize.
5991 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
5993 
5994 Node at level 0 has size = m_UsableSize.
5995 Each next level contains nodes with size 2 times smaller than current level.
5996 m_LevelCount is the maximum number of levels to use in the current object.
5997 */
// Buddy-system metadata algorithm: a binary tree over m_UsableSize where each
// level halves the node size. See the explanatory comment above this class.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (beyond the power-of-two m_UsableSize) counts as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Counters recomputed during Validate() and compared with the members below.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. The active union member is selected by `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            // TYPE_FREE: links in the per-level free list.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: left child (right child is leftChild->buddy).
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of free nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down from m_UsableSize at level 0.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
6133 
6134 /*
6135 Represents a single block of device memory (`VkDeviceMemory`) with all the
6136 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6137 
6138 Thread-safety: This class must be externally synchronized.
6139 */
6140 class VmaDeviceMemoryBlock
6141 {
6142  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6143 public:
6144  VmaBlockMetadata* m_pMetadata;
6145 
6146  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6147 
6148  ~VmaDeviceMemoryBlock()
6149  {
6150  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6151  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6152  }
6153 
6154  // Always call after construction.
6155  void Init(
6156  VmaAllocator hAllocator,
6157  VmaPool hParentPool,
6158  uint32_t newMemoryTypeIndex,
6159  VkDeviceMemory newMemory,
6160  VkDeviceSize newSize,
6161  uint32_t id,
6162  uint32_t algorithm);
6163  // Always call before destruction.
6164  void Destroy(VmaAllocator allocator);
6165 
6166  VmaPool GetParentPool() const { return m_hParentPool; }
6167  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6168  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6169  uint32_t GetId() const { return m_Id; }
6170  void* GetMappedData() const { return m_pMappedData; }
6171 
6172  // Validates all data structures inside this object. If not valid, returns false.
6173  bool Validate() const;
6174 
6175  VkResult CheckCorruption(VmaAllocator hAllocator);
6176 
6177  // ppData can be null.
6178  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6179  void Unmap(VmaAllocator hAllocator, uint32_t count);
6180 
6181  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6182  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6183 
6184  VkResult BindBufferMemory(
6185  const VmaAllocator hAllocator,
6186  const VmaAllocation hAllocation,
6187  VkDeviceSize allocationLocalOffset,
6188  VkBuffer hBuffer,
6189  const void* pNext);
6190  VkResult BindImageMemory(
6191  const VmaAllocator hAllocator,
6192  const VmaAllocation hAllocation,
6193  VkDeviceSize allocationLocalOffset,
6194  VkImage hImage,
6195  const void* pNext);
6196 
6197 private:
6198  VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
6199  uint32_t m_MemoryTypeIndex;
6200  uint32_t m_Id;
6201  VkDeviceMemory m_hMemory;
6202 
6203  /*
6204  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6205  Also protects m_MapCount, m_pMappedData.
6206  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6207  */
6208  VMA_MUTEX m_Mutex;
6209  uint32_t m_MapCount;
6210  void* m_pMappedData;
6211 };
6212 
// Strict weak ordering on raw pointer values, for use as a comparator
// in sorted containers of pointers.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return lhs < rhs;
    }
};
6220 
6221 struct VmaDefragmentationMove
6222 {
6223  size_t srcBlockIndex;
6224  size_t dstBlockIndex;
6225  VkDeviceSize srcOffset;
6226  VkDeviceSize dstOffset;
6227  VkDeviceSize size;
6228 };
6229 
6230 class VmaDefragmentationAlgorithm;
6231 
6232 /*
6233 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6234 Vulkan memory type.
6235 
6236 Synchronized internally with a mutex.
6237 */
6238 struct VmaBlockVector
6239 {
6240  VMA_CLASS_NO_COPY(VmaBlockVector)
6241 public:
6242  VmaBlockVector(
6243  VmaAllocator hAllocator,
6244  VmaPool hParentPool,
6245  uint32_t memoryTypeIndex,
6246  VkDeviceSize preferredBlockSize,
6247  size_t minBlockCount,
6248  size_t maxBlockCount,
6249  VkDeviceSize bufferImageGranularity,
6250  uint32_t frameInUseCount,
6251  bool explicitBlockSize,
6252  uint32_t algorithm);
6253  ~VmaBlockVector();
6254 
6255  VkResult CreateMinBlocks();
6256 
6257  VmaAllocator GetAllocator() const { return m_hAllocator; }
6258  VmaPool GetParentPool() const { return m_hParentPool; }
6259  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6260  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6261  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6262  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6263  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6264  uint32_t GetAlgorithm() const { return m_Algorithm; }
6265 
6266  void GetPoolStats(VmaPoolStats* pStats);
6267 
6268  bool IsEmpty() const { return m_Blocks.empty(); }
6269  bool IsCorruptionDetectionEnabled() const;
6270 
6271  VkResult Allocate(
6272  uint32_t currentFrameIndex,
6273  VkDeviceSize size,
6274  VkDeviceSize alignment,
6275  const VmaAllocationCreateInfo& createInfo,
6276  VmaSuballocationType suballocType,
6277  size_t allocationCount,
6278  VmaAllocation* pAllocations);
6279 
6280  void Free(const VmaAllocation hAllocation);
6281 
6282  // Adds statistics of this BlockVector to pStats.
6283  void AddStats(VmaStats* pStats);
6284 
6285 #if VMA_STATS_STRING_ENABLED
6286  void PrintDetailedMap(class VmaJsonWriter& json);
6287 #endif
6288 
6289  void MakePoolAllocationsLost(
6290  uint32_t currentFrameIndex,
6291  size_t* pLostAllocationCount);
6292  VkResult CheckCorruption();
6293 
6294  // Saves results in pCtx->res.
6295  void Defragment(
6296  class VmaBlockVectorDefragmentationContext* pCtx,
6297  VmaDefragmentationStats* pStats,
6298  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6299  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6300  VkCommandBuffer commandBuffer);
6301  void DefragmentationEnd(
6302  class VmaBlockVectorDefragmentationContext* pCtx,
6303  VmaDefragmentationStats* pStats);
6304 
6306  // To be used only while the m_Mutex is locked. Used during defragmentation.
6307 
6308  size_t GetBlockCount() const { return m_Blocks.size(); }
6309  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6310  size_t CalcAllocationCount() const;
6311  bool IsBufferImageGranularityConflictPossible() const;
6312 
6313 private:
6314  friend class VmaDefragmentationAlgorithm_Generic;
6315 
6316  const VmaAllocator m_hAllocator;
6317  const VmaPool m_hParentPool;
6318  const uint32_t m_MemoryTypeIndex;
6319  const VkDeviceSize m_PreferredBlockSize;
6320  const size_t m_MinBlockCount;
6321  const size_t m_MaxBlockCount;
6322  const VkDeviceSize m_BufferImageGranularity;
6323  const uint32_t m_FrameInUseCount;
6324  const bool m_ExplicitBlockSize;
6325  const uint32_t m_Algorithm;
6326  VMA_RW_MUTEX m_Mutex;
6327 
6328  /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) -
6329  a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6330  bool m_HasEmptyBlock;
6331  // Incrementally sorted by sumFreeSize, ascending.
6332  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6333  uint32_t m_NextBlockId;
6334 
6335  VkDeviceSize CalcMaxBlockSize() const;
6336 
6337  // Finds and removes given block from vector.
6338  void Remove(VmaDeviceMemoryBlock* pBlock);
6339 
6340  // Performs single step in sorting m_Blocks. They may not be fully sorted
6341  // after this call.
6342  void IncrementallySortBlocks();
6343 
6344  VkResult AllocatePage(
6345  uint32_t currentFrameIndex,
6346  VkDeviceSize size,
6347  VkDeviceSize alignment,
6348  const VmaAllocationCreateInfo& createInfo,
6349  VmaSuballocationType suballocType,
6350  VmaAllocation* pAllocation);
6351 
6352  // To be used only without CAN_MAKE_OTHER_LOST flag.
6353  VkResult AllocateFromBlock(
6354  VmaDeviceMemoryBlock* pBlock,
6355  uint32_t currentFrameIndex,
6356  VkDeviceSize size,
6357  VkDeviceSize alignment,
6358  VmaAllocationCreateFlags allocFlags,
6359  void* pUserData,
6360  VmaSuballocationType suballocType,
6361  uint32_t strategy,
6362  VmaAllocation* pAllocation);
6363 
6364  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6365 
6366  // Saves result to pCtx->res.
6367  void ApplyDefragmentationMovesCpu(
6368  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6369  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6370  // Saves result to pCtx->res.
6371  void ApplyDefragmentationMovesGpu(
6372  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6373  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6374  VkCommandBuffer commandBuffer);
6375 
6376  /*
6377  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6378  - updated with new data.
6379  */
6380  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6381 
6382  void UpdateHasEmptyBlock();
6383 };
6384 
6385 struct VmaPool_T
6386 {
6387  VMA_CLASS_NO_COPY(VmaPool_T)
6388 public:
6389  VmaBlockVector m_BlockVector;
6390 
6391  VmaPool_T(
6392  VmaAllocator hAllocator,
6393  const VmaPoolCreateInfo& createInfo,
6394  VkDeviceSize preferredBlockSize);
6395  ~VmaPool_T();
6396 
6397  uint32_t GetId() const { return m_Id; }
6398  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6399 
6400  const char* GetName() const { return m_Name; }
6401  void SetName(const char* pName);
6402 
6403 #if VMA_STATS_STRING_ENABLED
6404  //void PrintDetailedMap(class VmaStringBuilder& sb);
6405 #endif
6406 
6407 private:
6408  uint32_t m_Id;
6409  char* m_Name;
6410 };
6411 
6412 /*
6413 Performs defragmentation:
6414 
6415 - Updates `pBlockVector->m_pMetadata`.
6416 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6417 - Does not move actual data, only returns requested moves as `moves`.
6418 */
6419 class VmaDefragmentationAlgorithm
6420 {
6421  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6422 public:
6423  VmaDefragmentationAlgorithm(
6424  VmaAllocator hAllocator,
6425  VmaBlockVector* pBlockVector,
6426  uint32_t currentFrameIndex) :
6427  m_hAllocator(hAllocator),
6428  m_pBlockVector(pBlockVector),
6429  m_CurrentFrameIndex(currentFrameIndex)
6430  {
6431  }
6432  virtual ~VmaDefragmentationAlgorithm()
6433  {
6434  }
6435 
6436  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6437  virtual void AddAll() = 0;
6438 
6439  virtual VkResult Defragment(
6440  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6441  VkDeviceSize maxBytesToMove,
6442  uint32_t maxAllocationsToMove) = 0;
6443 
6444  virtual VkDeviceSize GetBytesMoved() const = 0;
6445  virtual uint32_t GetAllocationsMoved() const = 0;
6446 
6447 protected:
6448  VmaAllocator const m_hAllocator;
6449  VmaBlockVector* const m_pBlockVector;
6450  const uint32_t m_CurrentFrameIndex;
6451 
6452  struct AllocationInfo
6453  {
6454  VmaAllocation m_hAllocation;
6455  VkBool32* m_pChanged;
6456 
6457  AllocationInfo() :
6458  m_hAllocation(VK_NULL_HANDLE),
6459  m_pChanged(VMA_NULL)
6460  {
6461  }
6462  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6463  m_hAllocation(hAlloc),
6464  m_pChanged(pChanged)
6465  {
6466  }
6467  };
6468 };
6469 
6470 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6471 {
6472  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6473 public:
6474  VmaDefragmentationAlgorithm_Generic(
6475  VmaAllocator hAllocator,
6476  VmaBlockVector* pBlockVector,
6477  uint32_t currentFrameIndex,
6478  bool overlappingMoveSupported);
6479  virtual ~VmaDefragmentationAlgorithm_Generic();
6480 
6481  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6482  virtual void AddAll() { m_AllAllocations = true; }
6483 
6484  virtual VkResult Defragment(
6485  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6486  VkDeviceSize maxBytesToMove,
6487  uint32_t maxAllocationsToMove);
6488 
6489  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6490  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6491 
6492 private:
6493  uint32_t m_AllocationCount;
6494  bool m_AllAllocations;
6495 
6496  VkDeviceSize m_BytesMoved;
6497  uint32_t m_AllocationsMoved;
6498 
6499  struct AllocationInfoSizeGreater
6500  {
6501  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6502  {
6503  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6504  }
6505  };
6506 
6507  struct AllocationInfoOffsetGreater
6508  {
6509  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6510  {
6511  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6512  }
6513  };
6514 
6515  struct BlockInfo
6516  {
6517  size_t m_OriginalBlockIndex;
6518  VmaDeviceMemoryBlock* m_pBlock;
6519  bool m_HasNonMovableAllocations;
6520  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6521 
6522  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6523  m_OriginalBlockIndex(SIZE_MAX),
6524  m_pBlock(VMA_NULL),
6525  m_HasNonMovableAllocations(true),
6526  m_Allocations(pAllocationCallbacks)
6527  {
6528  }
6529 
6530  void CalcHasNonMovableAllocations()
6531  {
6532  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6533  const size_t defragmentAllocCount = m_Allocations.size();
6534  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6535  }
6536 
6537  void SortAllocationsBySizeDescending()
6538  {
6539  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6540  }
6541 
6542  void SortAllocationsByOffsetDescending()
6543  {
6544  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6545  }
6546  };
6547 
6548  struct BlockPointerLess
6549  {
6550  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6551  {
6552  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6553  }
6554  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6555  {
6556  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6557  }
6558  };
6559 
6560  // 1. Blocks with some non-movable allocations go first.
6561  // 2. Blocks with smaller sumFreeSize go first.
6562  struct BlockInfoCompareMoveDestination
6563  {
6564  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6565  {
6566  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6567  {
6568  return true;
6569  }
6570  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6571  {
6572  return false;
6573  }
6574  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6575  {
6576  return true;
6577  }
6578  return false;
6579  }
6580  };
6581 
6582  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6583  BlockInfoVector m_Blocks;
6584 
6585  VkResult DefragmentRound(
6586  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6587  VkDeviceSize maxBytesToMove,
6588  uint32_t maxAllocationsToMove);
6589 
6590  size_t CalcBlocksWithNonMovableCount() const;
6591 
6592  static bool MoveMakesSense(
6593  size_t dstBlockIndex, VkDeviceSize dstOffset,
6594  size_t srcBlockIndex, VkDeviceSize srcOffset);
6595 };
6596 
6597 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6598 {
6599  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6600 public:
6601  VmaDefragmentationAlgorithm_Fast(
6602  VmaAllocator hAllocator,
6603  VmaBlockVector* pBlockVector,
6604  uint32_t currentFrameIndex,
6605  bool overlappingMoveSupported);
6606  virtual ~VmaDefragmentationAlgorithm_Fast();
6607 
6608  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6609  virtual void AddAll() { m_AllAllocations = true; }
6610 
6611  virtual VkResult Defragment(
6612  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6613  VkDeviceSize maxBytesToMove,
6614  uint32_t maxAllocationsToMove);
6615 
6616  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6617  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6618 
6619 private:
6620  struct BlockInfo
6621  {
6622  size_t origBlockIndex;
6623  };
6624 
6625  class FreeSpaceDatabase
6626  {
6627  public:
6628  FreeSpaceDatabase()
6629  {
6630  FreeSpace s = {};
6631  s.blockInfoIndex = SIZE_MAX;
6632  for(size_t i = 0; i < MAX_COUNT; ++i)
6633  {
6634  m_FreeSpaces[i] = s;
6635  }
6636  }
6637 
6638  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6639  {
6640  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6641  {
6642  return;
6643  }
6644 
6645  // Find first invalid or the smallest structure.
6646  size_t bestIndex = SIZE_MAX;
6647  for(size_t i = 0; i < MAX_COUNT; ++i)
6648  {
6649  // Empty structure.
6650  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6651  {
6652  bestIndex = i;
6653  break;
6654  }
6655  if(m_FreeSpaces[i].size < size &&
6656  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6657  {
6658  bestIndex = i;
6659  }
6660  }
6661 
6662  if(bestIndex != SIZE_MAX)
6663  {
6664  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6665  m_FreeSpaces[bestIndex].offset = offset;
6666  m_FreeSpaces[bestIndex].size = size;
6667  }
6668  }
6669 
6670  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6671  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6672  {
6673  size_t bestIndex = SIZE_MAX;
6674  VkDeviceSize bestFreeSpaceAfter = 0;
6675  for(size_t i = 0; i < MAX_COUNT; ++i)
6676  {
6677  // Structure is valid.
6678  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6679  {
6680  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6681  // Allocation fits into this structure.
6682  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6683  {
6684  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6685  (dstOffset + size);
6686  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6687  {
6688  bestIndex = i;
6689  bestFreeSpaceAfter = freeSpaceAfter;
6690  }
6691  }
6692  }
6693  }
6694 
6695  if(bestIndex != SIZE_MAX)
6696  {
6697  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6698  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6699 
6700  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6701  {
6702  // Leave this structure for remaining empty space.
6703  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6704  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6705  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6706  }
6707  else
6708  {
6709  // This structure becomes invalid.
6710  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6711  }
6712 
6713  return true;
6714  }
6715 
6716  return false;
6717  }
6718 
6719  private:
6720  static const size_t MAX_COUNT = 4;
6721 
6722  struct FreeSpace
6723  {
6724  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6725  VkDeviceSize offset;
6726  VkDeviceSize size;
6727  } m_FreeSpaces[MAX_COUNT];
6728  };
6729 
6730  const bool m_OverlappingMoveSupported;
6731 
6732  uint32_t m_AllocationCount;
6733  bool m_AllAllocations;
6734 
6735  VkDeviceSize m_BytesMoved;
6736  uint32_t m_AllocationsMoved;
6737 
6738  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6739 
6740  void PreprocessMetadata();
6741  void PostprocessMetadata();
6742  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6743 };
6744 
6745 struct VmaBlockDefragmentationContext
6746 {
6747  enum BLOCK_FLAG
6748  {
6749  BLOCK_FLAG_USED = 0x00000001,
6750  };
6751  uint32_t flags;
6752  VkBuffer hBuffer;
6753 };
6754 
6755 class VmaBlockVectorDefragmentationContext
6756 {
6757  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6758 public:
6759  VkResult res;
6760  bool mutexLocked;
6761  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6762 
6763  VmaBlockVectorDefragmentationContext(
6764  VmaAllocator hAllocator,
6765  VmaPool hCustomPool, // Optional.
6766  VmaBlockVector* pBlockVector,
6767  uint32_t currFrameIndex);
6768  ~VmaBlockVectorDefragmentationContext();
6769 
6770  VmaPool GetCustomPool() const { return m_hCustomPool; }
6771  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6772  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6773 
6774  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6775  void AddAll() { m_AllAllocations = true; }
6776 
6777  void Begin(bool overlappingMoveSupported);
6778 
6779 private:
6780  const VmaAllocator m_hAllocator;
6781  // Null if not from custom pool.
6782  const VmaPool m_hCustomPool;
6783  // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6784  VmaBlockVector* const m_pBlockVector;
6785  const uint32_t m_CurrFrameIndex;
6786  // Owner of this object.
6787  VmaDefragmentationAlgorithm* m_pAlgorithm;
6788 
6789  struct AllocInfo
6790  {
6791  VmaAllocation hAlloc;
6792  VkBool32* pChanged;
6793  };
6794  // Used between constructor and Begin.
6795  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6796  bool m_AllAllocations;
6797 };
6798 
6799 struct VmaDefragmentationContext_T
6800 {
6801 private:
6802  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6803 public:
6804  VmaDefragmentationContext_T(
6805  VmaAllocator hAllocator,
6806  uint32_t currFrameIndex,
6807  uint32_t flags,
6808  VmaDefragmentationStats* pStats);
6809  ~VmaDefragmentationContext_T();
6810 
6811  void AddPools(uint32_t poolCount, VmaPool* pPools);
6812  void AddAllocations(
6813  uint32_t allocationCount,
6814  VmaAllocation* pAllocations,
6815  VkBool32* pAllocationsChanged);
6816 
6817  /*
6818  Returns:
6819  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6820  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6821  - Negative value if error occured and object can be destroyed immediately.
6822  */
6823  VkResult Defragment(
6824  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6825  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6826  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6827 
6828 private:
6829  const VmaAllocator m_hAllocator;
6830  const uint32_t m_CurrFrameIndex;
6831  const uint32_t m_Flags;
6832  VmaDefragmentationStats* const m_pStats;
6833  // Owner of these objects.
6834  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6835  // Owner of these objects.
6836  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6837 };
6838 
6839 #if VMA_RECORDING_ENABLED
6840 
6841 class VmaRecorder
6842 {
6843 public:
6844  VmaRecorder();
6845  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6846  void WriteConfiguration(
6847  const VkPhysicalDeviceProperties& devProps,
6848  const VkPhysicalDeviceMemoryProperties& memProps,
6849  uint32_t vulkanApiVersion,
6850  bool dedicatedAllocationExtensionEnabled,
6851  bool bindMemory2ExtensionEnabled,
6852  bool memoryBudgetExtensionEnabled);
6853  ~VmaRecorder();
6854 
6855  void RecordCreateAllocator(uint32_t frameIndex);
6856  void RecordDestroyAllocator(uint32_t frameIndex);
6857  void RecordCreatePool(uint32_t frameIndex,
6858  const VmaPoolCreateInfo& createInfo,
6859  VmaPool pool);
6860  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6861  void RecordAllocateMemory(uint32_t frameIndex,
6862  const VkMemoryRequirements& vkMemReq,
6863  const VmaAllocationCreateInfo& createInfo,
6864  VmaAllocation allocation);
6865  void RecordAllocateMemoryPages(uint32_t frameIndex,
6866  const VkMemoryRequirements& vkMemReq,
6867  const VmaAllocationCreateInfo& createInfo,
6868  uint64_t allocationCount,
6869  const VmaAllocation* pAllocations);
6870  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6871  const VkMemoryRequirements& vkMemReq,
6872  bool requiresDedicatedAllocation,
6873  bool prefersDedicatedAllocation,
6874  const VmaAllocationCreateInfo& createInfo,
6875  VmaAllocation allocation);
6876  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6877  const VkMemoryRequirements& vkMemReq,
6878  bool requiresDedicatedAllocation,
6879  bool prefersDedicatedAllocation,
6880  const VmaAllocationCreateInfo& createInfo,
6881  VmaAllocation allocation);
6882  void RecordFreeMemory(uint32_t frameIndex,
6883  VmaAllocation allocation);
6884  void RecordFreeMemoryPages(uint32_t frameIndex,
6885  uint64_t allocationCount,
6886  const VmaAllocation* pAllocations);
6887  void RecordSetAllocationUserData(uint32_t frameIndex,
6888  VmaAllocation allocation,
6889  const void* pUserData);
6890  void RecordCreateLostAllocation(uint32_t frameIndex,
6891  VmaAllocation allocation);
6892  void RecordMapMemory(uint32_t frameIndex,
6893  VmaAllocation allocation);
6894  void RecordUnmapMemory(uint32_t frameIndex,
6895  VmaAllocation allocation);
6896  void RecordFlushAllocation(uint32_t frameIndex,
6897  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6898  void RecordInvalidateAllocation(uint32_t frameIndex,
6899  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6900  void RecordCreateBuffer(uint32_t frameIndex,
6901  const VkBufferCreateInfo& bufCreateInfo,
6902  const VmaAllocationCreateInfo& allocCreateInfo,
6903  VmaAllocation allocation);
6904  void RecordCreateImage(uint32_t frameIndex,
6905  const VkImageCreateInfo& imageCreateInfo,
6906  const VmaAllocationCreateInfo& allocCreateInfo,
6907  VmaAllocation allocation);
6908  void RecordDestroyBuffer(uint32_t frameIndex,
6909  VmaAllocation allocation);
6910  void RecordDestroyImage(uint32_t frameIndex,
6911  VmaAllocation allocation);
6912  void RecordTouchAllocation(uint32_t frameIndex,
6913  VmaAllocation allocation);
6914  void RecordGetAllocationInfo(uint32_t frameIndex,
6915  VmaAllocation allocation);
6916  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6917  VmaPool pool);
6918  void RecordDefragmentationBegin(uint32_t frameIndex,
6919  const VmaDefragmentationInfo2& info,
6921  void RecordDefragmentationEnd(uint32_t frameIndex,
6923  void RecordSetPoolName(uint32_t frameIndex,
6924  VmaPool pool,
6925  const char* name);
6926 
6927 private:
6928  struct CallParams
6929  {
6930  uint32_t threadId;
6931  double time;
6932  };
6933 
6934  class UserDataString
6935  {
6936  public:
6937  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6938  const char* GetString() const { return m_Str; }
6939 
6940  private:
6941  char m_PtrStr[17];
6942  const char* m_Str;
6943  };
6944 
6945  bool m_UseMutex;
6946  VmaRecordFlags m_Flags;
6947  FILE* m_File;
6948  VMA_MUTEX m_FileMutex;
6949  int64_t m_Freq;
6950  int64_t m_StartCounter;
6951 
6952  void GetBasicParams(CallParams& outParams);
6953 
6954  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6955  template<typename T>
6956  void PrintPointerList(uint64_t count, const T* pItems)
6957  {
6958  if(count)
6959  {
6960  fprintf(m_File, "%p", pItems[0]);
6961  for(uint64_t i = 1; i < count; ++i)
6962  {
6963  fprintf(m_File, " %p", pItems[i]);
6964  }
6965  }
6966  }
6967 
6968  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6969  void Flush();
6970 };
6971 
6972 #endif // #if VMA_RECORDING_ENABLED
6973 
6974 /*
6975 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6976 */
6977 class VmaAllocationObjectAllocator
6978 {
6979  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6980 public:
6981  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6982 
6983  VmaAllocation Allocate();
6984  void Free(VmaAllocation hAlloc);
6985 
6986 private:
6987  VMA_MUTEX m_Mutex;
6988  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6989 };
6990 
6991 struct VmaCurrentBudgetData
6992 {
6993  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
6994  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
6995 
6996 #if VMA_MEMORY_BUDGET
6997  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
6998  VMA_RW_MUTEX m_BudgetMutex;
6999  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7000  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7001  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7002 #endif // #if VMA_MEMORY_BUDGET
7003 
7004  VmaCurrentBudgetData()
7005  {
7006  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7007  {
7008  m_BlockBytes[heapIndex] = 0;
7009  m_AllocationBytes[heapIndex] = 0;
7010 #if VMA_MEMORY_BUDGET
7011  m_VulkanUsage[heapIndex] = 0;
7012  m_VulkanBudget[heapIndex] = 0;
7013  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7014 #endif
7015  }
7016 
7017 #if VMA_MEMORY_BUDGET
7018  m_OperationsSinceBudgetFetch = 0;
7019 #endif
7020  }
7021 
7022  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7023  {
7024  m_AllocationBytes[heapIndex] += allocationSize;
7025 #if VMA_MEMORY_BUDGET
7026  ++m_OperationsSinceBudgetFetch;
7027 #endif
7028  }
7029 
7030  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7031  {
7032  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
7033  m_AllocationBytes[heapIndex] -= allocationSize;
7034 #if VMA_MEMORY_BUDGET
7035  ++m_OperationsSinceBudgetFetch;
7036 #endif
7037  }
7038 };
7039 
// Main allocator object. One instance lives behind each VmaAllocator handle.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    uint32_t m_VulkanApiVersion;
    bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseExtMemoryBudget;
    VkDevice m_hDevice;
    VkInstance m_hInstance;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    VmaAllocationObjectAllocator m_AllocationObjectAllocator;

    // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
    uint32_t m_HeapSizeLimitMask;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Per-heap usage/budget counters, updated on every (de)allocation.
    VmaCurrentBudgetData m_Budget;

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided CPU allocation callbacks, or null when defaults are used.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective bufferImageGranularity: device limit, but at least the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally honor nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

    void GetBudget(
        VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
    VkResult BindVulkanBuffer(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkBuffer buffer,
        const void* pNext);
    // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
    VkResult BindVulkanImage(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkImage image,
        const void* pNext);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

    /*
    Returns bit mask of memory types that can support defragmentation on GPU as
    they support creation of required buffer for copy operations.
    */
    uint32_t GetGpuDefragmentationMemoryTypeBits();

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool withinBudget,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void FreeDedicatedMemory(const VmaAllocation allocation);

    /*
    Calculates and returns bit mask of memory types that can support defragmentation
    on GPU as they support creation of required buffer for copy operations.
    */
    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;

#if VMA_MEMORY_BUDGET
    // Refreshes cached usage/budget numbers (VK_EXT_memory_budget path).
    void UpdateVulkanBudget();
#endif // #if VMA_MEMORY_BUDGET
};
7305 
7307 // Memory allocation #2 after VmaAllocator_T definition
7308 
7309 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7310 {
7311  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7312 }
7313 
7314 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7315 {
7316  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7317 }
7318 
7319 template<typename T>
7320 static T* VmaAllocate(VmaAllocator hAllocator)
7321 {
7322  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7323 }
7324 
7325 template<typename T>
7326 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7327 {
7328  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7329 }
7330 
7331 template<typename T>
7332 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7333 {
7334  if(ptr != VMA_NULL)
7335  {
7336  ptr->~T();
7337  VmaFree(hAllocator, ptr);
7338  }
7339 }
7340 
7341 template<typename T>
7342 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7343 {
7344  if(ptr != VMA_NULL)
7345  {
7346  for(size_t i = count; i--; )
7347  ptr[i].~T();
7348  VmaFree(hAllocator, ptr);
7349  }
7350 }
7351 
7353 // VmaStringBuilder
7354 
7355 #if VMA_STATS_STRING_ENABLED
7356 
// Simple append-only character buffer used for building statistics strings.
// The buffer is not automatically null-terminated; pair GetData() with GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr); // Appends a null-terminated string (without its terminator).
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num); // Appends decimal representation.
    void AddNumber(uint64_t num); // Appends decimal representation.
    void AddPointer(const void* ptr); // Appends pointer formatted by VmaPtrToStr.

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
7374 
7375 void VmaStringBuilder::Add(const char* pStr)
7376 {
7377  const size_t strLen = strlen(pStr);
7378  if(strLen > 0)
7379  {
7380  const size_t oldCount = m_Data.size();
7381  m_Data.resize(oldCount + strLen);
7382  memcpy(m_Data.data() + oldCount, pStr, strLen);
7383  }
7384 }
7385 
7386 void VmaStringBuilder::AddNumber(uint32_t num)
7387 {
7388  char buf[11];
7389  buf[10] = '\0';
7390  char *p = &buf[10];
7391  do
7392  {
7393  *--p = '0' + (num % 10);
7394  num /= 10;
7395  }
7396  while(num);
7397  Add(p);
7398 }
7399 
7400 void VmaStringBuilder::AddNumber(uint64_t num)
7401 {
7402  char buf[21];
7403  buf[20] = '\0';
7404  char *p = &buf[20];
7405  do
7406  {
7407  *--p = '0' + (num % 10);
7408  num /= 10;
7409  }
7410  while(num);
7411  Add(p);
7412 }
7413 
// Appends the pointer rendered as text by VmaPtrToStr.
// 21 chars is sized for a 64-bit pointer ("0x" + 16 hex digits + NUL) -
// confirm against VmaPtrToStr's actual format.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
7420 
7421 #endif // #if VMA_STATS_STRING_ENABLED
7422 
7424 // VmaJsonWriter
7425 
7426 #if VMA_STATS_STRING_ENABLED
7427 
/*
Minimal streaming JSON writer used for VMA statistics dumps.
Output is appended to the given VmaStringBuilder. Correct nesting of
Begin*/End* calls is enforced with assertions via an explicit stack.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Collections. singleLine suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (or object key) in one call.
    void WriteString(const char* pStr);
    // Piecewise string building: Begin, any number of Continue*, then End.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    // Scalar values.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // In objects, keys and values each count as one (parity selects key vs. value).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString; // True between BeginString and EndString.

    // Emits separator/indentation before a new value; asserts object keys are strings.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
7476 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
7478 
// Binds the writer to its output string builder; the nesting stack uses the
// same CPU allocation callbacks as the rest of the allocator.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
7485 
// On destruction the document must be complete: no open string or collection.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
7491 
7492 void VmaJsonWriter::BeginObject(bool singleLine)
7493 {
7494  VMA_ASSERT(!m_InsideString);
7495 
7496  BeginValue(false);
7497  m_SB.Add('{');
7498 
7499  StackItem item;
7500  item.type = COLLECTION_TYPE_OBJECT;
7501  item.valueCount = 0;
7502  item.singleLineMode = singleLine;
7503  m_Stack.push_back(item);
7504 }
7505 
7506 void VmaJsonWriter::EndObject()
7507 {
7508  VMA_ASSERT(!m_InsideString);
7509 
7510  WriteIndent(true);
7511  m_SB.Add('}');
7512 
7513  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7514  m_Stack.pop_back();
7515 }
7516 
7517 void VmaJsonWriter::BeginArray(bool singleLine)
7518 {
7519  VMA_ASSERT(!m_InsideString);
7520 
7521  BeginValue(false);
7522  m_SB.Add('[');
7523 
7524  StackItem item;
7525  item.type = COLLECTION_TYPE_ARRAY;
7526  item.valueCount = 0;
7527  item.singleLineMode = singleLine;
7528  m_Stack.push_back(item);
7529 }
7530 
7531 void VmaJsonWriter::EndArray()
7532 {
7533  VMA_ASSERT(!m_InsideString);
7534 
7535  WriteIndent(true);
7536  m_SB.Add(']');
7537 
7538  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7539  m_Stack.pop_back();
7540 }
7541 
// Writes a complete, escaped string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
7547 
7548 void VmaJsonWriter::BeginString(const char* pStr)
7549 {
7550  VMA_ASSERT(!m_InsideString);
7551 
7552  BeginValue(true);
7553  m_SB.Add('"');
7554  m_InsideString = true;
7555  if(pStr != VMA_NULL && pStr[0] != '\0')
7556  {
7557  ContinueString(pStr);
7558  }
7559 }
7560 
7561 void VmaJsonWriter::ContinueString(const char* pStr)
7562 {
7563  VMA_ASSERT(m_InsideString);
7564 
7565  const size_t strLen = strlen(pStr);
7566  for(size_t i = 0; i < strLen; ++i)
7567  {
7568  char ch = pStr[i];
7569  if(ch == '\\')
7570  {
7571  m_SB.Add("\\\\");
7572  }
7573  else if(ch == '"')
7574  {
7575  m_SB.Add("\\\"");
7576  }
7577  else if(ch >= 32)
7578  {
7579  m_SB.Add(ch);
7580  }
7581  else switch(ch)
7582  {
7583  case '\b':
7584  m_SB.Add("\\b");
7585  break;
7586  case '\f':
7587  m_SB.Add("\\f");
7588  break;
7589  case '\n':
7590  m_SB.Add("\\n");
7591  break;
7592  case '\r':
7593  m_SB.Add("\\r");
7594  break;
7595  case '\t':
7596  m_SB.Add("\\t");
7597  break;
7598  default:
7599  VMA_ASSERT(0 && "Character not currently supported.");
7600  break;
7601  }
7602  }
7603 }
7604 
// Appends the decimal digits of n to the string under construction.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
7610 
// Appends the decimal digits of n to the string under construction.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
7616 
// Appends the pointer (formatted by VmaStringBuilder::AddPointer) to the
// string under construction.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
7622 
7623 void VmaJsonWriter::EndString(const char* pStr)
7624 {
7625  VMA_ASSERT(m_InsideString);
7626  if(pStr != VMA_NULL && pStr[0] != '\0')
7627  {
7628  ContinueString(pStr);
7629  }
7630  m_SB.Add('"');
7631  m_InsideString = false;
7632 }
7633 
// Writes an unquoted decimal number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
7640 
// Writes an unquoted decimal number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
7647 
7648 void VmaJsonWriter::WriteBool(bool b)
7649 {
7650  VMA_ASSERT(!m_InsideString);
7651  BeginValue(false);
7652  m_SB.Add(b ? "true" : "false");
7653 }
7654 
// Writes the JSON literal null.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
7661 
7662 void VmaJsonWriter::BeginValue(bool isString)
7663 {
7664  if(!m_Stack.empty())
7665  {
7666  StackItem& currItem = m_Stack.back();
7667  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7668  currItem.valueCount % 2 == 0)
7669  {
7670  VMA_ASSERT(isString);
7671  }
7672 
7673  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7674  currItem.valueCount % 2 != 0)
7675  {
7676  m_SB.Add(": ");
7677  }
7678  else if(currItem.valueCount > 0)
7679  {
7680  m_SB.Add(", ");
7681  WriteIndent();
7682  }
7683  else
7684  {
7685  WriteIndent();
7686  }
7687  ++currItem.valueCount;
7688  }
7689 }
7690 
7691 void VmaJsonWriter::WriteIndent(bool oneLess)
7692 {
7693  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7694  {
7695  m_SB.AddNewLine();
7696 
7697  size_t count = m_Stack.size();
7698  if(count > 0 && oneLess)
7699  {
7700  --count;
7701  }
7702  for(size_t i = 0; i < count; ++i)
7703  {
7704  m_SB.Add(INDENT);
7705  }
7706  }
7707 }
7708 
7709 #endif // #if VMA_STATS_STRING_ENABLED
7710 
7712 
7713 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7714 {
7715  if(IsUserDataString())
7716  {
7717  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7718 
7719  FreeUserDataString(hAllocator);
7720 
7721  if(pUserData != VMA_NULL)
7722  {
7723  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
7724  }
7725  }
7726  else
7727  {
7728  m_pUserData = pUserData;
7729  }
7730 }
7731 
// Rebinds this allocation to a (possibly different) block and offset,
// e.g. during defragmentation, preserving its mapped state.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Persistent mapping counts as one extra reference on top of the
        // explicit map count (low 7 bits of m_MapCount).
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        // Order matters: release references on the old block before taking
        // them on the new one.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
7753 
// Moves the allocation within its current block (used by defragmentation).
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
7759 
7760 VkDeviceSize VmaAllocation_T::GetOffset() const
7761 {
7762  switch(m_Type)
7763  {
7764  case ALLOCATION_TYPE_BLOCK:
7765  return m_BlockAllocation.m_Offset;
7766  case ALLOCATION_TYPE_DEDICATED:
7767  return 0;
7768  default:
7769  VMA_ASSERT(0);
7770  return 0;
7771  }
7772 }
7773 
7774 VkDeviceMemory VmaAllocation_T::GetMemory() const
7775 {
7776  switch(m_Type)
7777  {
7778  case ALLOCATION_TYPE_BLOCK:
7779  return m_BlockAllocation.m_Block->GetDeviceMemory();
7780  case ALLOCATION_TYPE_DEDICATED:
7781  return m_DedicatedAllocation.m_hMemory;
7782  default:
7783  VMA_ASSERT(0);
7784  return VK_NULL_HANDLE;
7785  }
7786 }
7787 
7788 void* VmaAllocation_T::GetMappedData() const
7789 {
7790  switch(m_Type)
7791  {
7792  case ALLOCATION_TYPE_BLOCK:
7793  if(m_MapCount != 0)
7794  {
7795  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7796  VMA_ASSERT(pBlockData != VMA_NULL);
7797  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7798  }
7799  else
7800  {
7801  return VMA_NULL;
7802  }
7803  break;
7804  case ALLOCATION_TYPE_DEDICATED:
7805  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7806  return m_DedicatedAllocation.m_pMappedData;
7807  default:
7808  VMA_ASSERT(0);
7809  return VMA_NULL;
7810  }
7811 }
7812 
7813 bool VmaAllocation_T::CanBecomeLost() const
7814 {
7815  switch(m_Type)
7816  {
7817  case ALLOCATION_TYPE_BLOCK:
7818  return m_BlockAllocation.m_CanBecomeLost;
7819  case ALLOCATION_TYPE_DEDICATED:
7820  return false;
7821  default:
7822  VMA_ASSERT(0);
7823  return false;
7824  }
7825 }
7826 
// Tries to atomically mark this allocation as lost. Returns true on success,
// false when the allocation was used within the last frameInUseCount frames.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // CAS retry loop - presumably CompareExchangeLastUseFrameIndex reloads
    // localLastUseFrameIndex on failure so the checks below see a fresh value.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - callers are not expected to ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - may still be in flight on the GPU.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
7858 
7859 #if VMA_STATS_STRING_ENABLED
7860 
// Correspond to values of enum VmaSuballocationType.
// Order must match the enum exactly - this table is indexed directly
// (e.g. in VmaAllocation_T::PrintParameters).
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
7870 
// Writes this allocation's properties as key/value pairs into an already-open
// JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // String mode: m_pUserData is an owned, null-terminated string.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Raw pointer mode: print the address itself.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
7906 
7907 #endif
7908 
// Releases the heap copy of the user-data string (string mode only).
void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
{
    VMA_ASSERT(IsUserDataString());
    VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
    m_pUserData = VMA_NULL;
}
7915 
7916 void VmaAllocation_T::BlockAllocMap()
7917 {
7918  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7919 
7920  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7921  {
7922  ++m_MapCount;
7923  }
7924  else
7925  {
7926  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7927  }
7928 }
7929 
7930 void VmaAllocation_T::BlockAllocUnmap()
7931 {
7932  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7933 
7934  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7935  {
7936  --m_MapCount;
7937  }
7938  else
7939  {
7940  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7941  }
7942 }
7943 
7944 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7945 {
7946  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7947 
7948  if(m_MapCount != 0)
7949  {
7950  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7951  {
7952  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7953  *ppData = m_DedicatedAllocation.m_pMappedData;
7954  ++m_MapCount;
7955  return VK_SUCCESS;
7956  }
7957  else
7958  {
7959  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7960  return VK_ERROR_MEMORY_MAP_FAILED;
7961  }
7962  }
7963  else
7964  {
7965  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7966  hAllocator->m_hDevice,
7967  m_DedicatedAllocation.m_hMemory,
7968  0, // offset
7969  VK_WHOLE_SIZE,
7970  0, // flags
7971  ppData);
7972  if(result == VK_SUCCESS)
7973  {
7974  m_DedicatedAllocation.m_pMappedData = *ppData;
7975  m_MapCount = 1;
7976  }
7977  return result;
7978  }
7979 }
7980 
7981 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7982 {
7983  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7984 
7985  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7986  {
7987  --m_MapCount;
7988  if(m_MapCount == 0)
7989  {
7990  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7991  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7992  hAllocator->m_hDevice,
7993  m_DedicatedAllocation.m_hMemory);
7994  }
7995  }
7996  else
7997  {
7998  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7999  }
8000 }
8001 
8002 #if VMA_STATS_STRING_ENABLED
8003 
// Emits one VmaStatInfo as a JSON object: overall counts/bytes, plus
// Min/Avg/Max sub-objects when there is more than one element to summarize.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    // Size statistics are only meaningful with at least two allocations.
    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
8051 
8052 #endif // #if VMA_STATS_STRING_ENABLED
8053 
// Orders VmaSuballocationList iterators by the size of the suballocation they
// point to; the second overload allows comparing directly against a raw
// VkDeviceSize (lower_bound-style heterogeneous lookup).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
8069 
8070 
8072 // class VmaBlockMetadata
8073 
// Starts with zero size; the real size is supplied later via Init().
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
8079 
8080 #if VMA_STATS_STRING_ENABLED
8081 
// Opens the per-block JSON object and its "Suballocations" array.
// Must be paired with PrintDetailedMap_End, which closes both.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
8104 
// Writes one used suballocation as a single-line object: its offset within
// the block plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
8118 
// Writes one free range as a single-line object with type FREE.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
8136 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
8142 
8143 #endif // #if VMA_STATS_STRING_ENABLED
8144 
8146 // class VmaBlockMetadata_Generic
8147 
// Starts empty; counters stay zero until Init(size) is called.
// Both containers share the allocator's CPU allocation callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
8156 
// Containers release their own storage; nothing extra to clean up here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
8160 
8161 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8162 {
8163  VmaBlockMetadata::Init(size);
8164 
8165  m_FreeCount = 1;
8166  m_SumFreeSize = size;
8167 
8168  VmaSuballocation suballoc = {};
8169  suballoc.offset = 0;
8170  suballoc.size = size;
8171  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8172  suballoc.hAllocation = VK_NULL_HANDLE;
8173 
8174  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8175  m_Suballocations.push_back(suballoc);
8176  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8177  --suballocItem;
8178  m_FreeSuballocationsBySize.push_back(suballocItem);
8179 }
8180 
// Validates internal consistency of all metadata structures.
// Walks m_Suballocations checking that offsets are contiguous, free/used
// state agrees with allocation handles, adjacent free ranges are merged,
// and debug margins hold; then verifies m_FreeSuballocationsBySize and the
// cached totals (m_FreeCount, m_SumFreeSize). VMA_VALIDATE makes this
// function return false on the first violated condition.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free suballocations must carry a null handle; used ones a real one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // Handle's cached offset/size must mirror this suballocation.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
8262 
8263 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8264 {
8265  if(!m_FreeSuballocationsBySize.empty())
8266  {
8267  return m_FreeSuballocationsBySize.back()->size;
8268  }
8269  else
8270  {
8271  return 0;
8272  }
8273 }
8274 
8275 bool VmaBlockMetadata_Generic::IsEmpty() const
8276 {
8277  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8278 }
8279 
8280 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8281 {
8282  outInfo.blockCount = 1;
8283 
8284  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8285  outInfo.allocationCount = rangeCount - m_FreeCount;
8286  outInfo.unusedRangeCount = m_FreeCount;
8287 
8288  outInfo.unusedBytes = m_SumFreeSize;
8289  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8290 
8291  outInfo.allocationSizeMin = UINT64_MAX;
8292  outInfo.allocationSizeMax = 0;
8293  outInfo.unusedRangeSizeMin = UINT64_MAX;
8294  outInfo.unusedRangeSizeMax = 0;
8295 
8296  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8297  suballocItem != m_Suballocations.cend();
8298  ++suballocItem)
8299  {
8300  const VmaSuballocation& suballoc = *suballocItem;
8301  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8302  {
8303  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8304  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8305  }
8306  else
8307  {
8308  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8309  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8310  }
8311  }
8312 }
8313 
8314 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8315 {
8316  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8317 
8318  inoutStats.size += GetSize();
8319  inoutStats.unusedSize += m_SumFreeSize;
8320  inoutStats.allocationCount += rangeCount - m_FreeCount;
8321  inoutStats.unusedRangeCount += m_FreeCount;
8322  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8323 }
8324 
8325 #if VMA_STATS_STRING_ENABLED
8326 
8327 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8328 {
8329  PrintDetailedMap_Begin(json,
8330  m_SumFreeSize, // unusedBytes
8331  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8332  m_FreeCount); // unusedRangeCount
8333 
8334  size_t i = 0;
8335  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8336  suballocItem != m_Suballocations.cend();
8337  ++suballocItem, ++i)
8338  {
8339  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8340  {
8341  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8342  }
8343  else
8344  {
8345  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8346  }
8347  }
8348 
8349  PrintDetailedMap_End(json);
8350 }
8351 
8352 #endif // #if VMA_STATS_STRING_ENABLED
8353 
8354 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8355  uint32_t currentFrameIndex,
8356  uint32_t frameInUseCount,
8357  VkDeviceSize bufferImageGranularity,
8358  VkDeviceSize allocSize,
8359  VkDeviceSize allocAlignment,
8360  bool upperAddress,
8361  VmaSuballocationType allocType,
8362  bool canMakeOtherLost,
8363  uint32_t strategy,
8364  VmaAllocationRequest* pAllocationRequest)
8365 {
8366  VMA_ASSERT(allocSize > 0);
8367  VMA_ASSERT(!upperAddress);
8368  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8369  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8370  VMA_HEAVY_ASSERT(Validate());
8371 
8372  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8373 
8374  // There is not enough total free space in this block to fullfill the request: Early return.
8375  if(canMakeOtherLost == false &&
8376  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8377  {
8378  return false;
8379  }
8380 
8381  // New algorithm, efficiently searching freeSuballocationsBySize.
8382  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8383  if(freeSuballocCount > 0)
8384  {
8386  {
8387  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8388  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8389  m_FreeSuballocationsBySize.data(),
8390  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8391  allocSize + 2 * VMA_DEBUG_MARGIN,
8392  VmaSuballocationItemSizeLess());
8393  size_t index = it - m_FreeSuballocationsBySize.data();
8394  for(; index < freeSuballocCount; ++index)
8395  {
8396  if(CheckAllocation(
8397  currentFrameIndex,
8398  frameInUseCount,
8399  bufferImageGranularity,
8400  allocSize,
8401  allocAlignment,
8402  allocType,
8403  m_FreeSuballocationsBySize[index],
8404  false, // canMakeOtherLost
8405  &pAllocationRequest->offset,
8406  &pAllocationRequest->itemsToMakeLostCount,
8407  &pAllocationRequest->sumFreeSize,
8408  &pAllocationRequest->sumItemSize))
8409  {
8410  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8411  return true;
8412  }
8413  }
8414  }
8415  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8416  {
8417  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8418  it != m_Suballocations.end();
8419  ++it)
8420  {
8421  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8422  currentFrameIndex,
8423  frameInUseCount,
8424  bufferImageGranularity,
8425  allocSize,
8426  allocAlignment,
8427  allocType,
8428  it,
8429  false, // canMakeOtherLost
8430  &pAllocationRequest->offset,
8431  &pAllocationRequest->itemsToMakeLostCount,
8432  &pAllocationRequest->sumFreeSize,
8433  &pAllocationRequest->sumItemSize))
8434  {
8435  pAllocationRequest->item = it;
8436  return true;
8437  }
8438  }
8439  }
8440  else // WORST_FIT, FIRST_FIT
8441  {
8442  // Search staring from biggest suballocations.
8443  for(size_t index = freeSuballocCount; index--; )
8444  {
8445  if(CheckAllocation(
8446  currentFrameIndex,
8447  frameInUseCount,
8448  bufferImageGranularity,
8449  allocSize,
8450  allocAlignment,
8451  allocType,
8452  m_FreeSuballocationsBySize[index],
8453  false, // canMakeOtherLost
8454  &pAllocationRequest->offset,
8455  &pAllocationRequest->itemsToMakeLostCount,
8456  &pAllocationRequest->sumFreeSize,
8457  &pAllocationRequest->sumItemSize))
8458  {
8459  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8460  return true;
8461  }
8462  }
8463  }
8464  }
8465 
8466  if(canMakeOtherLost)
8467  {
8468  // Brute-force algorithm. TODO: Come up with something better.
8469 
8470  bool found = false;
8471  VmaAllocationRequest tmpAllocRequest = {};
8472  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8473  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8474  suballocIt != m_Suballocations.end();
8475  ++suballocIt)
8476  {
8477  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8478  suballocIt->hAllocation->CanBecomeLost())
8479  {
8480  if(CheckAllocation(
8481  currentFrameIndex,
8482  frameInUseCount,
8483  bufferImageGranularity,
8484  allocSize,
8485  allocAlignment,
8486  allocType,
8487  suballocIt,
8488  canMakeOtherLost,
8489  &tmpAllocRequest.offset,
8490  &tmpAllocRequest.itemsToMakeLostCount,
8491  &tmpAllocRequest.sumFreeSize,
8492  &tmpAllocRequest.sumItemSize))
8493  {
8495  {
8496  *pAllocationRequest = tmpAllocRequest;
8497  pAllocationRequest->item = suballocIt;
8498  break;
8499  }
8500  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8501  {
8502  *pAllocationRequest = tmpAllocRequest;
8503  pAllocationRequest->item = suballocIt;
8504  found = true;
8505  }
8506  }
8507  }
8508  }
8509 
8510  return found;
8511  }
8512 
8513  return false;
8514 }
8515 
// Makes lost the allocations recorded in *pAllocationRequest (those that
// must be evicted for the requested allocation to fit), starting at
// pAllocationRequest->item. Returns false if any of them refuses MakeLost(),
// in which case the request has been only partially applied.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);

    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // If currently standing on a free suballocation, the next item to
        // make lost is the one following it.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors - continue from the
            // iterator it returns, not the (possibly invalidated) old one.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    // On success the request's item must now be a free suballocation.
    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
8549 
8550 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8551 {
8552  uint32_t lostAllocationCount = 0;
8553  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8554  it != m_Suballocations.end();
8555  ++it)
8556  {
8557  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8558  it->hAllocation->CanBecomeLost() &&
8559  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8560  {
8561  it = FreeSuballocation(it);
8562  ++lostAllocationCount;
8563  }
8564  }
8565  return lostAllocationCount;
8566 }
8567 
8568 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8569 {
8570  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8571  it != m_Suballocations.end();
8572  ++it)
8573  {
8574  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8575  {
8576  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8577  {
8578  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8579  return VK_ERROR_VALIDATION_FAILED_EXT;
8580  }
8581  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8582  {
8583  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8584  return VK_ERROR_VALIDATION_FAILED_EXT;
8585  }
8586  }
8587  }
8588 
8589  return VK_SUCCESS;
8590 }
8591 
// Commits an allocation into the free suballocation chosen by
// CreateAllocationRequest: shrinks/repositions request.item to the used
// range and inserts new free suballocations for any leftover padding
// before and after it. Updates m_FreeCount and m_SumFreeSize accordingly.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen BEFORE mutating suballoc, because the
    // registry is keyed by size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, one added back per non-zero padding.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
8656 
8657 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8658 {
8659  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8660  suballocItem != m_Suballocations.end();
8661  ++suballocItem)
8662  {
8663  VmaSuballocation& suballoc = *suballocItem;
8664  if(suballoc.hAllocation == allocation)
8665  {
8666  FreeSuballocation(suballocItem);
8667  VMA_HEAVY_ASSERT(Validate());
8668  return;
8669  }
8670  }
8671  VMA_ASSERT(0 && "Not found!");
8672 }
8673 
8674 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8675 {
8676  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8677  suballocItem != m_Suballocations.end();
8678  ++suballocItem)
8679  {
8680  VmaSuballocation& suballoc = *suballocItem;
8681  if(suballoc.offset == offset)
8682  {
8683  FreeSuballocation(suballocItem);
8684  return;
8685  }
8686  }
8687  VMA_ASSERT(0 && "Not found!");
8688 }
8689 
8690 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8691 {
8692  VkDeviceSize lastSize = 0;
8693  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8694  {
8695  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8696 
8697  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8698  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8699  VMA_VALIDATE(it->size >= lastSize);
8700  lastSize = it->size;
8701  }
8702  return true;
8703 }
8704 
// Checks whether an allocation of allocSize/allocAlignment of the given type
// can be placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
// bufferImageGranularity. On success fills *pOffset with the final aligned
// offset and, in the canMakeOtherLost path, *itemsToMakeLostCount /
// *pSumFreeSize / *pSumItemSize describing what would have to be evicted.
// Returns false if the allocation cannot be made here.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Eviction-aware path: the candidate range may span multiple
        // suballocations; used ones must be loseable and stale.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the allocation must fit entirely inside this one
        // free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
8978 
8979 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8980 {
8981  VMA_ASSERT(item != m_Suballocations.end());
8982  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8983 
8984  VmaSuballocationList::iterator nextItem = item;
8985  ++nextItem;
8986  VMA_ASSERT(nextItem != m_Suballocations.end());
8987  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8988 
8989  item->size += nextItem->size;
8990  --m_FreeCount;
8991  m_Suballocations.erase(nextItem);
8992 }
8993 
// Marks the given suballocation as free, updates totals, and coalesces it
// with free neighbors. Returns the iterator of the resulting free
// suballocation (which may differ from the input when merged with the
// previous one); the caller must use the returned iterator afterwards.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered from the size-sorted list BEFORE their
    // size changes / they are erased, since the registry is keyed by size.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
9045 
9046 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9047 {
9048  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9049  VMA_ASSERT(item->size > 0);
9050 
9051  // You may want to enable this validation at the beginning or at the end of
9052  // this function, depending on what do you want to check.
9053  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9054 
9055  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9056  {
9057  if(m_FreeSuballocationsBySize.empty())
9058  {
9059  m_FreeSuballocationsBySize.push_back(item);
9060  }
9061  else
9062  {
9063  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9064  }
9065  }
9066 
9067  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9068 }
9069 
9070 
// Removes 'item' from m_FreeSuballocationsBySize (if it was large enough to
// be registered). Binary-searches to the first entry of equal size, then
// scans the run of equal-size entries for the exact iterator. Asserts if
// the item should be present but is not found.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Walk forward only while sizes still match; past that the item
        // cannot exist in this sorted vector.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
9103 
// Heuristic used to decide whether bufferImageGranularity could matter for
// this block. Scans used suballocations in address order, tracking the
// minimum allocation alignment and whether any adjacent pair of used types
// conflicts (per VmaIsBufferImageGranularityConflict). Returns false only
// when no type conflict was seen AND the minimum alignment is below the
// granularity; updates inOutPrevSuballocType to the last used type so the
// caller can chain this check across multiple blocks.
bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    VkDeviceSize bufferImageGranularity,
    VmaSuballocationType& inOutPrevSuballocType) const
{
    // Granularity of 1 never conflicts; an empty block has nothing to conflict.
    if(bufferImageGranularity == 1 || IsEmpty())
    {
        return false;
    }

    VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    bool typeConflictFound = false;
    for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
        it != m_Suballocations.cend();
        ++it)
    {
        const VmaSuballocationType suballocType = it->type;
        if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
        {
            minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
            if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
            {
                typeConflictFound = true;
            }
            inOutPrevSuballocType = suballocType;
        }
    }

    return typeConflictFound || minAlignment >= bufferImageGranularity;
}
9133 
9135 // class VmaBlockMetadata_Linear
9136 
// Constructs empty metadata for a linear (ring/stack) block.
// Two suballocation vectors are kept and swapped by role; m_1stVectorIndex
// says which one currently acts as the 1st vector. Null-item counters track
// lazily-removed (freed) entries inside each vector.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
9149 
// Trivial destructor - the member vectors release their own storage.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
9153 
// Initializes metadata for a freshly created linear block:
// the entire size starts out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
9159 
// Checks internal consistency of this metadata object.
// Returns true when all invariants hold; each VMA_VALIDATE returns false
// from this function on the first violated invariant.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty exactly when no second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // Ring-buffer mode requires a non-empty 1st vector.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters must never exceed the vector sizes.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Minimum offset the next suballocation must start at (address order).
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // Ring-buffer part of the 2nd vector occupies the beginning of the block,
    // before the 1st vector's allocations.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free items have a null handle; used items have a live one.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Metadata must agree with the allocation object itself.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The first m_1stNullItemsBeginCount items of the 1st vector must all be
    // freed (null) items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so this check is
        // trivially true — looks like a leftover; confirm intended invariant.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // Double-stack part of the 2nd vector occupies the end of the block and is
    // stored back-to-front, hence the reverse iteration to keep address order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // All suballocations together must fit in the block, and the cached free
    // size must match block size minus the bytes in use.
    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
9286 
9287 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9288 {
9289  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9290  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9291 }
9292 
// Returns the size of the largest contiguous region currently available for a
// new allocation. Which region that is depends on the second-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live allocation (leading null items already skipped via
            // m_1stNullItemsBeginCount) and last allocation in address order.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd vector is stored back-to-front: back() is the stack top,
            // i.e. the lowest-addressed allocation of the upper stack.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
9356 
9357 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9358 {
9359  const VkDeviceSize size = GetSize();
9360  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9361  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9362  const size_t suballoc1stCount = suballocations1st.size();
9363  const size_t suballoc2ndCount = suballocations2nd.size();
9364 
9365  outInfo.blockCount = 1;
9366  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9367  outInfo.unusedRangeCount = 0;
9368  outInfo.usedBytes = 0;
9369  outInfo.allocationSizeMin = UINT64_MAX;
9370  outInfo.allocationSizeMax = 0;
9371  outInfo.unusedRangeSizeMin = UINT64_MAX;
9372  outInfo.unusedRangeSizeMax = 0;
9373 
9374  VkDeviceSize lastOffset = 0;
9375 
9376  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9377  {
9378  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9379  size_t nextAlloc2ndIndex = 0;
9380  while(lastOffset < freeSpace2ndTo1stEnd)
9381  {
9382  // Find next non-null allocation or move nextAllocIndex to the end.
9383  while(nextAlloc2ndIndex < suballoc2ndCount &&
9384  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9385  {
9386  ++nextAlloc2ndIndex;
9387  }
9388 
9389  // Found non-null allocation.
9390  if(nextAlloc2ndIndex < suballoc2ndCount)
9391  {
9392  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9393 
9394  // 1. Process free space before this allocation.
9395  if(lastOffset < suballoc.offset)
9396  {
9397  // There is free space from lastOffset to suballoc.offset.
9398  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9399  ++outInfo.unusedRangeCount;
9400  outInfo.unusedBytes += unusedRangeSize;
9401  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9402  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9403  }
9404 
9405  // 2. Process this allocation.
9406  // There is allocation with suballoc.offset, suballoc.size.
9407  outInfo.usedBytes += suballoc.size;
9408  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9409  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9410 
9411  // 3. Prepare for next iteration.
9412  lastOffset = suballoc.offset + suballoc.size;
9413  ++nextAlloc2ndIndex;
9414  }
9415  // We are at the end.
9416  else
9417  {
9418  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9419  if(lastOffset < freeSpace2ndTo1stEnd)
9420  {
9421  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9422  ++outInfo.unusedRangeCount;
9423  outInfo.unusedBytes += unusedRangeSize;
9424  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9425  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9426  }
9427 
9428  // End of loop.
9429  lastOffset = freeSpace2ndTo1stEnd;
9430  }
9431  }
9432  }
9433 
9434  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9435  const VkDeviceSize freeSpace1stTo2ndEnd =
9436  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9437  while(lastOffset < freeSpace1stTo2ndEnd)
9438  {
9439  // Find next non-null allocation or move nextAllocIndex to the end.
9440  while(nextAlloc1stIndex < suballoc1stCount &&
9441  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9442  {
9443  ++nextAlloc1stIndex;
9444  }
9445 
9446  // Found non-null allocation.
9447  if(nextAlloc1stIndex < suballoc1stCount)
9448  {
9449  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9450 
9451  // 1. Process free space before this allocation.
9452  if(lastOffset < suballoc.offset)
9453  {
9454  // There is free space from lastOffset to suballoc.offset.
9455  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9456  ++outInfo.unusedRangeCount;
9457  outInfo.unusedBytes += unusedRangeSize;
9458  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9459  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9460  }
9461 
9462  // 2. Process this allocation.
9463  // There is allocation with suballoc.offset, suballoc.size.
9464  outInfo.usedBytes += suballoc.size;
9465  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9466  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9467 
9468  // 3. Prepare for next iteration.
9469  lastOffset = suballoc.offset + suballoc.size;
9470  ++nextAlloc1stIndex;
9471  }
9472  // We are at the end.
9473  else
9474  {
9475  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9476  if(lastOffset < freeSpace1stTo2ndEnd)
9477  {
9478  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9479  ++outInfo.unusedRangeCount;
9480  outInfo.unusedBytes += unusedRangeSize;
9481  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9482  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9483  }
9484 
9485  // End of loop.
9486  lastOffset = freeSpace1stTo2ndEnd;
9487  }
9488  }
9489 
9490  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9491  {
9492  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9493  while(lastOffset < size)
9494  {
9495  // Find next non-null allocation or move nextAllocIndex to the end.
9496  while(nextAlloc2ndIndex != SIZE_MAX &&
9497  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9498  {
9499  --nextAlloc2ndIndex;
9500  }
9501 
9502  // Found non-null allocation.
9503  if(nextAlloc2ndIndex != SIZE_MAX)
9504  {
9505  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9506 
9507  // 1. Process free space before this allocation.
9508  if(lastOffset < suballoc.offset)
9509  {
9510  // There is free space from lastOffset to suballoc.offset.
9511  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9512  ++outInfo.unusedRangeCount;
9513  outInfo.unusedBytes += unusedRangeSize;
9514  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9515  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9516  }
9517 
9518  // 2. Process this allocation.
9519  // There is allocation with suballoc.offset, suballoc.size.
9520  outInfo.usedBytes += suballoc.size;
9521  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9522  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9523 
9524  // 3. Prepare for next iteration.
9525  lastOffset = suballoc.offset + suballoc.size;
9526  --nextAlloc2ndIndex;
9527  }
9528  // We are at the end.
9529  else
9530  {
9531  // There is free space from lastOffset to size.
9532  if(lastOffset < size)
9533  {
9534  const VkDeviceSize unusedRangeSize = size - lastOffset;
9535  ++outInfo.unusedRangeCount;
9536  outInfo.unusedBytes += unusedRangeSize;
9537  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9538  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9539  }
9540 
9541  // End of loop.
9542  lastOffset = size;
9543  }
9544  }
9545  }
9546 
9547  outInfo.unusedBytes = size - outInfo.usedBytes;
9548 }
9549 
9550 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9551 {
9552  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9553  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9554  const VkDeviceSize size = GetSize();
9555  const size_t suballoc1stCount = suballocations1st.size();
9556  const size_t suballoc2ndCount = suballocations2nd.size();
9557 
9558  inoutStats.size += size;
9559 
9560  VkDeviceSize lastOffset = 0;
9561 
9562  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9563  {
9564  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9565  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9566  while(lastOffset < freeSpace2ndTo1stEnd)
9567  {
9568  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9569  while(nextAlloc2ndIndex < suballoc2ndCount &&
9570  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9571  {
9572  ++nextAlloc2ndIndex;
9573  }
9574 
9575  // Found non-null allocation.
9576  if(nextAlloc2ndIndex < suballoc2ndCount)
9577  {
9578  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9579 
9580  // 1. Process free space before this allocation.
9581  if(lastOffset < suballoc.offset)
9582  {
9583  // There is free space from lastOffset to suballoc.offset.
9584  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9585  inoutStats.unusedSize += unusedRangeSize;
9586  ++inoutStats.unusedRangeCount;
9587  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9588  }
9589 
9590  // 2. Process this allocation.
9591  // There is allocation with suballoc.offset, suballoc.size.
9592  ++inoutStats.allocationCount;
9593 
9594  // 3. Prepare for next iteration.
9595  lastOffset = suballoc.offset + suballoc.size;
9596  ++nextAlloc2ndIndex;
9597  }
9598  // We are at the end.
9599  else
9600  {
9601  if(lastOffset < freeSpace2ndTo1stEnd)
9602  {
9603  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9604  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9605  inoutStats.unusedSize += unusedRangeSize;
9606  ++inoutStats.unusedRangeCount;
9607  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9608  }
9609 
9610  // End of loop.
9611  lastOffset = freeSpace2ndTo1stEnd;
9612  }
9613  }
9614  }
9615 
9616  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9617  const VkDeviceSize freeSpace1stTo2ndEnd =
9618  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9619  while(lastOffset < freeSpace1stTo2ndEnd)
9620  {
9621  // Find next non-null allocation or move nextAllocIndex to the end.
9622  while(nextAlloc1stIndex < suballoc1stCount &&
9623  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9624  {
9625  ++nextAlloc1stIndex;
9626  }
9627 
9628  // Found non-null allocation.
9629  if(nextAlloc1stIndex < suballoc1stCount)
9630  {
9631  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9632 
9633  // 1. Process free space before this allocation.
9634  if(lastOffset < suballoc.offset)
9635  {
9636  // There is free space from lastOffset to suballoc.offset.
9637  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9638  inoutStats.unusedSize += unusedRangeSize;
9639  ++inoutStats.unusedRangeCount;
9640  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9641  }
9642 
9643  // 2. Process this allocation.
9644  // There is allocation with suballoc.offset, suballoc.size.
9645  ++inoutStats.allocationCount;
9646 
9647  // 3. Prepare for next iteration.
9648  lastOffset = suballoc.offset + suballoc.size;
9649  ++nextAlloc1stIndex;
9650  }
9651  // We are at the end.
9652  else
9653  {
9654  if(lastOffset < freeSpace1stTo2ndEnd)
9655  {
9656  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9657  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9658  inoutStats.unusedSize += unusedRangeSize;
9659  ++inoutStats.unusedRangeCount;
9660  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9661  }
9662 
9663  // End of loop.
9664  lastOffset = freeSpace1stTo2ndEnd;
9665  }
9666  }
9667 
9668  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9669  {
9670  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9671  while(lastOffset < size)
9672  {
9673  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9674  while(nextAlloc2ndIndex != SIZE_MAX &&
9675  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9676  {
9677  --nextAlloc2ndIndex;
9678  }
9679 
9680  // Found non-null allocation.
9681  if(nextAlloc2ndIndex != SIZE_MAX)
9682  {
9683  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9684 
9685  // 1. Process free space before this allocation.
9686  if(lastOffset < suballoc.offset)
9687  {
9688  // There is free space from lastOffset to suballoc.offset.
9689  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9690  inoutStats.unusedSize += unusedRangeSize;
9691  ++inoutStats.unusedRangeCount;
9692  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9693  }
9694 
9695  // 2. Process this allocation.
9696  // There is allocation with suballoc.offset, suballoc.size.
9697  ++inoutStats.allocationCount;
9698 
9699  // 3. Prepare for next iteration.
9700  lastOffset = suballoc.offset + suballoc.size;
9701  --nextAlloc2ndIndex;
9702  }
9703  // We are at the end.
9704  else
9705  {
9706  if(lastOffset < size)
9707  {
9708  // There is free space from lastOffset to size.
9709  const VkDeviceSize unusedRangeSize = size - lastOffset;
9710  inoutStats.unusedSize += unusedRangeSize;
9711  ++inoutStats.unusedRangeCount;
9712  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9713  }
9714 
9715  // End of loop.
9716  lastOffset = size;
9717  }
9718  }
9719  }
9720 }
9721 
9722 #if VMA_STATS_STRING_ENABLED
9723 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9724 {
9725  const VkDeviceSize size = GetSize();
9726  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9727  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9728  const size_t suballoc1stCount = suballocations1st.size();
9729  const size_t suballoc2ndCount = suballocations2nd.size();
9730 
9731  // FIRST PASS
9732 
9733  size_t unusedRangeCount = 0;
9734  VkDeviceSize usedBytes = 0;
9735 
9736  VkDeviceSize lastOffset = 0;
9737 
9738  size_t alloc2ndCount = 0;
9739  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9740  {
9741  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9742  size_t nextAlloc2ndIndex = 0;
9743  while(lastOffset < freeSpace2ndTo1stEnd)
9744  {
9745  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9746  while(nextAlloc2ndIndex < suballoc2ndCount &&
9747  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9748  {
9749  ++nextAlloc2ndIndex;
9750  }
9751 
9752  // Found non-null allocation.
9753  if(nextAlloc2ndIndex < suballoc2ndCount)
9754  {
9755  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9756 
9757  // 1. Process free space before this allocation.
9758  if(lastOffset < suballoc.offset)
9759  {
9760  // There is free space from lastOffset to suballoc.offset.
9761  ++unusedRangeCount;
9762  }
9763 
9764  // 2. Process this allocation.
9765  // There is allocation with suballoc.offset, suballoc.size.
9766  ++alloc2ndCount;
9767  usedBytes += suballoc.size;
9768 
9769  // 3. Prepare for next iteration.
9770  lastOffset = suballoc.offset + suballoc.size;
9771  ++nextAlloc2ndIndex;
9772  }
9773  // We are at the end.
9774  else
9775  {
9776  if(lastOffset < freeSpace2ndTo1stEnd)
9777  {
9778  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9779  ++unusedRangeCount;
9780  }
9781 
9782  // End of loop.
9783  lastOffset = freeSpace2ndTo1stEnd;
9784  }
9785  }
9786  }
9787 
9788  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9789  size_t alloc1stCount = 0;
9790  const VkDeviceSize freeSpace1stTo2ndEnd =
9791  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9792  while(lastOffset < freeSpace1stTo2ndEnd)
9793  {
9794  // Find next non-null allocation or move nextAllocIndex to the end.
9795  while(nextAlloc1stIndex < suballoc1stCount &&
9796  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9797  {
9798  ++nextAlloc1stIndex;
9799  }
9800 
9801  // Found non-null allocation.
9802  if(nextAlloc1stIndex < suballoc1stCount)
9803  {
9804  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9805 
9806  // 1. Process free space before this allocation.
9807  if(lastOffset < suballoc.offset)
9808  {
9809  // There is free space from lastOffset to suballoc.offset.
9810  ++unusedRangeCount;
9811  }
9812 
9813  // 2. Process this allocation.
9814  // There is allocation with suballoc.offset, suballoc.size.
9815  ++alloc1stCount;
9816  usedBytes += suballoc.size;
9817 
9818  // 3. Prepare for next iteration.
9819  lastOffset = suballoc.offset + suballoc.size;
9820  ++nextAlloc1stIndex;
9821  }
9822  // We are at the end.
9823  else
9824  {
9825  if(lastOffset < size)
9826  {
9827  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9828  ++unusedRangeCount;
9829  }
9830 
9831  // End of loop.
9832  lastOffset = freeSpace1stTo2ndEnd;
9833  }
9834  }
9835 
9836  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9837  {
9838  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9839  while(lastOffset < size)
9840  {
9841  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9842  while(nextAlloc2ndIndex != SIZE_MAX &&
9843  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9844  {
9845  --nextAlloc2ndIndex;
9846  }
9847 
9848  // Found non-null allocation.
9849  if(nextAlloc2ndIndex != SIZE_MAX)
9850  {
9851  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9852 
9853  // 1. Process free space before this allocation.
9854  if(lastOffset < suballoc.offset)
9855  {
9856  // There is free space from lastOffset to suballoc.offset.
9857  ++unusedRangeCount;
9858  }
9859 
9860  // 2. Process this allocation.
9861  // There is allocation with suballoc.offset, suballoc.size.
9862  ++alloc2ndCount;
9863  usedBytes += suballoc.size;
9864 
9865  // 3. Prepare for next iteration.
9866  lastOffset = suballoc.offset + suballoc.size;
9867  --nextAlloc2ndIndex;
9868  }
9869  // We are at the end.
9870  else
9871  {
9872  if(lastOffset < size)
9873  {
9874  // There is free space from lastOffset to size.
9875  ++unusedRangeCount;
9876  }
9877 
9878  // End of loop.
9879  lastOffset = size;
9880  }
9881  }
9882  }
9883 
9884  const VkDeviceSize unusedBytes = size - usedBytes;
9885  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9886 
9887  // SECOND PASS
9888  lastOffset = 0;
9889 
9890  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9891  {
9892  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9893  size_t nextAlloc2ndIndex = 0;
9894  while(lastOffset < freeSpace2ndTo1stEnd)
9895  {
9896  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9897  while(nextAlloc2ndIndex < suballoc2ndCount &&
9898  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9899  {
9900  ++nextAlloc2ndIndex;
9901  }
9902 
9903  // Found non-null allocation.
9904  if(nextAlloc2ndIndex < suballoc2ndCount)
9905  {
9906  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9907 
9908  // 1. Process free space before this allocation.
9909  if(lastOffset < suballoc.offset)
9910  {
9911  // There is free space from lastOffset to suballoc.offset.
9912  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9913  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9914  }
9915 
9916  // 2. Process this allocation.
9917  // There is allocation with suballoc.offset, suballoc.size.
9918  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9919 
9920  // 3. Prepare for next iteration.
9921  lastOffset = suballoc.offset + suballoc.size;
9922  ++nextAlloc2ndIndex;
9923  }
9924  // We are at the end.
9925  else
9926  {
9927  if(lastOffset < freeSpace2ndTo1stEnd)
9928  {
9929  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9930  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9931  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9932  }
9933 
9934  // End of loop.
9935  lastOffset = freeSpace2ndTo1stEnd;
9936  }
9937  }
9938  }
9939 
9940  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9941  while(lastOffset < freeSpace1stTo2ndEnd)
9942  {
9943  // Find next non-null allocation or move nextAllocIndex to the end.
9944  while(nextAlloc1stIndex < suballoc1stCount &&
9945  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9946  {
9947  ++nextAlloc1stIndex;
9948  }
9949 
9950  // Found non-null allocation.
9951  if(nextAlloc1stIndex < suballoc1stCount)
9952  {
9953  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9954 
9955  // 1. Process free space before this allocation.
9956  if(lastOffset < suballoc.offset)
9957  {
9958  // There is free space from lastOffset to suballoc.offset.
9959  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9960  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9961  }
9962 
9963  // 2. Process this allocation.
9964  // There is allocation with suballoc.offset, suballoc.size.
9965  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9966 
9967  // 3. Prepare for next iteration.
9968  lastOffset = suballoc.offset + suballoc.size;
9969  ++nextAlloc1stIndex;
9970  }
9971  // We are at the end.
9972  else
9973  {
9974  if(lastOffset < freeSpace1stTo2ndEnd)
9975  {
9976  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9977  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9978  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9979  }
9980 
9981  // End of loop.
9982  lastOffset = freeSpace1stTo2ndEnd;
9983  }
9984  }
9985 
9986  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9987  {
9988  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9989  while(lastOffset < size)
9990  {
9991  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9992  while(nextAlloc2ndIndex != SIZE_MAX &&
9993  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9994  {
9995  --nextAlloc2ndIndex;
9996  }
9997 
9998  // Found non-null allocation.
9999  if(nextAlloc2ndIndex != SIZE_MAX)
10000  {
10001  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10002 
10003  // 1. Process free space before this allocation.
10004  if(lastOffset < suballoc.offset)
10005  {
10006  // There is free space from lastOffset to suballoc.offset.
10007  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10008  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10009  }
10010 
10011  // 2. Process this allocation.
10012  // There is allocation with suballoc.offset, suballoc.size.
10013  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10014 
10015  // 3. Prepare for next iteration.
10016  lastOffset = suballoc.offset + suballoc.size;
10017  --nextAlloc2ndIndex;
10018  }
10019  // We are at the end.
10020  else
10021  {
10022  if(lastOffset < size)
10023  {
10024  // There is free space from lastOffset to size.
10025  const VkDeviceSize unusedRangeSize = size - lastOffset;
10026  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10027  }
10028 
10029  // End of loop.
10030  lastOffset = size;
10031  }
10032  }
10033  }
10034 
10035  PrintDetailedMap_End(json);
10036 }
10037 #endif // #if VMA_STATS_STRING_ENABLED
10038 
10039 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10040  uint32_t currentFrameIndex,
10041  uint32_t frameInUseCount,
10042  VkDeviceSize bufferImageGranularity,
10043  VkDeviceSize allocSize,
10044  VkDeviceSize allocAlignment,
10045  bool upperAddress,
10046  VmaSuballocationType allocType,
10047  bool canMakeOtherLost,
10048  uint32_t strategy,
10049  VmaAllocationRequest* pAllocationRequest)
10050 {
10051  VMA_ASSERT(allocSize > 0);
10052  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10053  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10054  VMA_HEAVY_ASSERT(Validate());
10055  return upperAddress ?
10056  CreateAllocationRequest_UpperAddress(
10057  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10058  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10059  CreateAllocationRequest_LowerAddress(
10060  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10061  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10062 }
10063 
// Tries to place the allocation at the high end of the block: just below the
// end of the block, or just below 2nd.back() when the "upper" stack already
// has entries. This path never makes other allocations lost
// (currentFrameIndex/frameInUseCount/canMakeOtherLost/strategy are unused here).
// On success fills *pAllocationRequest and returns true.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // Upper-address allocations turn the 2nd vector into an upper stack,
    // which is incompatible with its use as a ring buffer.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        // Not enough room even below the current top of the upper stack.
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment. Allocating downwards, so align the offset down.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // 2nd (upper stack) is stored in decreasing-offset order; iterate
        // backwards = increasing offsets above resultOffset.
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            // Push the allocation down to its own granularity page.
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free space between end of 1st vector and the (unaligned) base of
        // this allocation, including any alignment slack.
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
10183 
// Tries to place the allocation at the low end of the free space. Two cases:
// 1) At the end of the 1st vector (when 2nd is empty or is an upper stack).
// 2) Wrapping around to the end of the 2nd vector in ring-buffer mode (also
//    starts ring-buffer mode when 2nd is empty), optionally making old
//    allocations at the beginning of 1st lost to create room.
// On success fills *pAllocationRequest and returns true.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate backwards over 1st: decreasing offsets below resultOffset.
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation up onto its own granularity page.
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // Free space ends at the bottom of the upper stack, or at block end.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : size;

        // There is enough free space at the end after alignment.
        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->sumItemSize = 0;
        size_t index1st = m_1stNullItemsBeginCount;

        if(canMakeOtherLost)
        {
            // Walk allocations at the beginning of 1st that collide with the
            // requested range and count how many would have to become lost.
            while(index1st < suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
            {
                // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                const VmaSuballocation& suballoc = suballocations1st[index1st];
                if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No problem.
                }
                else
                {
                    VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                    if(suballoc.hAllocation->CanBecomeLost() &&
                        suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++pAllocationRequest->itemsToMakeLostCount;
                        pAllocationRequest->sumItemSize += suballoc.size;
                    }
                    else
                    {
                        // An allocation in the way cannot become lost: fail.
                        return false;
                    }
                }
                ++index1st;
            }

            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, we must mark more allocations lost or fail.
            if(bufferImageGranularity > 1)
            {
                while(index1st < suballocations1st.size())
                {
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                    {
                        if(suballoc.hAllocation != VK_NULL_HANDLE)
                        {
                            // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                            if(suballoc.hAllocation->CanBecomeLost() &&
                                suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                            {
                                ++pAllocationRequest->itemsToMakeLostCount;
                                pAllocationRequest->sumItemSize += suballoc.size;
                            }
                            else
                            {
                                return false;
                            }
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                    ++index1st;
                }
            }

            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
            if(index1st == suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
            {
                // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
            }
        }

        // There is enough free space at the end after alignment.
        // Either all of 1st was consumed and the allocation fits before the
        // block end, or it fits before the first surviving item of 1st.
        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free size = gap up to the first surviving 1st item (or block end),
            // minus what is still occupied by items that will be made lost.
            pAllocationRequest->sumFreeSize =
                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                - resultBaseOffset
                - pAllocationRequest->sumItemSize;
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}
10454 
// Makes lost exactly the allocations counted in
// pAllocationRequest->itemsToMakeLostCount, scanning from the beginning of
// the 1st vector and wrapping into the 2nd in ring-buffer mode.
// Returns false if any of them refuses to become lost (still in use).
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Nothing to make lost: trivially succeed.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Only the lower-address (ring buffer) path can request making
    // allocations lost; double stack never does.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // If we get to the end of 1st, we wrap around to the beginning of 2nd.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free one and update bookkeeping.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                // Allocation is still in use within frameInUseCount frames.
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
10518 
10519 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10520 {
10521  uint32_t lostAllocationCount = 0;
10522 
10523  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10524  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10525  {
10526  VmaSuballocation& suballoc = suballocations1st[i];
10527  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10528  suballoc.hAllocation->CanBecomeLost() &&
10529  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10530  {
10531  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10532  suballoc.hAllocation = VK_NULL_HANDLE;
10533  ++m_1stNullItemsMiddleCount;
10534  m_SumFreeSize += suballoc.size;
10535  ++lostAllocationCount;
10536  }
10537  }
10538 
10539  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10540  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10541  {
10542  VmaSuballocation& suballoc = suballocations2nd[i];
10543  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10544  suballoc.hAllocation->CanBecomeLost() &&
10545  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10546  {
10547  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10548  suballoc.hAllocation = VK_NULL_HANDLE;
10549  ++m_2ndNullItemsCount;
10550  m_SumFreeSize += suballoc.size;
10551  ++lostAllocationCount;
10552  }
10553  }
10554 
10555  if(lostAllocationCount)
10556  {
10557  CleanupAfterFree();
10558  }
10559 
10560  return lostAllocationCount;
10561 }
10562 
10563 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10564 {
10565  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10566  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10567  {
10568  const VmaSuballocation& suballoc = suballocations1st[i];
10569  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10570  {
10571  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10572  {
10573  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10574  return VK_ERROR_VALIDATION_FAILED_EXT;
10575  }
10576  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10577  {
10578  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10579  return VK_ERROR_VALIDATION_FAILED_EXT;
10580  }
10581  }
10582  }
10583 
10584  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10585  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10586  {
10587  const VmaSuballocation& suballoc = suballocations2nd[i];
10588  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10589  {
10590  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10591  {
10592  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10593  return VK_ERROR_VALIDATION_FAILED_EXT;
10594  }
10595  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10596  {
10597  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10598  return VK_ERROR_VALIDATION_FAILED_EXT;
10599  }
10600  }
10601  }
10602 
10603  return VK_SUCCESS;
10604 }
10605 
// Commits a previously created allocation request: records the new
// suballocation in the appropriate vector and updates the 2nd-vector mode
// state machine (EMPTY -> DOUBLE_STACK or EMPTY -> RING_BUFFER).
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            // Upper stack grows downward in offsets; entries are appended to 2nd.
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            // Must come after the current last item of 1st.
            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());

            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }

            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    m_SumFreeSize -= newSuballoc.size;
}
10672 
10673 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10674 {
10675  FreeAtOffset(allocation->GetOffset());
10676 }
10677 
// Frees the suballocation starting at the given offset. Fast paths handle
// the first live item of 1st and the last item of 1st/2nd; otherwise a
// binary search locates the item in the middle of either vector.
// Every path ends in CleanupAfterFree() to restore invariants.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): indexing with m_1stNullItemsBeginCount presumes it is a
        // valid index whenever 1st is non-empty — invariant appears to be
        // maintained by CleanupAfterFree(); confirm if modifying that function.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            // Mark as free in place; actual removal is deferred to cleanup.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps 2nd sorted by increasing offset; upper (double)
        // stack keeps it sorted by decreasing offset — pick the comparator.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
10767 
10768 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10769 {
10770  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10771  const size_t suballocCount = AccessSuballocations1st().size();
10772  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10773 }
10774 
// Restores internal invariants after any free/make-lost operation:
// trims null items from the edges of both vectors, optionally compacts the
// 1st vector, and when 1st becomes empty in ring-buffer mode, promotes the
// 2nd vector to be the new 1st.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block is free: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Compact 1st in place: shift every live item toward index 0,
            // then drop the trailing (now duplicate/null) entries.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Re-trim nulls from the front of the promoted vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
10879 
10880 
10882 // class VmaBlockMetadata_Buddy
10883 
// Constructs empty buddy-allocator metadata. The tree itself is built later
// in Init(); until then m_Root is null and all free lists are empty.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1), // The whole (future) block counts as a single free range.
    m_SumFreeSize(0)
{
    // Zero all per-level free-list heads/tails.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
10893 
// Destroys the whole buddy tree recursively starting from the root.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
10898 
10899 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10900 {
10901  VmaBlockMetadata::Init(size);
10902 
10903  m_UsableSize = VmaPrevPow2(size);
10904  m_SumFreeSize = m_UsableSize;
10905 
10906  // Calculate m_LevelCount.
10907  m_LevelCount = 1;
10908  while(m_LevelCount < MAX_LEVELS &&
10909  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10910  {
10911  ++m_LevelCount;
10912  }
10913 
10914  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10915  rootNode->offset = 0;
10916  rootNode->type = Node::TYPE_FREE;
10917  rootNode->parent = VMA_NULL;
10918  rootNode->buddy = VMA_NULL;
10919 
10920  m_Root = rootNode;
10921  AddToFreeListFront(0, rootNode);
10922 }
10923 
// Debug self-check: validates the buddy tree recursively, the aggregate
// counters, and the doubly-linked free list at every level.
// Returns true when everything is consistent (VMA_VALIDATE returns false
// from this function on the first failed condition).
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters recomputed during the tree walk must match the cached ones.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of each list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node must be the recorded tail.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // prev/next links must be mutually consistent.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
10966 
10967 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10968 {
10969  for(uint32_t level = 0; level < m_LevelCount; ++level)
10970  {
10971  if(m_FreeList[level].front != VMA_NULL)
10972  {
10973  return LevelToNodeSize(level);
10974  }
10975  }
10976  return 0;
10977 }
10978 
10979 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10980 {
10981  const VkDeviceSize unusableSize = GetUnusableSize();
10982 
10983  outInfo.blockCount = 1;
10984 
10985  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10986  outInfo.usedBytes = outInfo.unusedBytes = 0;
10987 
10988  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10989  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10990  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10991 
10992  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10993 
10994  if(unusableSize > 0)
10995  {
10996  ++outInfo.unusedRangeCount;
10997  outInfo.unusedBytes += unusableSize;
10998  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10999  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11000  }
11001 }
11002 
11003 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11004 {
11005  const VkDeviceSize unusableSize = GetUnusableSize();
11006 
11007  inoutStats.size += GetSize();
11008  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11009  inoutStats.allocationCount += m_AllocationCount;
11010  inoutStats.unusedRangeCount += m_FreeCount;
11011  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11012 
11013  if(unusableSize > 0)
11014  {
11015  ++inoutStats.unusedRangeCount;
11016  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11017  }
11018 }
11019 
11020 #if VMA_STATS_STRING_ENABLED
11021 
11022 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11023 {
11024  // TODO optimize
11025  VmaStatInfo stat;
11026  CalcAllocationStatInfo(stat);
11027 
11028  PrintDetailedMap_Begin(
11029  json,
11030  stat.unusedBytes,
11031  stat.allocationCount,
11032  stat.unusedRangeCount);
11033 
11034  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11035 
11036  const VkDeviceSize unusableSize = GetUnusableSize();
11037  if(unusableSize > 0)
11038  {
11039  PrintDetailedMap_UnusedRange(json,
11040  m_UsableSize, // offset
11041  unusableSize); // size
11042  }
11043 
11044  PrintDetailedMap_End(json);
11045 }
11046 
11047 #endif // #if VMA_STATS_STRING_ENABLED
11048 
11049 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11050  uint32_t currentFrameIndex,
11051  uint32_t frameInUseCount,
11052  VkDeviceSize bufferImageGranularity,
11053  VkDeviceSize allocSize,
11054  VkDeviceSize allocAlignment,
11055  bool upperAddress,
11056  VmaSuballocationType allocType,
11057  bool canMakeOtherLost,
11058  uint32_t strategy,
11059  VmaAllocationRequest* pAllocationRequest)
11060 {
11061  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11062 
11063  // Simple way to respect bufferImageGranularity. May be optimized some day.
11064  // Whenever it might be an OPTIMAL image...
11065  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11066  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11067  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11068  {
11069  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11070  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11071  }
11072 
11073  if(allocSize > m_UsableSize)
11074  {
11075  return false;
11076  }
11077 
11078  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11079  for(uint32_t level = targetLevel + 1; level--; )
11080  {
11081  for(Node* freeNode = m_FreeList[level].front;
11082  freeNode != VMA_NULL;
11083  freeNode = freeNode->free.next)
11084  {
11085  if(freeNode->offset % allocAlignment == 0)
11086  {
11087  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11088  pAllocationRequest->offset = freeNode->offset;
11089  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11090  pAllocationRequest->sumItemSize = 0;
11091  pAllocationRequest->itemsToMakeLostCount = 0;
11092  pAllocationRequest->customData = (void*)(uintptr_t)level;
11093  return true;
11094  }
11095  }
11096  }
11097 
11098  return false;
11099 }
11100 
11101 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11102  uint32_t currentFrameIndex,
11103  uint32_t frameInUseCount,
11104  VmaAllocationRequest* pAllocationRequest)
11105 {
11106  /*
11107  Lost allocations are not supported in buddy allocator at the moment.
11108  Support might be added in the future.
11109  */
11110  return pAllocationRequest->itemsToMakeLostCount == 0;
11111 }
11112 
11113 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11114 {
11115  /*
11116  Lost allocations are not supported in buddy allocator at the moment.
11117  Support might be added in the future.
11118  */
11119  return 0;
11120 }
11121 
// Commits a previously created allocation request: walks down the buddy tree
// from the level recorded in request.customData, splitting free nodes until a
// node of the target level is reached, then marks that node as allocated.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which CreateAllocationRequest found the free node (stored there).
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node at currLevel whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Right is pushed first so the left child ends up at the list front;
        // the next iteration relies on m_FreeList[childrenLevel].front being
        // the left child (which shares the requested offset).
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One free node removed, two added: net +1 free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
11197 
11198 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11199 {
11200  if(node->type == Node::TYPE_SPLIT)
11201  {
11202  DeleteNode(node->split.leftChild->buddy);
11203  DeleteNode(node->split.leftChild);
11204  }
11205 
11206  vma_delete(GetAllocationCallbacks(), node);
11207 }
11208 
11209 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11210 {
11211  VMA_VALIDATE(level < m_LevelCount);
11212  VMA_VALIDATE(curr->parent == parent);
11213  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11214  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11215  switch(curr->type)
11216  {
11217  case Node::TYPE_FREE:
11218  // curr->free.prev, next are validated separately.
11219  ctx.calculatedSumFreeSize += levelNodeSize;
11220  ++ctx.calculatedFreeCount;
11221  break;
11222  case Node::TYPE_ALLOCATION:
11223  ++ctx.calculatedAllocationCount;
11224  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11225  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11226  break;
11227  case Node::TYPE_SPLIT:
11228  {
11229  const uint32_t childrenLevel = level + 1;
11230  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11231  const Node* const leftChild = curr->split.leftChild;
11232  VMA_VALIDATE(leftChild != VMA_NULL);
11233  VMA_VALIDATE(leftChild->offset == curr->offset);
11234  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11235  {
11236  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11237  }
11238  const Node* const rightChild = leftChild->buddy;
11239  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11240  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11241  {
11242  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11243  }
11244  }
11245  break;
11246  default:
11247  return false;
11248  }
11249 
11250  return true;
11251 }
11252 
11253 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11254 {
11255  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11256  uint32_t level = 0;
11257  VkDeviceSize currLevelNodeSize = m_UsableSize;
11258  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11259  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11260  {
11261  ++level;
11262  currLevelNodeSize = nextLevelNodeSize;
11263  nextLevelNodeSize = currLevelNodeSize >> 1;
11264  }
11265  return level;
11266 }
11267 
// Frees the allocation starting at `offset`: descends the buddy tree to the
// allocated leaf node, turns it back into a free node, then repeatedly merges
// it with its buddy while both halves of a parent are free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        // Decide whether offset falls into the left or the right half.
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet
    // GetSize() is called on it here - presumably callers always pass a valid
    // handle; verify before relying on the null-handle path.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // While the buddy is also free, collapse both children back into a single
    // free parent one level up.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free children became one free parent: net -1 free node.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
11318 
11319 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11320 {
11321  switch(node->type)
11322  {
11323  case Node::TYPE_FREE:
11324  ++outInfo.unusedRangeCount;
11325  outInfo.unusedBytes += levelNodeSize;
11326  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11327  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
11328  break;
11329  case Node::TYPE_ALLOCATION:
11330  {
11331  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11332  ++outInfo.allocationCount;
11333  outInfo.usedBytes += allocSize;
11334  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11335  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11336 
11337  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11338  if(unusedRangeSize > 0)
11339  {
11340  ++outInfo.unusedRangeCount;
11341  outInfo.unusedBytes += unusedRangeSize;
11342  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11343  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11344  }
11345  }
11346  break;
11347  case Node::TYPE_SPLIT:
11348  {
11349  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11350  const Node* const leftChild = node->split.leftChild;
11351  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11352  const Node* const rightChild = leftChild->buddy;
11353  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11354  }
11355  break;
11356  default:
11357  VMA_ASSERT(0);
11358  }
11359 }
11360 
11361 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11362 {
11363  VMA_ASSERT(node->type == Node::TYPE_FREE);
11364 
11365  // List is empty.
11366  Node* const frontNode = m_FreeList[level].front;
11367  if(frontNode == VMA_NULL)
11368  {
11369  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11370  node->free.prev = node->free.next = VMA_NULL;
11371  m_FreeList[level].front = m_FreeList[level].back = node;
11372  }
11373  else
11374  {
11375  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11376  node->free.prev = VMA_NULL;
11377  node->free.next = frontNode;
11378  frontNode->free.prev = node;
11379  m_FreeList[level].front = node;
11380  }
11381 }
11382 
11383 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11384 {
11385  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11386 
11387  // It is at the front.
11388  if(node->free.prev == VMA_NULL)
11389  {
11390  VMA_ASSERT(m_FreeList[level].front == node);
11391  m_FreeList[level].front = node->free.next;
11392  }
11393  else
11394  {
11395  Node* const prevFreeNode = node->free.prev;
11396  VMA_ASSERT(prevFreeNode->free.next == node);
11397  prevFreeNode->free.next = node->free.next;
11398  }
11399 
11400  // It is at the back.
11401  if(node->free.next == VMA_NULL)
11402  {
11403  VMA_ASSERT(m_FreeList[level].back == node);
11404  m_FreeList[level].back = node->free.prev;
11405  }
11406  else
11407  {
11408  Node* const nextFreeNode = node->free.next;
11409  VMA_ASSERT(nextFreeNode->free.prev == node);
11410  nextFreeNode->free.prev = node->free.prev;
11411  }
11412 }
11413 
11414 #if VMA_STATS_STRING_ENABLED
11415 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11416 {
11417  switch(node->type)
11418  {
11419  case Node::TYPE_FREE:
11420  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11421  break;
11422  case Node::TYPE_ALLOCATION:
11423  {
11424  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11425  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11426  if(allocSize < levelNodeSize)
11427  {
11428  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11429  }
11430  }
11431  break;
11432  case Node::TYPE_SPLIT:
11433  {
11434  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11435  const Node* const leftChild = node->split.leftChild;
11436  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11437  const Node* const rightChild = leftChild->buddy;
11438  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11439  }
11440  break;
11441  default:
11442  VMA_ASSERT(0);
11443  }
11444 }
11445 #endif // #if VMA_STATS_STRING_ENABLED
11446 
11447 
11449 // class VmaDeviceMemoryBlock
11450 
// Constructs an empty block object. Real initialization (memory handle,
// metadata object) happens later in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
11460 
11461 void VmaDeviceMemoryBlock::Init(
11462  VmaAllocator hAllocator,
11463  VmaPool hParentPool,
11464  uint32_t newMemoryTypeIndex,
11465  VkDeviceMemory newMemory,
11466  VkDeviceSize newSize,
11467  uint32_t id,
11468  uint32_t algorithm)
11469 {
11470  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11471 
11472  m_hParentPool = hParentPool;
11473  m_MemoryTypeIndex = newMemoryTypeIndex;
11474  m_Id = id;
11475  m_hMemory = newMemory;
11476 
11477  switch(algorithm)
11478  {
11480  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11481  break;
11483  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11484  break;
11485  default:
11486  VMA_ASSERT(0);
11487  // Fall-through.
11488  case 0:
11489  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11490  }
11491  m_pMetadata->Init(newSize);
11492 }
11493 
11494 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11495 {
11496  // This is the most important assert in the entire library.
11497  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11498  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11499 
11500  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11501  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11502  m_hMemory = VK_NULL_HANDLE;
11503 
11504  vma_delete(allocator, m_pMetadata);
11505  m_pMetadata = VMA_NULL;
11506 }
11507 
11508 bool VmaDeviceMemoryBlock::Validate() const
11509 {
11510  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11511  (m_pMetadata->GetSize() != 0));
11512 
11513  return m_pMetadata->Validate();
11514 }
11515 
11516 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11517 {
11518  void* pData = nullptr;
11519  VkResult res = Map(hAllocator, 1, &pData);
11520  if(res != VK_SUCCESS)
11521  {
11522  return res;
11523  }
11524 
11525  res = m_pMetadata->CheckCorruption(pData);
11526 
11527  Unmap(hAllocator, 1);
11528 
11529  return res;
11530 }
11531 
// Maps the block's VkDeviceMemory and returns the pointer in *ppData (ppData
// may be null if the caller does not need it). Mapping is reference-counted:
// `count` references are added and only the first mapping calls vkMapMemory.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serializes vkMapMemory with other map/unmap/bind calls on this block.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just bump the reference count and reuse the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the whole memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
11570 
11571 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11572 {
11573  if(count == 0)
11574  {
11575  return;
11576  }
11577 
11578  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11579  if(m_MapCount >= count)
11580  {
11581  m_MapCount -= count;
11582  if(m_MapCount == 0)
11583  {
11584  m_pMappedData = VMA_NULL;
11585  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11586  }
11587  }
11588  else
11589  {
11590  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11591  }
11592 }
11593 
11594 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11595 {
11596  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11597  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11598 
11599  void* pData;
11600  VkResult res = Map(hAllocator, 1, &pData);
11601  if(res != VK_SUCCESS)
11602  {
11603  return res;
11604  }
11605 
11606  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11607  VmaWriteMagicValue(pData, allocOffset + allocSize);
11608 
11609  Unmap(hAllocator, 1);
11610 
11611  return VK_SUCCESS;
11612 }
11613 
11614 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11615 {
11616  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11617  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11618 
11619  void* pData;
11620  VkResult res = Map(hAllocator, 1, &pData);
11621  if(res != VK_SUCCESS)
11622  {
11623  return res;
11624  }
11625 
11626  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11627  {
11628  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11629  }
11630  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11631  {
11632  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11633  }
11634 
11635  Unmap(hAllocator, 1);
11636 
11637  return VK_SUCCESS;
11638 }
11639 
// Binds hBuffer to this block's memory at hAllocation's offset plus
// allocationLocalOffset. pNext is forwarded to the bind call.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}
11656 
// Binds hImage to this block's memory at hAllocation's offset plus
// allocationLocalOffset. pNext is forwarded to the bind call.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}
11673 
11674 static void InitStatInfo(VmaStatInfo& outInfo)
11675 {
11676  memset(&outInfo, 0, sizeof(outInfo));
11677  outInfo.allocationSizeMin = UINT64_MAX;
11678  outInfo.unusedRangeSizeMin = UINT64_MAX;
11679 }
11680 
11681 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11682 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11683 {
11684  inoutInfo.blockCount += srcInfo.blockCount;
11685  inoutInfo.allocationCount += srcInfo.allocationCount;
11686  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11687  inoutInfo.usedBytes += srcInfo.usedBytes;
11688  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11689  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11690  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11691  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11692  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11693 }
11694 
11695 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11696 {
11697  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11698  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11699  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11700  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11701 }
11702 
// Constructs a custom pool by forwarding the creation parameters to the
// embedded block vector. createInfo.blockSize == 0 means "use the allocator's
// preferredBlockSize" and marks the block size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0),
    m_Name(VMA_NULL)
{
}
11722 
VmaPool_T::~VmaPool_T()
{
    // Intentionally empty. NOTE(review): m_Name allocated by SetName() is not
    // released here - presumably freed elsewhere before destruction; verify.
}
11726 
11727 void VmaPool_T::SetName(const char* pName)
11728 {
11729  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
11730  VmaFreeString(allocs, m_Name);
11731 
11732  if(pName != VMA_NULL)
11733  {
11734  m_Name = VmaCreateStringCopy(allocs, pName);
11735  }
11736  else
11737  {
11738  m_Name = VMA_NULL;
11739  }
11740 }
11741 
11742 #if VMA_STATS_STRING_ENABLED
11743 
11744 #endif // #if VMA_STATS_STRING_ENABLED
11745 
// Stores the configuration of this vector of memory blocks; no Vulkan memory
// is allocated here (blocks are created later, e.g. by CreateMinBlocks).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
11772 
11773 VmaBlockVector::~VmaBlockVector()
11774 {
11775  for(size_t i = m_Blocks.size(); i--; )
11776  {
11777  m_Blocks[i]->Destroy(m_hAllocator);
11778  vma_delete(m_hAllocator, m_Blocks[i]);
11779  }
11780 }
11781 
11782 VkResult VmaBlockVector::CreateMinBlocks()
11783 {
11784  for(size_t i = 0; i < m_MinBlockCount; ++i)
11785  {
11786  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11787  if(res != VK_SUCCESS)
11788  {
11789  return res;
11790  }
11791  }
11792  return VK_SUCCESS;
11793 }
11794 
11795 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11796 {
11797  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11798 
11799  const size_t blockCount = m_Blocks.size();
11800 
11801  pStats->size = 0;
11802  pStats->unusedSize = 0;
11803  pStats->allocationCount = 0;
11804  pStats->unusedRangeCount = 0;
11805  pStats->unusedRangeSizeMax = 0;
11806  pStats->blockCount = blockCount;
11807 
11808  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11809  {
11810  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11811  VMA_ASSERT(pBlock);
11812  VMA_HEAVY_ASSERT(pBlock->Validate());
11813  pBlock->m_pMetadata->AddPoolStats(*pStats);
11814  }
11815 }
11816 
11817 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11818 {
11819  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11820  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11821  (VMA_DEBUG_MARGIN > 0) &&
11822  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11823  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11824 }
11825 
11826 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11827 
11828 VkResult VmaBlockVector::Allocate(
11829  uint32_t currentFrameIndex,
11830  VkDeviceSize size,
11831  VkDeviceSize alignment,
11832  const VmaAllocationCreateInfo& createInfo,
11833  VmaSuballocationType suballocType,
11834  size_t allocationCount,
11835  VmaAllocation* pAllocations)
11836 {
11837  size_t allocIndex;
11838  VkResult res = VK_SUCCESS;
11839 
11840  if(IsCorruptionDetectionEnabled())
11841  {
11842  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11843  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11844  }
11845 
11846  {
11847  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11848  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11849  {
11850  res = AllocatePage(
11851  currentFrameIndex,
11852  size,
11853  alignment,
11854  createInfo,
11855  suballocType,
11856  pAllocations + allocIndex);
11857  if(res != VK_SUCCESS)
11858  {
11859  break;
11860  }
11861  }
11862  }
11863 
11864  if(res != VK_SUCCESS)
11865  {
11866  // Free all already created allocations.
11867  while(allocIndex--)
11868  {
11869  Free(pAllocations[allocIndex]);
11870  }
11871  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11872  }
11873 
11874  return res;
11875 }
11876 
11877 VkResult VmaBlockVector::AllocatePage(
11878  uint32_t currentFrameIndex,
11879  VkDeviceSize size,
11880  VkDeviceSize alignment,
11881  const VmaAllocationCreateInfo& createInfo,
11882  VmaSuballocationType suballocType,
11883  VmaAllocation* pAllocation)
11884 {
11885  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11886  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11887  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11888  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11889 
11890  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
11891  VkDeviceSize freeMemory;
11892  {
11893  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
11894  VmaBudget heapBudget = {};
11895  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
11896  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
11897  }
11898 
11899  const bool canFallbackToDedicated = !IsCustomPool();
11900  const bool canCreateNewBlock =
11901  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11902  (m_Blocks.size() < m_MaxBlockCount) &&
11903  (freeMemory >= size || !canFallbackToDedicated);
11904  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11905 
11906  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11907  // Which in turn is available only when maxBlockCount = 1.
11908  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11909  {
11910  canMakeOtherLost = false;
11911  }
11912 
11913  // Upper address can only be used with linear allocator and within single memory block.
11914  if(isUpperAddress &&
11915  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11916  {
11917  return VK_ERROR_FEATURE_NOT_PRESENT;
11918  }
11919 
11920  // Validate strategy.
11921  switch(strategy)
11922  {
11923  case 0:
11925  break;
11929  break;
11930  default:
11931  return VK_ERROR_FEATURE_NOT_PRESENT;
11932  }
11933 
    // Early reject: requested allocation size is larger than maximum block size for this block vector.
11935  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11936  {
11937  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11938  }
11939 
11940  /*
11941  Under certain condition, this whole section can be skipped for optimization, so
11942  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11943  e.g. for custom pools with linear algorithm.
11944  */
11945  if(!canMakeOtherLost || canCreateNewBlock)
11946  {
11947  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11948  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11950 
11951  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11952  {
11953  // Use only last block.
11954  if(!m_Blocks.empty())
11955  {
11956  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11957  VMA_ASSERT(pCurrBlock);
11958  VkResult res = AllocateFromBlock(
11959  pCurrBlock,
11960  currentFrameIndex,
11961  size,
11962  alignment,
11963  allocFlagsCopy,
11964  createInfo.pUserData,
11965  suballocType,
11966  strategy,
11967  pAllocation);
11968  if(res == VK_SUCCESS)
11969  {
11970  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
11971  return VK_SUCCESS;
11972  }
11973  }
11974  }
11975  else
11976  {
11978  {
11979  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11980  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11981  {
11982  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11983  VMA_ASSERT(pCurrBlock);
11984  VkResult res = AllocateFromBlock(
11985  pCurrBlock,
11986  currentFrameIndex,
11987  size,
11988  alignment,
11989  allocFlagsCopy,
11990  createInfo.pUserData,
11991  suballocType,
11992  strategy,
11993  pAllocation);
11994  if(res == VK_SUCCESS)
11995  {
11996  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
11997  return VK_SUCCESS;
11998  }
11999  }
12000  }
12001  else // WORST_FIT, FIRST_FIT
12002  {
12003  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12004  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12005  {
12006  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12007  VMA_ASSERT(pCurrBlock);
12008  VkResult res = AllocateFromBlock(
12009  pCurrBlock,
12010  currentFrameIndex,
12011  size,
12012  alignment,
12013  allocFlagsCopy,
12014  createInfo.pUserData,
12015  suballocType,
12016  strategy,
12017  pAllocation);
12018  if(res == VK_SUCCESS)
12019  {
12020  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12021  return VK_SUCCESS;
12022  }
12023  }
12024  }
12025  }
12026 
12027  // 2. Try to create new block.
12028  if(canCreateNewBlock)
12029  {
12030  // Calculate optimal size for new block.
12031  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12032  uint32_t newBlockSizeShift = 0;
12033  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12034 
12035  if(!m_ExplicitBlockSize)
12036  {
12037  // Allocate 1/8, 1/4, 1/2 as first blocks.
12038  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12039  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12040  {
12041  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12042  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12043  {
12044  newBlockSize = smallerNewBlockSize;
12045  ++newBlockSizeShift;
12046  }
12047  else
12048  {
12049  break;
12050  }
12051  }
12052  }
12053 
12054  size_t newBlockIndex = 0;
12055  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12056  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12057  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12058  if(!m_ExplicitBlockSize)
12059  {
12060  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12061  {
12062  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12063  if(smallerNewBlockSize >= size)
12064  {
12065  newBlockSize = smallerNewBlockSize;
12066  ++newBlockSizeShift;
12067  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12068  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12069  }
12070  else
12071  {
12072  break;
12073  }
12074  }
12075  }
12076 
12077  if(res == VK_SUCCESS)
12078  {
12079  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12080  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12081 
12082  res = AllocateFromBlock(
12083  pBlock,
12084  currentFrameIndex,
12085  size,
12086  alignment,
12087  allocFlagsCopy,
12088  createInfo.pUserData,
12089  suballocType,
12090  strategy,
12091  pAllocation);
12092  if(res == VK_SUCCESS)
12093  {
12094  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12095  return VK_SUCCESS;
12096  }
12097  else
12098  {
12099  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12100  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12101  }
12102  }
12103  }
12104  }
12105 
12106  // 3. Try to allocate from existing blocks with making other allocations lost.
12107  if(canMakeOtherLost)
12108  {
12109  uint32_t tryIndex = 0;
12110  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12111  {
12112  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12113  VmaAllocationRequest bestRequest = {};
12114  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12115 
12116  // 1. Search existing allocations.
12118  {
12119  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12120  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12121  {
12122  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12123  VMA_ASSERT(pCurrBlock);
12124  VmaAllocationRequest currRequest = {};
12125  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12126  currentFrameIndex,
12127  m_FrameInUseCount,
12128  m_BufferImageGranularity,
12129  size,
12130  alignment,
12131  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12132  suballocType,
12133  canMakeOtherLost,
12134  strategy,
12135  &currRequest))
12136  {
12137  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12138  if(pBestRequestBlock == VMA_NULL ||
12139  currRequestCost < bestRequestCost)
12140  {
12141  pBestRequestBlock = pCurrBlock;
12142  bestRequest = currRequest;
12143  bestRequestCost = currRequestCost;
12144 
12145  if(bestRequestCost == 0)
12146  {
12147  break;
12148  }
12149  }
12150  }
12151  }
12152  }
12153  else // WORST_FIT, FIRST_FIT
12154  {
12155  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12156  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12157  {
12158  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12159  VMA_ASSERT(pCurrBlock);
12160  VmaAllocationRequest currRequest = {};
12161  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12162  currentFrameIndex,
12163  m_FrameInUseCount,
12164  m_BufferImageGranularity,
12165  size,
12166  alignment,
12167  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12168  suballocType,
12169  canMakeOtherLost,
12170  strategy,
12171  &currRequest))
12172  {
12173  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12174  if(pBestRequestBlock == VMA_NULL ||
12175  currRequestCost < bestRequestCost ||
12177  {
12178  pBestRequestBlock = pCurrBlock;
12179  bestRequest = currRequest;
12180  bestRequestCost = currRequestCost;
12181 
12182  if(bestRequestCost == 0 ||
12184  {
12185  break;
12186  }
12187  }
12188  }
12189  }
12190  }
12191 
12192  if(pBestRequestBlock != VMA_NULL)
12193  {
12194  if(mapped)
12195  {
12196  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12197  if(res != VK_SUCCESS)
12198  {
12199  return res;
12200  }
12201  }
12202 
12203  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12204  currentFrameIndex,
12205  m_FrameInUseCount,
12206  &bestRequest))
12207  {
12208  // Allocate from this pBlock.
12209  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12210  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12211  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12212  UpdateHasEmptyBlock();
12213  (*pAllocation)->InitBlockAllocation(
12214  pBestRequestBlock,
12215  bestRequest.offset,
12216  alignment,
12217  size,
12218  m_MemoryTypeIndex,
12219  suballocType,
12220  mapped,
12221  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12222  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12223  VMA_DEBUG_LOG(" Returned from existing block");
12224  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12225  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12226  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12227  {
12228  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12229  }
12230  if(IsCorruptionDetectionEnabled())
12231  {
12232  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12233  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12234  }
12235  return VK_SUCCESS;
12236  }
12237  // else: Some allocations must have been touched while we are here. Next try.
12238  }
12239  else
12240  {
12241  // Could not find place in any of the blocks - break outer loop.
12242  break;
12243  }
12244  }
12245  /* Maximum number of tries exceeded - a very unlike event when many other
12246  threads are simultaneously touching allocations making it impossible to make
12247  lost at the same time as we try to allocate. */
12248  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12249  {
12250  return VK_ERROR_TOO_MANY_OBJECTS;
12251  }
12252  }
12253 
12254  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12255 }
12256 
// Frees hAllocation back to its owning memory block. If a block becomes (or
// already was) empty, at most one empty block may be destroyed; its actual
// destruction is deferred until after the mutex is released, for performance.
void VmaBlockVector::Free(
    const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Sample the heap budget before taking the lock; an exceeded budget makes
    // us more eager to destroy an empty block below.
    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // Persistently mapped allocations hold one map reference on the block.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        // Never drop below the configured minimum number of blocks.
        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty block. We don't want to have two, so delete this one.
            if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have an empty block - leave it.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock && canDeleteBlock)
        {
            // Blocks are kept sorted by free space, so an empty block - if any -
            // is expected at the back; check only there.
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        UpdateHasEmptyBlock();
        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty block");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
12329 
12330 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12331 {
12332  VkDeviceSize result = 0;
12333  for(size_t i = m_Blocks.size(); i--; )
12334  {
12335  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12336  if(result >= m_PreferredBlockSize)
12337  {
12338  break;
12339  }
12340  }
12341  return result;
12342 }
12343 
12344 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12345 {
12346  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12347  {
12348  if(m_Blocks[blockIndex] == pBlock)
12349  {
12350  VmaVectorRemove(m_Blocks, blockIndex);
12351  return;
12352  }
12353  }
12354  VMA_ASSERT(0);
12355 }
12356 
12357 void VmaBlockVector::IncrementallySortBlocks()
12358 {
12359  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12360  {
12361  // Bubble sort only until first swap.
12362  for(size_t i = 1; i < m_Blocks.size(); ++i)
12363  {
12364  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12365  {
12366  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12367  return;
12368  }
12369  }
12370  }
12371 }
12372 
// Attempts a single allocation of size/alignment from pBlock without making
// any other allocations lost. On success fills *pAllocation and returns
// VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY (or a mapping
// error) leaving the block unmodified.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // This path never makes other allocations lost - caller must not request it.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Add a persistent map reference before committing the allocation,
        // so a mapping failure leaves the metadata untouched.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
        (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        UpdateHasEmptyBlock();
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            m_MemoryTypeIndex,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Account the new allocation in the per-heap budget.
        m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Surround the allocation with magic values checked later on free.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
12443 
12444 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12445 {
12446  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12447  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12448  allocInfo.allocationSize = blockSize;
12449  VkDeviceMemory mem = VK_NULL_HANDLE;
12450  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12451  if(res < 0)
12452  {
12453  return res;
12454  }
12455 
12456  // New VkDeviceMemory successfully created.
12457 
12458  // Create new Allocation for it.
12459  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12460  pBlock->Init(
12461  m_hAllocator,
12462  m_hParentPool,
12463  m_MemoryTypeIndex,
12464  mem,
12465  allocInfo.allocationSize,
12466  m_NextBlockId++,
12467  m_Algorithm);
12468 
12469  m_Blocks.push_back(pBlock);
12470  if(pNewBlockIndex != VMA_NULL)
12471  {
12472  *pNewBlockIndex = m_Blocks.size() - 1;
12473  }
12474 
12475  return VK_SUCCESS;
12476 }
12477 
// Executes defragmentation moves on the CPU: maps every involved block
// (reusing an existing mapping when present), invalidates source ranges and
// flushes destination ranges for non-coherent memory, copies bytes with
// memmove, then unmaps only the blocks this function mapped itself.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        // Set when this function created the mapping and must undo it at the end.
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    // Per-block bookkeeping: usage flags and the mapped host pointer.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            // Range is aligned to nonCoherentAtomSize and clamped to block size,
            // as required for non-coherent mapped memory ranges.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: source and destination may overlap when
            // moving within the same block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                // Re-write magic values around the allocation's new location.
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
12596 
// Executes defragmentation moves on the GPU: creates a temporary VkBuffer
// bound to each involved block's whole memory and records vkCmdCopyBuffer
// commands into commandBuffer. The buffers are stored in pDefragCtx for
// later destruction in DefragmentationEnd().
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                // Buffer spans the whole block, so copy regions can use the
                // suballocation offsets directly.
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            // VkBufferCopy: srcOffset, dstOffset, size.
            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        // VK_NOT_READY marks that commands were recorded and cleanup is pending.
        pDefragCtx->res = VK_NOT_READY;
    }
}
12668 
12669 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12670 {
12671  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12672  {
12673  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12674  if(pBlock->m_pMetadata->IsEmpty())
12675  {
12676  if(m_Blocks.size() > m_MinBlockCount)
12677  {
12678  if(pDefragmentationStats != VMA_NULL)
12679  {
12680  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12681  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12682  }
12683 
12684  VmaVectorRemove(m_Blocks, blockIndex);
12685  pBlock->Destroy(m_hAllocator);
12686  vma_delete(m_hAllocator, pBlock);
12687  }
12688  else
12689  {
12690  break;
12691  }
12692  }
12693  }
12694  UpdateHasEmptyBlock();
12695 }
12696 
12697 void VmaBlockVector::UpdateHasEmptyBlock()
12698 {
12699  m_HasEmptyBlock = false;
12700  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12701  {
12702  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12703  if(pBlock->m_pMetadata->IsEmpty())
12704  {
12705  m_HasEmptyBlock = true;
12706  break;
12707  }
12708  }
12709 }
12710 
12711 #if VMA_STATS_STRING_ENABLED
12712 
// Writes a JSON description of this block vector: for a custom pool its full
// configuration (name, memory type, block size/count limits, frame-in-use
// count, algorithm), otherwise just the preferred block size, followed by a
// detailed map of every block keyed by block ID. Takes a read lock.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(IsCustomPool())
    {
        // Only report a name if the pool has a non-empty one.
        const char* poolName = m_hParentPool->GetName();
        if(poolName != VMA_NULL && poolName[0] != '\0')
        {
            json.WriteString("Name");
            json.WriteString(poolName);
        }

        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain the pool.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key: block ID. Value: the block's own detailed metadata map.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
12782 
12783 #endif // #if VMA_STATS_STRING_ENABLED
12784 
// Runs one defragmentation pass over this block vector. Chooses between the
// CPU path (memmove on host-visible memory) and the GPU path (vkCmdCopyBuffer
// into commandBuffer), honoring the separate CPU/GPU byte and allocation
// budgets passed by reference, which are decremented by the amounts moved.
// May take m_Mutex; if so, pCtx->mutexLocked is set and the lock is released
// later in DefragmentationEnd().
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;

    // CPU path requires remaining CPU budget and host-visible memory.
    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    // GPU path requires remaining GPU budget, corruption detection disabled,
    // and this memory type enabled for GPU defragmentation.
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        !IsCorruptionDetectionEnabled() &&
        ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // CPU memmove handles overlapping ranges; GPU buffer copies do not.
        bool overlappingMoveSupported = !defragmentOnGpu;

        // Lock is intentionally NOT released here - DefragmentationEnd() does it.
        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Charge the moved amounts against the caller's remaining budget.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
12872 
12873 void VmaBlockVector::DefragmentationEnd(
12874  class VmaBlockVectorDefragmentationContext* pCtx,
12875  VmaDefragmentationStats* pStats)
12876 {
12877  // Destroy buffers.
12878  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12879  {
12880  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12881  if(blockCtx.hBuffer)
12882  {
12883  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12884  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12885  }
12886  }
12887 
12888  if(pCtx->res >= VK_SUCCESS)
12889  {
12890  FreeEmptyBlocks(pStats);
12891  }
12892 
12893  if(pCtx->mutexLocked)
12894  {
12895  VMA_ASSERT(m_hAllocator->m_UseMutex);
12896  m_Mutex.UnlockWrite();
12897  }
12898 }
12899 
12900 size_t VmaBlockVector::CalcAllocationCount() const
12901 {
12902  size_t result = 0;
12903  for(size_t i = 0; i < m_Blocks.size(); ++i)
12904  {
12905  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12906  }
12907  return result;
12908 }
12909 
12910 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12911 {
12912  if(m_BufferImageGranularity == 1)
12913  {
12914  return false;
12915  }
12916  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12917  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12918  {
12919  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12920  VMA_ASSERT(m_Algorithm == 0);
12921  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12922  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12923  {
12924  return true;
12925  }
12926  }
12927  return false;
12928 }
12929 
12930 void VmaBlockVector::MakePoolAllocationsLost(
12931  uint32_t currentFrameIndex,
12932  size_t* pLostAllocationCount)
12933 {
12934  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12935  size_t lostAllocationCount = 0;
12936  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12937  {
12938  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12939  VMA_ASSERT(pBlock);
12940  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12941  }
12942  if(pLostAllocationCount != VMA_NULL)
12943  {
12944  *pLostAllocationCount = lostAllocationCount;
12945  }
12946 }
12947 
12948 VkResult VmaBlockVector::CheckCorruption()
12949 {
12950  if(!IsCorruptionDetectionEnabled())
12951  {
12952  return VK_ERROR_FEATURE_NOT_PRESENT;
12953  }
12954 
12955  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12956  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12957  {
12958  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12959  VMA_ASSERT(pBlock);
12960  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12961  if(res != VK_SUCCESS)
12962  {
12963  return res;
12964  }
12965  }
12966  return VK_SUCCESS;
12967 }
12968 
12969 void VmaBlockVector::AddStats(VmaStats* pStats)
12970 {
12971  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12972  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12973 
12974  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12975 
12976  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12977  {
12978  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12979  VMA_ASSERT(pBlock);
12980  VMA_HEAVY_ASSERT(pBlock->Validate());
12981  VmaStatInfo allocationStatInfo;
12982  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12983  VmaAddStatInfo(pStats->total, allocationStatInfo);
12984  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12985  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12986  }
12987 }
12988 
12990 // VmaDefragmentationAlgorithm_Generic members definition
12991 
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Note: overlappingMoveSupported is accepted for interface parity but not
    // used by the generic algorithm.
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        // Remember the block's index in the owning vector so moves can refer to it.
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so AddAllocation() can binary-search by block.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}
13017 
13018 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13019 {
13020  for(size_t i = m_Blocks.size(); i--; )
13021  {
13022  vma_delete(m_hAllocator, m_Blocks[i]);
13023  }
13024 }
13025 
13026 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13027 {
13028  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
13029  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13030  {
13031  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13032  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13033  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13034  {
13035  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13036  (*it)->m_Allocations.push_back(allocInfo);
13037  }
13038  else
13039  {
13040  VMA_ASSERT(0);
13041  }
13042 
13043  ++m_AllocationCount;
13044  }
13045 }
13046 
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // One round of defragmentation: walk allocations from the most "source"
    // blocks (end of m_Blocks) toward the most "destination" blocks (front),
    // relocating each allocation to an earlier block or lower offset when
    // MoveMakesSense(). Appends a record to `moves` for every relocation and
    // updates m_BytesMoved/m_AllocationsMoved. Returns VK_SUCCESS both when
    // finished and when a limit was reached.
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX is a sentinel: "reposition to the last allocation of the current block".
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so the request must not require losing others.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the move for the caller to apply (CPU memcpy or GPU copy).
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Commit the move in metadata: allocate at destination, free at source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
13193 
13194 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13195 {
13196  size_t result = 0;
13197  for(size_t i = 0; i < m_Blocks.size(); ++i)
13198  {
13199  if(m_Blocks[i]->m_HasNonMovableAllocations)
13200  {
13201  ++result;
13202  }
13203  }
13204  return result;
13205 }
13206 
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Entry point of the generic algorithm: prepares per-block allocation lists,
    // sorts blocks by "destination-ness", and runs a fixed number of
    // DefragmentRound() passes, appending planned relocations to `moves`.
    // Nothing was registered via AddAllocation()/AddAll() - nothing to do.
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        // In "all allocations" mode, collect every non-free suballocation from the metadata.
        if(m_AllAllocations)
        {
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
13261 
13262 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13263  size_t dstBlockIndex, VkDeviceSize dstOffset,
13264  size_t srcBlockIndex, VkDeviceSize srcOffset)
13265 {
13266  if(dstBlockIndex < srcBlockIndex)
13267  {
13268  return true;
13269  }
13270  if(dstBlockIndex > srcBlockIndex)
13271  {
13272  return false;
13273  }
13274  if(dstOffset < srcOffset)
13275  {
13276  return true;
13277  }
13278  return false;
13279 }
13280 
13282 // VmaDefragmentationAlgorithm_Fast
13283 
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // The fast algorithm is only selected when VMA_DEBUG_MARGIN == 0
    // (see VmaBlockVectorDefragmentationContext::Begin); enforce that here.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
13300 
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
    // Nothing to release explicitly; m_BlockInfos frees itself.
}
13304 
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Fast compaction: strips free-space metadata (PreprocessMetadata), sorts
    // blocks by ascending free size ("most destination" first), then sweeps
    // allocations from each source block into the lowest available destination
    // offset, using a FreeSpaceDatabase for gaps left behind by skipped-over
    // allocations. Metadata is rebuilt at the end (PostprocessMetadata).
    // Requires all allocations in the vector to have been registered.
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    // Ascending by free size: the fullest blocks become destinations first.
    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    FreeSpaceDatabase freeSpaceDb;

    // Destination cursor: current block (by sorted info index) and write offset inside it.
    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            // Stop entirely once either move limit would be exceeded.
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    // Remove from the list before re-inserting at the new, lower offset.
                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    // Free-space target must be an earlier ("more destination") block.
                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        // Leave the allocation in place; remember the gap before it for later use.
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    // Destination list is filled in ascending offset order, so push_back keeps it sorted.
                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
13523 
void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
{
    // Strips all FREE suballocations and resets free-space bookkeeping in every
    // block, so Defragment() can treat each block as a plain list of real
    // allocations. m_SumFreeSize is set to the full block size here;
    // PostprocessMetadata() subtracts each remaining allocation's size and
    // rebuilds the free-space structures afterwards.
    const size_t blockCount = m_pBlockVector->GetBlockCount();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        VmaBlockMetadata_Generic* const pMetadata =
            (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
        pMetadata->m_FreeCount = 0;
        pMetadata->m_SumFreeSize = pMetadata->GetSize();
        pMetadata->m_FreeSuballocationsBySize.clear();
        // Erase FREE entries; the iterator is advanced before erasing so it stays valid.
        for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
            it != pMetadata->m_Suballocations.end(); )
        {
            if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
            {
                VmaSuballocationList::iterator nextIt = it;
                ++nextIt;
                pMetadata->m_Suballocations.erase(it);
                it = nextIt;
            }
            else
            {
                ++it;
            }
        }
    }
}
13551 
13552 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13553 {
13554  const size_t blockCount = m_pBlockVector->GetBlockCount();
13555  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13556  {
13557  VmaBlockMetadata_Generic* const pMetadata =
13558  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13559  const VkDeviceSize blockSize = pMetadata->GetSize();
13560 
13561  // No allocations in this block - entire area is free.
13562  if(pMetadata->m_Suballocations.empty())
13563  {
13564  pMetadata->m_FreeCount = 1;
13565  //pMetadata->m_SumFreeSize is already set to blockSize.
13566  VmaSuballocation suballoc = {
13567  0, // offset
13568  blockSize, // size
13569  VMA_NULL, // hAllocation
13570  VMA_SUBALLOCATION_TYPE_FREE };
13571  pMetadata->m_Suballocations.push_back(suballoc);
13572  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13573  }
13574  // There are some allocations in this block.
13575  else
13576  {
13577  VkDeviceSize offset = 0;
13578  VmaSuballocationList::iterator it;
13579  for(it = pMetadata->m_Suballocations.begin();
13580  it != pMetadata->m_Suballocations.end();
13581  ++it)
13582  {
13583  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13584  VMA_ASSERT(it->offset >= offset);
13585 
13586  // Need to insert preceding free space.
13587  if(it->offset > offset)
13588  {
13589  ++pMetadata->m_FreeCount;
13590  const VkDeviceSize freeSize = it->offset - offset;
13591  VmaSuballocation suballoc = {
13592  offset, // offset
13593  freeSize, // size
13594  VMA_NULL, // hAllocation
13595  VMA_SUBALLOCATION_TYPE_FREE };
13596  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13597  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13598  {
13599  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13600  }
13601  }
13602 
13603  pMetadata->m_SumFreeSize -= it->size;
13604  offset = it->offset + it->size;
13605  }
13606 
13607  // Need to insert trailing free space.
13608  if(offset < blockSize)
13609  {
13610  ++pMetadata->m_FreeCount;
13611  const VkDeviceSize freeSize = blockSize - offset;
13612  VmaSuballocation suballoc = {
13613  offset, // offset
13614  freeSize, // size
13615  VMA_NULL, // hAllocation
13616  VMA_SUBALLOCATION_TYPE_FREE };
13617  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13618  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13619  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13620  {
13621  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13622  }
13623  }
13624 
13625  VMA_SORT(
13626  pMetadata->m_FreeSuballocationsBySize.begin(),
13627  pMetadata->m_FreeSuballocationsBySize.end(),
13628  VmaSuballocationItemSizeLess());
13629  }
13630 
13631  VMA_HEAVY_ASSERT(pMetadata->Validate());
13632  }
13633 }
13634 
13635 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13636 {
13637  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13638  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13639  while(it != pMetadata->m_Suballocations.end())
13640  {
13641  if(it->offset < suballoc.offset)
13642  {
13643  ++it;
13644  }
13645  }
13646  pMetadata->m_Suballocations.insert(it, suballoc);
13647 }
13648 
13650 // VmaBlockVectorDefragmentationContext
13651 
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
    // All state is set in the initializer list; the algorithm object
    // (m_pAlgorithm) is created later, in Begin().
}
13669 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Destroy the algorithm object created in Begin().
    // NOTE(review): m_pAlgorithm is VMA_NULL if Begin() was never called —
    // verify vma_delete tolerates a null pointer.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
13674 
13675 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13676 {
13677  AllocInfo info = { hAlloc, pChanged };
13678  m_Allocations.push_back(info);
13679 }
13680 
13681 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13682 {
13683  const bool allAllocations = m_AllAllocations ||
13684  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13685 
13686  /********************************
13687  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13688  ********************************/
13689 
13690  /*
13691  Fast algorithm is supported only when certain criteria are met:
13692  - VMA_DEBUG_MARGIN is 0.
13693  - All allocations in this block vector are moveable.
13694  - There is no possibility of image/buffer granularity conflict.
13695  */
13696  if(VMA_DEBUG_MARGIN == 0 &&
13697  allAllocations &&
13698  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13699  {
13700  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13701  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13702  }
13703  else
13704  {
13705  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13706  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13707  }
13708 
13709  if(allAllocations)
13710  {
13711  m_pAlgorithm->AddAll();
13712  }
13713  else
13714  {
13715  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13716  {
13717  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13718  }
13719  }
13720 }
13721 
13723 // VmaDefragmentationContext
13724 
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Per-memory-type contexts start out null; they are created lazily in
    // AddAllocations() for default-pool allocations.
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
13738 
13739 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13740 {
13741  for(size_t i = m_CustomPoolContexts.size(); i--; )
13742  {
13743  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13744  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13745  vma_delete(m_hAllocator, pBlockVectorCtx);
13746  }
13747  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13748  {
13749  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13750  if(pBlockVectorCtx)
13751  {
13752  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13753  vma_delete(m_hAllocator, pBlockVectorCtx);
13754  }
13755  }
13756 }
13757 
13758 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13759 {
13760  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13761  {
13762  VmaPool pool = pPools[poolIndex];
13763  VMA_ASSERT(pool);
13764  // Pools with algorithm other than default are not defragmented.
13765  if(pool->m_BlockVector.GetAlgorithm() == 0)
13766  {
13767  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13768 
13769  for(size_t i = m_CustomPoolContexts.size(); i--; )
13770  {
13771  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13772  {
13773  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13774  break;
13775  }
13776  }
13777 
13778  if(!pBlockVectorDefragCtx)
13779  {
13780  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13781  m_hAllocator,
13782  pool,
13783  &pool->m_BlockVector,
13784  m_CurrFrameIndex);
13785  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13786  }
13787 
13788  pBlockVectorDefragCtx->AddAll();
13789  }
13790  }
13791 }
13792 
// Registers individual allocations for defragmentation. For each allocation,
// locates (or lazily creates) the per-block-vector context of the pool it
// lives in. pAllocationsChanged, when not null, must have allocationCount
// elements; each receives whether the corresponding allocation was moved.
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Linear scan for an existing context of this custom pool.
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                // Default-pool contexts are indexed directly by memory type.
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // Still null when the custom pool uses a non-default algorithm;
            // such allocations are silently skipped.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
13861 
13862 VkResult VmaDefragmentationContext_T::Defragment(
13863  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13864  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13865  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13866 {
13867  if(pStats)
13868  {
13869  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13870  }
13871 
13872  if(commandBuffer == VK_NULL_HANDLE)
13873  {
13874  maxGpuBytesToMove = 0;
13875  maxGpuAllocationsToMove = 0;
13876  }
13877 
13878  VkResult res = VK_SUCCESS;
13879 
13880  // Process default pools.
13881  for(uint32_t memTypeIndex = 0;
13882  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13883  ++memTypeIndex)
13884  {
13885  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13886  if(pBlockVectorCtx)
13887  {
13888  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13889  pBlockVectorCtx->GetBlockVector()->Defragment(
13890  pBlockVectorCtx,
13891  pStats,
13892  maxCpuBytesToMove, maxCpuAllocationsToMove,
13893  maxGpuBytesToMove, maxGpuAllocationsToMove,
13894  commandBuffer);
13895  if(pBlockVectorCtx->res != VK_SUCCESS)
13896  {
13897  res = pBlockVectorCtx->res;
13898  }
13899  }
13900  }
13901 
13902  // Process custom pools.
13903  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13904  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13905  ++customCtxIndex)
13906  {
13907  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13908  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13909  pBlockVectorCtx->GetBlockVector()->Defragment(
13910  pBlockVectorCtx,
13911  pStats,
13912  maxCpuBytesToMove, maxCpuAllocationsToMove,
13913  maxGpuBytesToMove, maxGpuAllocationsToMove,
13914  commandBuffer);
13915  if(pBlockVectorCtx->res != VK_SUCCESS)
13916  {
13917  res = pBlockVectorCtx->res;
13918  }
13919  }
13920 
13921  return res;
13922 }
13923 
13925 // VmaRecorder
13926 
13927 #if VMA_RECORDING_ENABLED
13928 
// Default-constructs an inactive recorder. m_File stays null until Init()
// opens the output file; QPC frequency/start are sentinel INT64_MAX until
// Init() queries them.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
13937 
// Opens the recording file and writes the CSV header.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
// Windows-only implementation (fopen_s, QueryPerformanceCounter) — this
// whole block is compiled under VMA_RECORDING_ENABLED.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture QPC frequency and a start reference for relative timestamps.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file magic, then recording format version "1,8".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,8");

    return VK_SUCCESS;
}
13959 
13960 VmaRecorder::~VmaRecorder()
13961 {
13962  if(m_File != VMA_NULL)
13963  {
13964  fclose(m_File);
13965  }
13966 }
13967 
// Appends a "vmaCreateAllocator" CSV line. All Record* functions follow the
// same shape: capture thread id + timestamp, lock the file mutex (when
// enabled), write one line, then Flush() honoring the flush-after-call flag.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

// Appends a "vmaDestroyAllocator" CSV line.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}

// Appends a "vmaCreatePool" CSV line with the pool's create-info fields
// and the resulting pool handle (recorded as a pointer).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}

// Appends a "vmaDestroyPool" CSV line identifying the pool by handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
14015 
// Appends a "vmaAllocateMemory" CSV line: memory requirements, allocation
// create-info, the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

// Appends a "vmaAllocateMemoryPages" CSV line; the resulting allocation
// handles are written as a space-separated pointer list mid-line.
void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, ",%s\n", userDataStr.GetString());
    Flush();
}
14066 
// Appends a "vmaAllocateMemoryForBuffer" CSV line, including the
// dedicated-allocation hints reported by the driver (as 0/1).
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

// Appends a "vmaAllocateMemoryForImage" CSV line; same layout as the
// buffer variant above.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
14124 
// Appends a "vmaFreeMemory" CSV line identifying the allocation by handle.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaFreeMemoryPages" CSV line with a space-separated list of
// allocation handles.
void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    uint64_t allocationCount,
    const VmaAllocation* pAllocations)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    PrintPointerList(allocationCount, pAllocations);
    fprintf(m_File, "\n");
    Flush();
}

// Appends a "vmaSetAllocationUserData" CSV line. The user data is rendered
// as a string when the allocation stores a copied string, otherwise as a
// raw pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
14167 
// Appends a "vmaCreateLostAllocation" CSV line.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaMapMemory" CSV line.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaUnmapMemory" CSV line.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaFlushAllocation" CSV line with the flushed byte range.
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}

// Appends a "vmaInvalidateAllocation" CSV line with the invalidated range.
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
14231 
// Appends a "vmaCreateBuffer" CSV line: buffer create-info, allocation
// create-info, resulting allocation handle, and user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}

// Appends a "vmaCreateImage" CSV line: full image create-info (flags,
// type, format, extent, mips, layers, samples, tiling, usage, sharing,
// layout), allocation create-info, handle, and user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
14292 
// Appends a "vmaDestroyBuffer" CSV line.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaDestroyImage" CSV line.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaTouchAllocation" CSV line.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaGetAllocationInfo" CSV line.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}

// Appends a "vmaMakePoolAllocationsLost" CSV line.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
14352 
14353 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14354  const VmaDefragmentationInfo2& info,
14356 {
14357  CallParams callParams;
14358  GetBasicParams(callParams);
14359 
14360  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14361  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14362  info.flags);
14363  PrintPointerList(info.allocationCount, info.pAllocations);
14364  fprintf(m_File, ",");
14365  PrintPointerList(info.poolCount, info.pPools);
14366  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14367  info.maxCpuBytesToMove,
14369  info.maxGpuBytesToMove,
14371  info.commandBuffer,
14372  ctx);
14373  Flush();
14374 }
14375 
14376 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14378 {
14379  CallParams callParams;
14380  GetBasicParams(callParams);
14381 
14382  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14383  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14384  ctx);
14385  Flush();
14386 }
14387 
// Appends a "vmaSetPoolName" CSV line; a null name is recorded as an
// empty string.
void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
    VmaPool pool,
    const char* name)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        pool, name != VMA_NULL ? name : "");
    Flush();
}
14400 
14401 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14402 {
14403  if(pUserData != VMA_NULL)
14404  {
14405  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14406  {
14407  m_Str = (const char*)pUserData;
14408  }
14409  else
14410  {
14411  sprintf_s(m_PtrStr, "%p", pUserData);
14412  m_Str = m_PtrStr;
14413  }
14414  }
14415  else
14416  {
14417  m_Str = "";
14418  }
14419 }
14420 
// Writes the "Config,Begin" .. "Config,End" section of the recording:
// Vulkan API version, physical-device identity and relevant limits, the
// full memory heap/type layout, which extensions were enabled, and the
// compile-time VMA_DEBUG_* macro values — everything a replay tool needs
// to reproduce the environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    uint32_t vulkanApiVersion,
    bool dedicatedAllocationExtensionEnabled,
    bool bindMemory2ExtensionEnabled,
    bool memoryBudgetExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Full heap/type layout so replay can map memory type indices.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
    fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
14473 
14474 void VmaRecorder::GetBasicParams(CallParams& outParams)
14475 {
14476  outParams.threadId = GetCurrentThreadId();
14477 
14478  LARGE_INTEGER counter;
14479  QueryPerformanceCounter(&counter);
14480  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14481 }
14482 
14483 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14484 {
14485  if(count)
14486  {
14487  fprintf(m_File, "%p", pItems[0]);
14488  for(uint64_t i = 1; i < count; ++i)
14489  {
14490  fprintf(m_File, " %p", pItems[i]);
14491  }
14492  }
14493 }
14494 
14495 void VmaRecorder::Flush()
14496 {
14497  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14498  {
14499  fflush(m_File);
14500  }
14501 }
14502 
14503 #endif // #if VMA_RECORDING_ENABLED
14504 
14506 // VmaAllocationObjectAllocator
14507 
// Initializes the pooled allocator for VmaAllocation_T objects with a
// block capacity of 1024 objects per pool block.
VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_Allocator(pAllocationCallbacks, 1024)
{
}
14512 
// Allocates one VmaAllocation_T object from the pool. Thread-safe: the
// pool itself is not synchronized, so access is serialized by m_Mutex.
VmaAllocation VmaAllocationObjectAllocator::Allocate()
{
    VmaMutexLock mutexLock(m_Mutex);
    return m_Allocator.Alloc();
}

// Returns an object previously obtained from Allocate() to the pool.
void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
{
    VmaMutexLock mutexLock(m_Mutex);
    m_Allocator.Free(hAlloc);
}
14524 
14526 // VmaAllocator_T
14527 
14528 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14529  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14530  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
14531  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14532  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14533  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14534  m_hDevice(pCreateInfo->device),
14535  m_hInstance(pCreateInfo->instance),
14536  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14537  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14538  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14539  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14540  m_HeapSizeLimitMask(0),
14541  m_PreferredLargeHeapBlockSize(0),
14542  m_PhysicalDevice(pCreateInfo->physicalDevice),
14543  m_CurrentFrameIndex(0),
14544  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14545  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14546  m_NextPoolId(0)
14548  ,m_pRecorder(VMA_NULL)
14549 #endif
14550 {
14551  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14552  {
14553  m_UseKhrDedicatedAllocation = false;
14554  m_UseKhrBindMemory2 = false;
14555  }
14556 
14557  if(VMA_DEBUG_DETECT_CORRUPTION)
14558  {
14559  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14560  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14561  }
14562 
14563  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14564 
14565  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14566  {
14567 #if !(VMA_DEDICATED_ALLOCATION)
14569  {
14570  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14571  }
14572 #endif
14573 #if !(VMA_BIND_MEMORY2)
14574  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14575  {
14576  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14577  }
14578 #endif
14579  }
14580 #if !(VMA_MEMORY_BUDGET)
14581  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14582  {
14583  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14584  }
14585 #endif
14586 #if VMA_VULKAN_VERSION < 1001000
14587  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14588  {
14589  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
14590  }
14591 #endif
14592 
14593  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14594  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14595  memset(&m_MemProps, 0, sizeof(m_MemProps));
14596 
14597  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14598  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14599  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14600 
14601  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14602  {
14603  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14604  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14605  }
14606 
14607  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14608 
14609  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14610  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14611 
14612  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14613  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14614  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14615  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14616 
14617  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14618  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14619 
14620  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14621  {
14622  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14623  {
14624  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14625  if(limit != VK_WHOLE_SIZE)
14626  {
14627  m_HeapSizeLimitMask |= 1u << heapIndex;
14628  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14629  {
14630  m_MemProps.memoryHeaps[heapIndex].size = limit;
14631  }
14632  }
14633  }
14634  }
14635 
14636  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14637  {
14638  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14639 
14640  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14641  this,
14642  VK_NULL_HANDLE, // hParentPool
14643  memTypeIndex,
14644  preferredBlockSize,
14645  0,
14646  SIZE_MAX,
14647  GetBufferImageGranularity(),
14648  pCreateInfo->frameInUseCount,
14649  false, // explicitBlockSize
14650  false); // linearAlgorithm
14651  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14652  // becase minBlockCount is 0.
14653  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14654 
14655  }
14656 }
14657 
// Second-phase initialization that may fail (unlike the constructor):
// sets up call recording when requested, and fetches the initial memory
// budget when VK_EXT_memory_budget is in use.
// Returns VK_ERROR_FEATURE_NOT_PRESENT if recording is requested but the
// library was built without VMA_RECORDING_ENABLED, or the recorder's own
// failure code if the output file cannot be opened.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Record environment description and the implicit create call.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_VulkanApiVersion,
            m_UseKhrDedicatedAllocation,
            m_UseKhrBindMemory2,
            m_UseExtMemoryBudget);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET

    return res;
}
14695 
// Destroys the allocator. The user must have freed all allocations and
// destroyed all custom pools beforehand; leftovers trigger asserts here.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        // Log destruction as the final recorded event, then free the recorder.
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Tear down per-memory-type containers in reverse index order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
        {
            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
        }

        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
14719 
// Fills m_VulkanFunctions with pointers to the Vulkan entry points VMA uses.
// Sources, in order of increasing priority:
//   1. Statically linked functions (only when VMA_STATIC_VULKAN_FUNCTIONS == 1),
//      including extension functions fetched via vkGet*ProcAddr.
//   2. User-provided pointers in pVulkanFunctions, which override any of the above.
// Ends with asserts that every required function is non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Core 1.0 functions taken directly from the statically linked loader.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        // On Vulkan >= 1.1 fetch the promoted (non-KHR-suffixed) entry points
        // and store them in the *KHR members.
        VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
        m_VulkanFunctions.vkBindBufferMemory2KHR =
            (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
        m_VulkanFunctions.vkBindImageMemory2KHR =
            (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
            (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
    }
#endif
#if VMA_DEDICATED_ALLOCATION
    // Pre-1.1 path: VK_KHR_dedicated_allocation extension entry points.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif
#if VMA_BIND_MEMORY2
    // Pre-1.1 path: VK_KHR_bind_memory2 extension entry points.
    if(m_UseKhrBindMemory2)
    {
        m_VulkanFunctions.vkBindBufferMemory2KHR =
            (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
        m_VulkanFunctions.vkBindImageMemory2KHR =
            (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2
#if VMA_MEMORY_BUDGET
    // VK_EXT_memory_budget needs vkGetPhysicalDeviceMemoryProperties2; only
    // fetch the KHR-suffixed variant when not already set by the 1.1 branch.
    if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
            (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
#endif // #if VMA_MEMORY_BUDGET
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Helper: copy one member from pVulkanFunctions if the user supplied it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    // User-provided pointers take priority over everything fetched above.
    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif
#if VMA_MEMORY_BUDGET
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
    {
        VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    }
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
    }
#endif
}
14861 
14862 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14863 {
14864  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14865  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14866  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14867  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14868 }
14869 
14870 VkResult VmaAllocator_T::AllocateMemoryOfType(
14871  VkDeviceSize size,
14872  VkDeviceSize alignment,
14873  bool dedicatedAllocation,
14874  VkBuffer dedicatedBuffer,
14875  VkImage dedicatedImage,
14876  const VmaAllocationCreateInfo& createInfo,
14877  uint32_t memTypeIndex,
14878  VmaSuballocationType suballocType,
14879  size_t allocationCount,
14880  VmaAllocation* pAllocations)
14881 {
14882  VMA_ASSERT(pAllocations != VMA_NULL);
14883  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14884 
14885  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14886 
14887  // If memory type is not HOST_VISIBLE, disable MAPPED.
14888  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14889  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14890  {
14891  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14892  }
14893  // If memory is lazily allocated, it should be always dedicated.
14894  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
14895  {
14897  }
14898 
14899  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14900  VMA_ASSERT(blockVector);
14901 
14902  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14903  bool preferDedicatedMemory =
14904  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14905  dedicatedAllocation ||
14906  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14907  size > preferredBlockSize / 2;
14908 
14909  if(preferDedicatedMemory &&
14910  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14911  finalCreateInfo.pool == VK_NULL_HANDLE)
14912  {
14914  }
14915 
14916  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14917  {
14918  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14919  {
14920  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14921  }
14922  else
14923  {
14924  return AllocateDedicatedMemory(
14925  size,
14926  suballocType,
14927  memTypeIndex,
14928  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14929  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14930  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14931  finalCreateInfo.pUserData,
14932  dedicatedBuffer,
14933  dedicatedImage,
14934  allocationCount,
14935  pAllocations);
14936  }
14937  }
14938  else
14939  {
14940  VkResult res = blockVector->Allocate(
14941  m_CurrentFrameIndex.load(),
14942  size,
14943  alignment,
14944  finalCreateInfo,
14945  suballocType,
14946  allocationCount,
14947  pAllocations);
14948  if(res == VK_SUCCESS)
14949  {
14950  return res;
14951  }
14952 
14953  // 5. Try dedicated memory.
14954  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14955  {
14956  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14957  }
14958  else
14959  {
14960  res = AllocateDedicatedMemory(
14961  size,
14962  suballocType,
14963  memTypeIndex,
14964  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14965  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14966  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14967  finalCreateInfo.pUserData,
14968  dedicatedBuffer,
14969  dedicatedImage,
14970  allocationCount,
14971  pAllocations);
14972  if(res == VK_SUCCESS)
14973  {
14974  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14975  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14976  return VK_SUCCESS;
14977  }
14978  else
14979  {
14980  // Everything failed: Return error code.
14981  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14982  return res;
14983  }
14984  }
14985  }
14986 }
14987 
// Allocates allocationCount separate VkDeviceMemory objects ("dedicated
// allocations") of the given size from memTypeIndex. On success registers all
// of them in m_pDedicatedAllocations; on partial failure frees everything
// already created and zeroes pAllocations, so the result is all-or-nothing.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool withinBudget,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    // Optional budget check: refuse up-front if the whole batch would exceed
    // the heap's current budget.
    if(withinBudget)
    {
        const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaBudget heapBudget = {};
        GetBudget(&heapBudget, heapIndex, 1);
        if(heapBudget.usage + size * allocationCount > heapBudget.budget)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    // Chain VkMemoryDedicatedAllocateInfo when the allocation is tied to a
    // specific buffer or image (at most one of the two may be set).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000

    // Allocate the pages one by one; stop at the first failure.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations (rollback, in reverse order).
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
            currAlloc->SetUserData(this, VMA_NULL);
            currAlloc->Dtor();
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
15100 
// Allocates a single dedicated VkDeviceMemory object, optionally maps it
// persistently, and wraps it in a new VmaAllocation written to *pAllocation.
// On failure the device memory is released and an error code is returned.
VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole range when VMA_ALLOCATION_CREATE_MAPPED_BIT
    // was requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    // Construct the VmaAllocation object and account it against the budget.
    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
    // Debug feature: fill fresh memory with a recognizable pattern.
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}
15149 
// Queries memory requirements for a buffer. Uses the
// vkGetBufferMemoryRequirements2 path (Vulkan 1.1 or VK_KHR_dedicated_allocation)
// to also learn whether a dedicated allocation is required/preferred;
// otherwise falls back to the core 1.0 query with both flags reported false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        // Chained output structure that receives the dedicated-allocation hints.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
15181 
// Image counterpart of GetBufferMemoryRequirements: queries memory
// requirements for an image and, when the *2 path is available, also the
// dedicated-allocation required/preferred hints.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        // Chained output structure that receives the dedicated-allocation hints.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
15213 
15214 VkResult VmaAllocator_T::AllocateMemory(
15215  const VkMemoryRequirements& vkMemReq,
15216  bool requiresDedicatedAllocation,
15217  bool prefersDedicatedAllocation,
15218  VkBuffer dedicatedBuffer,
15219  VkImage dedicatedImage,
15220  const VmaAllocationCreateInfo& createInfo,
15221  VmaSuballocationType suballocType,
15222  size_t allocationCount,
15223  VmaAllocation* pAllocations)
15224 {
15225  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15226 
15227  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15228 
15229  if(vkMemReq.size == 0)
15230  {
15231  return VK_ERROR_VALIDATION_FAILED_EXT;
15232  }
15233  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15234  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15235  {
15236  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15237  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15238  }
15239  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15241  {
15242  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15243  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15244  }
15245  if(requiresDedicatedAllocation)
15246  {
15247  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15248  {
15249  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15250  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15251  }
15252  if(createInfo.pool != VK_NULL_HANDLE)
15253  {
15254  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15255  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15256  }
15257  }
15258  if((createInfo.pool != VK_NULL_HANDLE) &&
15259  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15260  {
15261  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15262  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15263  }
15264 
15265  if(createInfo.pool != VK_NULL_HANDLE)
15266  {
15267  const VkDeviceSize alignmentForPool = VMA_MAX(
15268  vkMemReq.alignment,
15269  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15270 
15271  VmaAllocationCreateInfo createInfoForPool = createInfo;
15272  // If memory type is not HOST_VISIBLE, disable MAPPED.
15273  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15274  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15275  {
15276  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15277  }
15278 
15279  return createInfo.pool->m_BlockVector.Allocate(
15280  m_CurrentFrameIndex.load(),
15281  vkMemReq.size,
15282  alignmentForPool,
15283  createInfoForPool,
15284  suballocType,
15285  allocationCount,
15286  pAllocations);
15287  }
15288  else
15289  {
15290  // Bit mask of memory Vulkan types acceptable for this allocation.
15291  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15292  uint32_t memTypeIndex = UINT32_MAX;
15293  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15294  if(res == VK_SUCCESS)
15295  {
15296  VkDeviceSize alignmentForMemType = VMA_MAX(
15297  vkMemReq.alignment,
15298  GetMemoryTypeMinAlignment(memTypeIndex));
15299 
15300  res = AllocateMemoryOfType(
15301  vkMemReq.size,
15302  alignmentForMemType,
15303  requiresDedicatedAllocation || prefersDedicatedAllocation,
15304  dedicatedBuffer,
15305  dedicatedImage,
15306  createInfo,
15307  memTypeIndex,
15308  suballocType,
15309  allocationCount,
15310  pAllocations);
15311  // Succeeded on first try.
15312  if(res == VK_SUCCESS)
15313  {
15314  return res;
15315  }
15316  // Allocation from this memory type failed. Try other compatible memory types.
15317  else
15318  {
15319  for(;;)
15320  {
15321  // Remove old memTypeIndex from list of possibilities.
15322  memoryTypeBits &= ~(1u << memTypeIndex);
15323  // Find alternative memTypeIndex.
15324  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15325  if(res == VK_SUCCESS)
15326  {
15327  alignmentForMemType = VMA_MAX(
15328  vkMemReq.alignment,
15329  GetMemoryTypeMinAlignment(memTypeIndex));
15330 
15331  res = AllocateMemoryOfType(
15332  vkMemReq.size,
15333  alignmentForMemType,
15334  requiresDedicatedAllocation || prefersDedicatedAllocation,
15335  dedicatedBuffer,
15336  dedicatedImage,
15337  createInfo,
15338  memTypeIndex,
15339  suballocType,
15340  allocationCount,
15341  pAllocations);
15342  // Allocation from this alternative memory type succeeded.
15343  if(res == VK_SUCCESS)
15344  {
15345  return res;
15346  }
15347  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15348  }
15349  // No other matching memory type index could be found.
15350  else
15351  {
15352  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15353  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15354  }
15355  }
15356  }
15357  }
15358  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15359  else
15360  return res;
15361  }
15362 }
15363 
// Frees a batch of allocations in reverse order. Null handles in the array
// are skipped. Routes each allocation back to its origin: a custom pool's
// block vector, the default block vector for its memory type, or the
// dedicated-allocation path. Lost allocations skip the actual free but still
// have their budget accounting and object storage reclaimed.
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            // TouchAllocation returns false for lost allocations, which no
            // longer own memory and therefore need no free of the backing store.
            if(TouchAllocation(allocation))
            {
                // Debug feature: overwrite freed memory with a recognizable pattern.
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        // Return the suballocation to the block vector it came from.
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetBlock()->GetParentPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            // Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes.
            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
            allocation->SetUserData(this, VMA_NULL);
            allocation->Dtor();
            m_AllocationObjectAllocator.Free(allocation);
        }
    }
}
15417 
15418 VkResult VmaAllocator_T::ResizeAllocation(
15419  const VmaAllocation alloc,
15420  VkDeviceSize newSize)
15421 {
15422  // This function is deprecated and so it does nothing. It's left for backward compatibility.
15423  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
15424  {
15425  return VK_ERROR_VALIDATION_FAILED_EXT;
15426  }
15427  if(newSize == alloc->GetSize())
15428  {
15429  return VK_SUCCESS;
15430  }
15431  return VK_ERROR_OUT_OF_POOL_MEMORY;
15432 }
15433 
// Computes detailed statistics over everything the allocator owns: the
// default block vectors, all custom pools, and all dedicated allocations.
// Results are aggregated per memory type, per heap, and in a grand total.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools.
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess (e.g. derive averages from the accumulated sums).
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
15484 
// Fills outBudget[0..heapCount) with usage/budget figures for heaps starting
// at firstHeap. When VK_EXT_memory_budget is active, uses driver-reported
// numbers (refreshed after every 30 allocator operations); otherwise
// estimates usage from VMA's own block bytes and budgets 80% of heap size.
void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
{
#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        // Cached driver data is considered fresh for up to 30 operations.
        if(m_Budget.m_OperationsSinceBudgetFetch < 30)
        {
            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
            for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
            {
                const uint32_t heapIndex = firstHeap + i;

                outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
                outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

                // Adjust the driver-reported usage by the delta in VMA's own
                // block bytes since the last budget fetch.
                if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
                {
                    outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
                        outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
                }
                else
                {
                    outBudget->usage = 0;
                }

                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
                outBudget->budget = VMA_MIN(
                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
            }
        }
        else
        {
            UpdateVulkanBudget(); // Outside of mutex lock
            GetBudget(outBudget, firstHeap, heapCount); // Recursion
        }
    }
    else
#endif
    {
        // Fallback path without VK_EXT_memory_budget: estimate from VMA's own
        // bookkeeping only.
        for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
        {
            const uint32_t heapIndex = firstHeap + i;

            outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
            outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

            outBudget->usage = outBudget->blockBytes;
            outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
        }
    }
}
15536 
static const uint32_t VMA_VENDOR_ID_AMD = 4098; // PCI vendor ID of AMD = 0x1002.
15538 
15539 VkResult VmaAllocator_T::DefragmentationBegin(
15540  const VmaDefragmentationInfo2& info,
15541  VmaDefragmentationStats* pStats,
15542  VmaDefragmentationContext* pContext)
15543 {
15544  if(info.pAllocationsChanged != VMA_NULL)
15545  {
15546  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15547  }
15548 
15549  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15550  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15551 
15552  (*pContext)->AddPools(info.poolCount, info.pPools);
15553  (*pContext)->AddAllocations(
15555 
15556  VkResult res = (*pContext)->Defragment(
15559  info.commandBuffer, pStats);
15560 
15561  if(res != VK_NOT_READY)
15562  {
15563  vma_delete(this, *pContext);
15564  *pContext = VMA_NULL;
15565  }
15566 
15567  return res;
15568 }
15569 
15570 VkResult VmaAllocator_T::DefragmentationEnd(
15571  VmaDefragmentationContext context)
15572 {
15573  vma_delete(this, context);
15574  return VK_SUCCESS;
15575 }
15576 
// Fills *pAllocationInfo with the current parameters of hAllocation.
// For allocations that can become lost, this also acts as a "touch": it tries
// to advance the allocation's last-use frame index to the current frame, or
// reports it as lost (memoryType == UINT32_MAX, deviceMemory == VK_NULL_HANDLE).
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free CAS loop racing against concurrent code that may mark the
        // allocation VMA_FRAME_INDEX_LOST.
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Lost: report sentinel values; size and pUserData remain valid.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully touched this frame: report live parameters.
                // pMappedData is VMA_NULL - lost-capable allocations are not
                // mappable (see Map(), which rejects CanBecomeLost()).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the frame index; on failure, presumably
                // CompareExchangeLastUseFrameIndex reloads the current value
                // into localLastUseFrameIndex (std::atomic CAS semantics -
                // TODO confirm against its declaration) and we loop again.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds also advance the last-use frame index on non-lost
        // allocations so "last use" shows correctly in stats dumps.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
15648 
// "Touches" hAllocation: advances its last-use frame index to the current
// frame. Returns false if the allocation has been lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        // Same lock-free CAS loop as in GetAllocationInfo - see the warning there.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // On CAS failure, localLastUseFrameIndex is presumably updated
                // with the freshly observed value and the loop retries -
                // TODO confirm against CompareExchangeLastUseFrameIndex.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds also track last use of non-lost allocations.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // A non-lost-capable allocation can never be lost.
        return true;
    }
}
15700 
15701 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15702 {
15703  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15704 
15705  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15706 
15707  if(newCreateInfo.maxBlockCount == 0)
15708  {
15709  newCreateInfo.maxBlockCount = SIZE_MAX;
15710  }
15711  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15712  {
15713  return VK_ERROR_INITIALIZATION_FAILED;
15714  }
15715 
15716  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15717 
15718  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15719 
15720  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15721  if(res != VK_SUCCESS)
15722  {
15723  vma_delete(this, *pPool);
15724  *pPool = VMA_NULL;
15725  return res;
15726  }
15727 
15728  // Add to m_Pools.
15729  {
15730  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15731  (*pPool)->SetId(m_NextPoolId++);
15732  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15733  }
15734 
15735  return VK_SUCCESS;
15736 }
15737 
15738 void VmaAllocator_T::DestroyPool(VmaPool pool)
15739 {
15740  // Remove from m_Pools.
15741  {
15742  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15743  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15744  VMA_ASSERT(success && "Pool not found in Allocator.");
15745  }
15746 
15747  vma_delete(this, pool);
15748 }
15749 
15750 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15751 {
15752  pool->m_BlockVector.GetPoolStats(pPoolStats);
15753 }
15754 
// Stores the application-provided frame index (read by the lost-allocation
// logic, e.g. GetAllocationInfo/TouchAllocation) and, when
// VK_EXT_memory_budget is in use, refreshes the cached budget numbers.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET
}
15766 
15767 void VmaAllocator_T::MakePoolAllocationsLost(
15768  VmaPool hPool,
15769  size_t* pLostAllocationCount)
15770 {
15771  hPool->m_BlockVector.MakePoolAllocationsLost(
15772  m_CurrentFrameIndex.load(),
15773  pLostAllocationCount);
15774 }
15775 
15776 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15777 {
15778  return hPool->m_BlockVector.CheckCorruption();
15779 }
15780 
15781 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15782 {
15783  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15784 
15785  // Process default pools.
15786  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15787  {
15788  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15789  {
15790  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15791  VMA_ASSERT(pBlockVector);
15792  VkResult localRes = pBlockVector->CheckCorruption();
15793  switch(localRes)
15794  {
15795  case VK_ERROR_FEATURE_NOT_PRESENT:
15796  break;
15797  case VK_SUCCESS:
15798  finalRes = VK_SUCCESS;
15799  break;
15800  default:
15801  return localRes;
15802  }
15803  }
15804  }
15805 
15806  // Process custom pools.
15807  {
15808  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15809  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15810  {
15811  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15812  {
15813  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15814  switch(localRes)
15815  {
15816  case VK_ERROR_FEATURE_NOT_PRESENT:
15817  break;
15818  case VK_SUCCESS:
15819  finalRes = VK_SUCCESS;
15820  break;
15821  default:
15822  return localRes;
15823  }
15824  }
15825  }
15826  }
15827 
15828  return finalRes;
15829 }
15830 
15831 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15832 {
15833  *pAllocation = m_AllocationObjectAllocator.Allocate();
15834  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15835  (*pAllocation)->InitLost();
15836 }
15837 
15838 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15839 {
15840  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15841 
15842  // HeapSizeLimit is in effect for this heap.
15843  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15844  {
15845  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15846  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
15847  for(;;)
15848  {
15849  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15850  if(blockBytesAfterAllocation > heapSize)
15851  {
15852  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15853  }
15854  if(m_Budget.m_BlockBytes->compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
15855  {
15856  break;
15857  }
15858  }
15859  }
15860  else
15861  {
15862  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15863  }
15864 
15865  // VULKAN CALL vkAllocateMemory.
15866  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15867 
15868  if(res == VK_SUCCESS)
15869  {
15870 #if VMA_MEMORY_BUDGET
15871  ++m_Budget.m_OperationsSinceBudgetFetch;
15872 #endif
15873 
15874  // Informative callback.
15875  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15876  {
15877  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15878  }
15879  }
15880  else
15881  {
15882  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15883  }
15884 
15885  return res;
15886 }
15887 
15888 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15889 {
15890  // Informative callback.
15891  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15892  {
15893  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15894  }
15895 
15896  // VULKAN CALL vkFreeMemory.
15897  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15898 
15899  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
15900 }
15901 
15902 VkResult VmaAllocator_T::BindVulkanBuffer(
15903  VkDeviceMemory memory,
15904  VkDeviceSize memoryOffset,
15905  VkBuffer buffer,
15906  const void* pNext)
15907 {
15908  if(pNext != VMA_NULL)
15909  {
15910 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15911  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15912  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15913  {
15914  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15915  bindBufferMemoryInfo.pNext = pNext;
15916  bindBufferMemoryInfo.buffer = buffer;
15917  bindBufferMemoryInfo.memory = memory;
15918  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15919  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15920  }
15921  else
15922 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15923  {
15924  return VK_ERROR_EXTENSION_NOT_PRESENT;
15925  }
15926  }
15927  else
15928  {
15929  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15930  }
15931 }
15932 
15933 VkResult VmaAllocator_T::BindVulkanImage(
15934  VkDeviceMemory memory,
15935  VkDeviceSize memoryOffset,
15936  VkImage image,
15937  const void* pNext)
15938 {
15939  if(pNext != VMA_NULL)
15940  {
15941 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15942  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15943  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15944  {
15945  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15946  bindBufferMemoryInfo.pNext = pNext;
15947  bindBufferMemoryInfo.image = image;
15948  bindBufferMemoryInfo.memory = memory;
15949  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15950  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15951  }
15952  else
15953 #endif // #if VMA_BIND_MEMORY2
15954  {
15955  return VK_ERROR_EXTENSION_NOT_PRESENT;
15956  }
15957  }
15958  else
15959  {
15960  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15961  }
15962 }
15963 
15964 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15965 {
15966  if(hAllocation->CanBecomeLost())
15967  {
15968  return VK_ERROR_MEMORY_MAP_FAILED;
15969  }
15970 
15971  switch(hAllocation->GetType())
15972  {
15973  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15974  {
15975  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15976  char *pBytes = VMA_NULL;
15977  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15978  if(res == VK_SUCCESS)
15979  {
15980  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15981  hAllocation->BlockAllocMap();
15982  }
15983  return res;
15984  }
15985  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15986  return hAllocation->DedicatedAllocMap(this, ppData);
15987  default:
15988  VMA_ASSERT(0);
15989  return VK_ERROR_MEMORY_MAP_FAILED;
15990  }
15991 }
15992 
15993 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15994 {
15995  switch(hAllocation->GetType())
15996  {
15997  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15998  {
15999  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16000  hAllocation->BlockAllocUnmap();
16001  pBlock->Unmap(this, 1);
16002  }
16003  break;
16004  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16005  hAllocation->DedicatedAllocUnmap(this);
16006  break;
16007  default:
16008  VMA_ASSERT(0);
16009  }
16010 }
16011 
16012 VkResult VmaAllocator_T::BindBufferMemory(
16013  VmaAllocation hAllocation,
16014  VkDeviceSize allocationLocalOffset,
16015  VkBuffer hBuffer,
16016  const void* pNext)
16017 {
16018  VkResult res = VK_SUCCESS;
16019  switch(hAllocation->GetType())
16020  {
16021  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16022  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16023  break;
16024  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16025  {
16026  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16027  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16028  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16029  break;
16030  }
16031  default:
16032  VMA_ASSERT(0);
16033  }
16034  return res;
16035 }
16036 
16037 VkResult VmaAllocator_T::BindImageMemory(
16038  VmaAllocation hAllocation,
16039  VkDeviceSize allocationLocalOffset,
16040  VkImage hImage,
16041  const void* pNext)
16042 {
16043  VkResult res = VK_SUCCESS;
16044  switch(hAllocation->GetType())
16045  {
16046  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16047  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16048  break;
16049  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16050  {
16051  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16052  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16053  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16054  break;
16055  }
16056  default:
16057  VMA_ASSERT(0);
16058  }
16059  return res;
16060 }
16061 
// Flushes or invalidates (per `op`) the host caches for the byte range
// [offset, offset + size) within hAllocation. size may be VK_WHOLE_SIZE.
// No-op for size == 0 or memory types that are host-coherent.
// The range is expanded to nonCoherentAtomSize alignment as required by
// vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated block: the allocation starts at device-memory offset 0.
            // Align the start down and the end up, clamped to the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Suballocation offsets are expected to already be atom-aligned, so
            // adding allocationOffset preserves the alignment of memRange.offset.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
16137 
16138 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16139 {
16140  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16141 
16142  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16143  {
16144  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16145  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16146  VMA_ASSERT(pDedicatedAllocations);
16147  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16148  VMA_ASSERT(success);
16149  }
16150 
16151  VkDeviceMemory hMemory = allocation->GetMemory();
16152 
16153  /*
16154  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
16155  before vkFreeMemory.
16156 
16157  if(allocation->GetMappedData() != VMA_NULL)
16158  {
16159  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16160  }
16161  */
16162 
16163  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16164 
16165  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16166 }
16167 
16168 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16169 {
16170  VkBufferCreateInfo dummyBufCreateInfo;
16171  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16172 
16173  uint32_t memoryTypeBits = 0;
16174 
16175  // Create buffer.
16176  VkBuffer buf = VK_NULL_HANDLE;
16177  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16178  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16179  if(res == VK_SUCCESS)
16180  {
16181  // Query for supported memory types.
16182  VkMemoryRequirements memReq;
16183  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16184  memoryTypeBits = memReq.memoryTypeBits;
16185 
16186  // Destroy buffer.
16187  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16188  }
16189 
16190  return memoryTypeBits;
16191 }
16192 
16193 #if VMA_MEMORY_BUDGET
16194 
16195 void VmaAllocator_T::UpdateVulkanBudget()
16196 {
16197  VMA_ASSERT(m_UseExtMemoryBudget);
16198 
16199  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16200 
16201  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16202  memProps.pNext = &budgetProps;
16203 
16204  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16205 
16206  {
16207  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16208 
16209  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16210  {
16211  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16212  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16213  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16214  }
16215  m_Budget.m_OperationsSinceBudgetFetch = 0;
16216  }
16217 }
16218 
16219 #endif // #if VMA_MEMORY_BUDGET
16220 
16221 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16222 {
16223  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16224  !hAllocation->CanBecomeLost() &&
16225  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16226  {
16227  void* pData = VMA_NULL;
16228  VkResult res = Map(hAllocation, &pData);
16229  if(res == VK_SUCCESS)
16230  {
16231  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16232  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16233  Unmap(hAllocation);
16234  }
16235  else
16236  {
16237  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16238  }
16239  }
16240 }
16241 
// Returns the memory-type bit mask usable for GPU defragmentation, computing
// it lazily on first call and caching it afterwards.
uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
{
    uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
    // UINT32_MAX is the "not yet calculated" sentinel.
    if(memoryTypeBits == UINT32_MAX)
    {
        // NOTE(review): plain load/store rather than CAS - concurrent callers
        // may both compute and store the value. This appears benign since the
        // computed result is the same for all callers; confirm intentional.
        memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
        m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
    }
    return memoryTypeBits;
}
16252 
16253 #if VMA_STATS_STRING_ENABLED
16254 
// Writes the detailed memory map as JSON: dedicated allocations per memory
// type, default pools per memory type, and custom pools keyed by pool ID.
// Sections are emitted only when non-empty.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" section - opened lazily on the first non-empty
    // per-type vector so an empty section is never emitted.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools" section - one entry per memory type with a non-empty
    // block vector; opened lazily like above.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their numeric ID.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
16340 
16341 #endif // #if VMA_STATS_STRING_ENABLED
16342 
16344 // Public interface
16345 
16346 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
16347  const VmaAllocatorCreateInfo* pCreateInfo,
16348  VmaAllocator* pAllocator)
16349 {
16350  VMA_ASSERT(pCreateInfo && pAllocator);
16351  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
16352  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
16353  VMA_DEBUG_LOG("vmaCreateAllocator");
16354  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
16355  return (*pAllocator)->Init(pCreateInfo);
16356 }
16357 
16358 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
16359  VmaAllocator allocator)
16360 {
16361  if(allocator != VK_NULL_HANDLE)
16362  {
16363  VMA_DEBUG_LOG("vmaDestroyAllocator");
16364  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
16365  vma_delete(&allocationCallbacks, allocator);
16366  }
16367 }
16368 
16369 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
16370  VmaAllocator allocator,
16371  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
16372 {
16373  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
16374  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
16375 }
16376 
16377 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
16378  VmaAllocator allocator,
16379  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
16380 {
16381  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
16382  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
16383 }
16384 
16385 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
16386  VmaAllocator allocator,
16387  uint32_t memoryTypeIndex,
16388  VkMemoryPropertyFlags* pFlags)
16389 {
16390  VMA_ASSERT(allocator && pFlags);
16391  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
16392  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
16393 }
16394 
// Public API: sets the current frame index used by lost-allocation tracking.
// VMA_FRAME_INDEX_LOST is a reserved sentinel and must not be passed in.
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);
    VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}
16406 
// Public API: computes full statistics for the allocator into *pStats.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
16415 
// Public API: fills one VmaBudget per memory heap into the array pointed to
// by pBudget (which must have room for GetMemoryHeapCount() entries).
VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget)
{
    VMA_ASSERT(allocator && pBudget);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
}
16424 
16425 #if VMA_STATS_STRING_ENABLED
16426 
// Public API: builds a JSON statistics string describing budgets, per-heap and
// per-type stats, and (optionally) the detailed memory map. The returned
// string is allocated with the allocator's callbacks and must be released
// with vmaFreeStatsString.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        // Snapshot budgets and stats once, then render both.
        VmaBudget budget[VK_MAX_MEMORY_HEAPS];
        allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One JSON object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            json.WriteString("Budget");
            json.BeginObject();
            {
                json.WriteString("BlockBytes");
                json.WriteNumber(budget[heapIndex].blockBytes);
                json.WriteString("AllocationBytes");
                json.WriteNumber(budget[heapIndex].allocationBytes);
                json.WriteString("Usage");
                json.WriteNumber(budget[heapIndex].usage);
                json.WriteString("Budget");
                json.WriteNumber(budget[heapIndex].budget);
            }
            json.EndObject();

            // Heap-level stats only when the heap has at least one block.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Memory types belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a NUL-terminated string owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
16551 
16552 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
16553  VmaAllocator allocator,
16554  char* pStatsString)
16555 {
16556  if(pStatsString != VMA_NULL)
16557  {
16558  VMA_ASSERT(allocator);
16559  size_t len = strlen(pStatsString);
16560  vma_delete_array(allocator, pStatsString, len + 1);
16561  }
16562 }
16563 
16564 #endif // #if VMA_STATS_STRING_ENABLED
16565 
16566 /*
16567 This function is not protected by any mutex because it just reads immutable data.
16568 */
16569 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
16570  VmaAllocator allocator,
16571  uint32_t memoryTypeBits,
16572  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16573  uint32_t* pMemoryTypeIndex)
16574 {
16575  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16576  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16577  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16578 
16579  if(pAllocationCreateInfo->memoryTypeBits != 0)
16580  {
16581  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
16582  }
16583 
16584  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
16585  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
16586  uint32_t notPreferredFlags = 0;
16587 
16588  // Convert usage to requiredFlags and preferredFlags.
16589  switch(pAllocationCreateInfo->usage)
16590  {
16592  break;
16594  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16595  {
16596  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16597  }
16598  break;
16600  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
16601  break;
16603  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16604  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16605  {
16606  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16607  }
16608  break;
16610  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16611  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16612  break;
16614  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16615  break;
16617  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
16618  break;
16619  default:
16620  VMA_ASSERT(0);
16621  break;
16622  }
16623 
16624  *pMemoryTypeIndex = UINT32_MAX;
16625  uint32_t minCost = UINT32_MAX;
16626  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16627  memTypeIndex < allocator->GetMemoryTypeCount();
16628  ++memTypeIndex, memTypeBit <<= 1)
16629  {
16630  // This memory type is acceptable according to memoryTypeBits bitmask.
16631  if((memTypeBit & memoryTypeBits) != 0)
16632  {
16633  const VkMemoryPropertyFlags currFlags =
16634  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16635  // This memory type contains requiredFlags.
16636  if((requiredFlags & ~currFlags) == 0)
16637  {
16638  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16639  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
16640  VmaCountBitsSet(currFlags & notPreferredFlags);
16641  // Remember memory type with lowest cost.
16642  if(currCost < minCost)
16643  {
16644  *pMemoryTypeIndex = memTypeIndex;
16645  if(currCost == 0)
16646  {
16647  return VK_SUCCESS;
16648  }
16649  minCost = currCost;
16650  }
16651  }
16652  }
16653  }
16654  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16655 }
16656 
16657 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
16658  VmaAllocator allocator,
16659  const VkBufferCreateInfo* pBufferCreateInfo,
16660  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16661  uint32_t* pMemoryTypeIndex)
16662 {
16663  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16664  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16665  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16666  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16667 
16668  const VkDevice hDev = allocator->m_hDevice;
16669  VkBuffer hBuffer = VK_NULL_HANDLE;
16670  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16671  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16672  if(res == VK_SUCCESS)
16673  {
16674  VkMemoryRequirements memReq = {};
16675  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16676  hDev, hBuffer, &memReq);
16677 
16678  res = vmaFindMemoryTypeIndex(
16679  allocator,
16680  memReq.memoryTypeBits,
16681  pAllocationCreateInfo,
16682  pMemoryTypeIndex);
16683 
16684  allocator->GetVulkanFunctions().vkDestroyBuffer(
16685  hDev, hBuffer, allocator->GetAllocationCallbacks());
16686  }
16687  return res;
16688 }
16689 
16690 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
16691  VmaAllocator allocator,
16692  const VkImageCreateInfo* pImageCreateInfo,
16693  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16694  uint32_t* pMemoryTypeIndex)
16695 {
16696  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16697  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16698  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16699  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16700 
16701  const VkDevice hDev = allocator->m_hDevice;
16702  VkImage hImage = VK_NULL_HANDLE;
16703  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16704  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16705  if(res == VK_SUCCESS)
16706  {
16707  VkMemoryRequirements memReq = {};
16708  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16709  hDev, hImage, &memReq);
16710 
16711  res = vmaFindMemoryTypeIndex(
16712  allocator,
16713  memReq.memoryTypeBits,
16714  pAllocationCreateInfo,
16715  pMemoryTypeIndex);
16716 
16717  allocator->GetVulkanFunctions().vkDestroyImage(
16718  hDev, hImage, allocator->GetAllocationCallbacks());
16719  }
16720  return res;
16721 }
16722 
16723 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
16724  VmaAllocator allocator,
16725  const VmaPoolCreateInfo* pCreateInfo,
16726  VmaPool* pPool)
16727 {
16728  VMA_ASSERT(allocator && pCreateInfo && pPool);
16729 
16730  VMA_DEBUG_LOG("vmaCreatePool");
16731 
16732  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16733 
16734  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16735 
16736 #if VMA_RECORDING_ENABLED
16737  if(allocator->GetRecorder() != VMA_NULL)
16738  {
16739  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16740  }
16741 #endif
16742 
16743  return res;
16744 }
16745 
16746 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
16747  VmaAllocator allocator,
16748  VmaPool pool)
16749 {
16750  VMA_ASSERT(allocator);
16751 
16752  if(pool == VK_NULL_HANDLE)
16753  {
16754  return;
16755  }
16756 
16757  VMA_DEBUG_LOG("vmaDestroyPool");
16758 
16759  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16760 
16761 #if VMA_RECORDING_ENABLED
16762  if(allocator->GetRecorder() != VMA_NULL)
16763  {
16764  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16765  }
16766 #endif
16767 
16768  allocator->DestroyPool(pool);
16769 }
16770 
16771 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
16772  VmaAllocator allocator,
16773  VmaPool pool,
16774  VmaPoolStats* pPoolStats)
16775 {
16776  VMA_ASSERT(allocator && pool && pPoolStats);
16777 
16778  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16779 
16780  allocator->GetPoolStats(pool, pPoolStats);
16781 }
16782 
16783 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
16784  VmaAllocator allocator,
16785  VmaPool pool,
16786  size_t* pLostAllocationCount)
16787 {
16788  VMA_ASSERT(allocator && pool);
16789 
16790  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16791 
16792 #if VMA_RECORDING_ENABLED
16793  if(allocator->GetRecorder() != VMA_NULL)
16794  {
16795  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16796  }
16797 #endif
16798 
16799  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16800 }
16801 
16802 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16803 {
16804  VMA_ASSERT(allocator && pool);
16805 
16806  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16807 
16808  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16809 
16810  return allocator->CheckPoolCorruption(pool);
16811 }
16812 
16813 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
16814  VmaAllocator allocator,
16815  VmaPool pool,
16816  const char** ppName)
16817 {
16818  VMA_ASSERT(allocator && pool);
16819 
16820  VMA_DEBUG_LOG("vmaGetPoolName");
16821 
16822  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16823 
16824  *ppName = pool->GetName();
16825 }
16826 
16827 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
16828  VmaAllocator allocator,
16829  VmaPool pool,
16830  const char* pName)
16831 {
16832  VMA_ASSERT(allocator && pool);
16833 
16834  VMA_DEBUG_LOG("vmaSetPoolName");
16835 
16836  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16837 
16838  pool->SetName(pName);
16839 
16840 #if VMA_RECORDING_ENABLED
16841  if(allocator->GetRecorder() != VMA_NULL)
16842  {
16843  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
16844  }
16845 #endif
16846 }
16847 
16848 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
16849  VmaAllocator allocator,
16850  const VkMemoryRequirements* pVkMemoryRequirements,
16851  const VmaAllocationCreateInfo* pCreateInfo,
16852  VmaAllocation* pAllocation,
16853  VmaAllocationInfo* pAllocationInfo)
16854 {
16855  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16856 
16857  VMA_DEBUG_LOG("vmaAllocateMemory");
16858 
16859  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16860 
16861  VkResult result = allocator->AllocateMemory(
16862  *pVkMemoryRequirements,
16863  false, // requiresDedicatedAllocation
16864  false, // prefersDedicatedAllocation
16865  VK_NULL_HANDLE, // dedicatedBuffer
16866  VK_NULL_HANDLE, // dedicatedImage
16867  *pCreateInfo,
16868  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16869  1, // allocationCount
16870  pAllocation);
16871 
16872 #if VMA_RECORDING_ENABLED
16873  if(allocator->GetRecorder() != VMA_NULL)
16874  {
16875  allocator->GetRecorder()->RecordAllocateMemory(
16876  allocator->GetCurrentFrameIndex(),
16877  *pVkMemoryRequirements,
16878  *pCreateInfo,
16879  *pAllocation);
16880  }
16881 #endif
16882 
16883  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16884  {
16885  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16886  }
16887 
16888  return result;
16889 }
16890 
16891 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
16892  VmaAllocator allocator,
16893  const VkMemoryRequirements* pVkMemoryRequirements,
16894  const VmaAllocationCreateInfo* pCreateInfo,
16895  size_t allocationCount,
16896  VmaAllocation* pAllocations,
16897  VmaAllocationInfo* pAllocationInfo)
16898 {
16899  if(allocationCount == 0)
16900  {
16901  return VK_SUCCESS;
16902  }
16903 
16904  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16905 
16906  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16907 
16908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16909 
16910  VkResult result = allocator->AllocateMemory(
16911  *pVkMemoryRequirements,
16912  false, // requiresDedicatedAllocation
16913  false, // prefersDedicatedAllocation
16914  VK_NULL_HANDLE, // dedicatedBuffer
16915  VK_NULL_HANDLE, // dedicatedImage
16916  *pCreateInfo,
16917  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16918  allocationCount,
16919  pAllocations);
16920 
16921 #if VMA_RECORDING_ENABLED
16922  if(allocator->GetRecorder() != VMA_NULL)
16923  {
16924  allocator->GetRecorder()->RecordAllocateMemoryPages(
16925  allocator->GetCurrentFrameIndex(),
16926  *pVkMemoryRequirements,
16927  *pCreateInfo,
16928  (uint64_t)allocationCount,
16929  pAllocations);
16930  }
16931 #endif
16932 
16933  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16934  {
16935  for(size_t i = 0; i < allocationCount; ++i)
16936  {
16937  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16938  }
16939  }
16940 
16941  return result;
16942 }
16943 
16944 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
16945  VmaAllocator allocator,
16946  VkBuffer buffer,
16947  const VmaAllocationCreateInfo* pCreateInfo,
16948  VmaAllocation* pAllocation,
16949  VmaAllocationInfo* pAllocationInfo)
16950 {
16951  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16952 
16953  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16954 
16955  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16956 
16957  VkMemoryRequirements vkMemReq = {};
16958  bool requiresDedicatedAllocation = false;
16959  bool prefersDedicatedAllocation = false;
16960  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16961  requiresDedicatedAllocation,
16962  prefersDedicatedAllocation);
16963 
16964  VkResult result = allocator->AllocateMemory(
16965  vkMemReq,
16966  requiresDedicatedAllocation,
16967  prefersDedicatedAllocation,
16968  buffer, // dedicatedBuffer
16969  VK_NULL_HANDLE, // dedicatedImage
16970  *pCreateInfo,
16971  VMA_SUBALLOCATION_TYPE_BUFFER,
16972  1, // allocationCount
16973  pAllocation);
16974 
16975 #if VMA_RECORDING_ENABLED
16976  if(allocator->GetRecorder() != VMA_NULL)
16977  {
16978  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16979  allocator->GetCurrentFrameIndex(),
16980  vkMemReq,
16981  requiresDedicatedAllocation,
16982  prefersDedicatedAllocation,
16983  *pCreateInfo,
16984  *pAllocation);
16985  }
16986 #endif
16987 
16988  if(pAllocationInfo && result == VK_SUCCESS)
16989  {
16990  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16991  }
16992 
16993  return result;
16994 }
16995 
16996 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
16997  VmaAllocator allocator,
16998  VkImage image,
16999  const VmaAllocationCreateInfo* pCreateInfo,
17000  VmaAllocation* pAllocation,
17001  VmaAllocationInfo* pAllocationInfo)
17002 {
17003  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17004 
17005  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17006 
17007  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17008 
17009  VkMemoryRequirements vkMemReq = {};
17010  bool requiresDedicatedAllocation = false;
17011  bool prefersDedicatedAllocation = false;
17012  allocator->GetImageMemoryRequirements(image, vkMemReq,
17013  requiresDedicatedAllocation, prefersDedicatedAllocation);
17014 
17015  VkResult result = allocator->AllocateMemory(
17016  vkMemReq,
17017  requiresDedicatedAllocation,
17018  prefersDedicatedAllocation,
17019  VK_NULL_HANDLE, // dedicatedBuffer
17020  image, // dedicatedImage
17021  *pCreateInfo,
17022  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17023  1, // allocationCount
17024  pAllocation);
17025 
17026 #if VMA_RECORDING_ENABLED
17027  if(allocator->GetRecorder() != VMA_NULL)
17028  {
17029  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17030  allocator->GetCurrentFrameIndex(),
17031  vkMemReq,
17032  requiresDedicatedAllocation,
17033  prefersDedicatedAllocation,
17034  *pCreateInfo,
17035  *pAllocation);
17036  }
17037 #endif
17038 
17039  if(pAllocationInfo && result == VK_SUCCESS)
17040  {
17041  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17042  }
17043 
17044  return result;
17045 }
17046 
17047 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17048  VmaAllocator allocator,
17049  VmaAllocation allocation)
17050 {
17051  VMA_ASSERT(allocator);
17052 
17053  if(allocation == VK_NULL_HANDLE)
17054  {
17055  return;
17056  }
17057 
17058  VMA_DEBUG_LOG("vmaFreeMemory");
17059 
17060  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17061 
17062 #if VMA_RECORDING_ENABLED
17063  if(allocator->GetRecorder() != VMA_NULL)
17064  {
17065  allocator->GetRecorder()->RecordFreeMemory(
17066  allocator->GetCurrentFrameIndex(),
17067  allocation);
17068  }
17069 #endif
17070 
17071  allocator->FreeMemory(
17072  1, // allocationCount
17073  &allocation);
17074 }
17075 
17076 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17077  VmaAllocator allocator,
17078  size_t allocationCount,
17079  VmaAllocation* pAllocations)
17080 {
17081  if(allocationCount == 0)
17082  {
17083  return;
17084  }
17085 
17086  VMA_ASSERT(allocator);
17087 
17088  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17089 
17090  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17091 
17092 #if VMA_RECORDING_ENABLED
17093  if(allocator->GetRecorder() != VMA_NULL)
17094  {
17095  allocator->GetRecorder()->RecordFreeMemoryPages(
17096  allocator->GetCurrentFrameIndex(),
17097  (uint64_t)allocationCount,
17098  pAllocations);
17099  }
17100 #endif
17101 
17102  allocator->FreeMemory(allocationCount, pAllocations);
17103 }
17104 
17105 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17106  VmaAllocator allocator,
17107  VmaAllocation allocation,
17108  VkDeviceSize newSize)
17109 {
17110  VMA_ASSERT(allocator && allocation);
17111 
17112  VMA_DEBUG_LOG("vmaResizeAllocation");
17113 
17114  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17115 
17116  return allocator->ResizeAllocation(allocation, newSize);
17117 }
17118 
17119 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17120  VmaAllocator allocator,
17121  VmaAllocation allocation,
17122  VmaAllocationInfo* pAllocationInfo)
17123 {
17124  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17125 
17126  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17127 
17128 #if VMA_RECORDING_ENABLED
17129  if(allocator->GetRecorder() != VMA_NULL)
17130  {
17131  allocator->GetRecorder()->RecordGetAllocationInfo(
17132  allocator->GetCurrentFrameIndex(),
17133  allocation);
17134  }
17135 #endif
17136 
17137  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17138 }
17139 
17140 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17141  VmaAllocator allocator,
17142  VmaAllocation allocation)
17143 {
17144  VMA_ASSERT(allocator && allocation);
17145 
17146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17147 
17148 #if VMA_RECORDING_ENABLED
17149  if(allocator->GetRecorder() != VMA_NULL)
17150  {
17151  allocator->GetRecorder()->RecordTouchAllocation(
17152  allocator->GetCurrentFrameIndex(),
17153  allocation);
17154  }
17155 #endif
17156 
17157  return allocator->TouchAllocation(allocation);
17158 }
17159 
17160 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17161  VmaAllocator allocator,
17162  VmaAllocation allocation,
17163  void* pUserData)
17164 {
17165  VMA_ASSERT(allocator && allocation);
17166 
17167  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17168 
17169  allocation->SetUserData(allocator, pUserData);
17170 
17171 #if VMA_RECORDING_ENABLED
17172  if(allocator->GetRecorder() != VMA_NULL)
17173  {
17174  allocator->GetRecorder()->RecordSetAllocationUserData(
17175  allocator->GetCurrentFrameIndex(),
17176  allocation,
17177  pUserData);
17178  }
17179 #endif
17180 }
17181 
17182 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17183  VmaAllocator allocator,
17184  VmaAllocation* pAllocation)
17185 {
17186  VMA_ASSERT(allocator && pAllocation);
17187 
17188  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17189 
17190  allocator->CreateLostAllocation(pAllocation);
17191 
17192 #if VMA_RECORDING_ENABLED
17193  if(allocator->GetRecorder() != VMA_NULL)
17194  {
17195  allocator->GetRecorder()->RecordCreateLostAllocation(
17196  allocator->GetCurrentFrameIndex(),
17197  *pAllocation);
17198  }
17199 #endif
17200 }
17201 
17202 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17203  VmaAllocator allocator,
17204  VmaAllocation allocation,
17205  void** ppData)
17206 {
17207  VMA_ASSERT(allocator && allocation && ppData);
17208 
17209  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17210 
17211  VkResult res = allocator->Map(allocation, ppData);
17212 
17213 #if VMA_RECORDING_ENABLED
17214  if(allocator->GetRecorder() != VMA_NULL)
17215  {
17216  allocator->GetRecorder()->RecordMapMemory(
17217  allocator->GetCurrentFrameIndex(),
17218  allocation);
17219  }
17220 #endif
17221 
17222  return res;
17223 }
17224 
17225 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17226  VmaAllocator allocator,
17227  VmaAllocation allocation)
17228 {
17229  VMA_ASSERT(allocator && allocation);
17230 
17231  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17232 
17233 #if VMA_RECORDING_ENABLED
17234  if(allocator->GetRecorder() != VMA_NULL)
17235  {
17236  allocator->GetRecorder()->RecordUnmapMemory(
17237  allocator->GetCurrentFrameIndex(),
17238  allocation);
17239  }
17240 #endif
17241 
17242  allocator->Unmap(allocation);
17243 }
17244 
17245 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17246 {
17247  VMA_ASSERT(allocator && allocation);
17248 
17249  VMA_DEBUG_LOG("vmaFlushAllocation");
17250 
17251  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17252 
17253  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17254 
17255 #if VMA_RECORDING_ENABLED
17256  if(allocator->GetRecorder() != VMA_NULL)
17257  {
17258  allocator->GetRecorder()->RecordFlushAllocation(
17259  allocator->GetCurrentFrameIndex(),
17260  allocation, offset, size);
17261  }
17262 #endif
17263 }
17264 
17265 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17266 {
17267  VMA_ASSERT(allocator && allocation);
17268 
17269  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17270 
17271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17272 
17273  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17274 
17275 #if VMA_RECORDING_ENABLED
17276  if(allocator->GetRecorder() != VMA_NULL)
17277  {
17278  allocator->GetRecorder()->RecordInvalidateAllocation(
17279  allocator->GetCurrentFrameIndex(),
17280  allocation, offset, size);
17281  }
17282 #endif
17283 }
17284 
17285 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17286 {
17287  VMA_ASSERT(allocator);
17288 
17289  VMA_DEBUG_LOG("vmaCheckCorruption");
17290 
17291  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17292 
17293  return allocator->CheckCorruption(memoryTypeBits);
17294 }
17295 
17296 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17297  VmaAllocator allocator,
17298  VmaAllocation* pAllocations,
17299  size_t allocationCount,
17300  VkBool32* pAllocationsChanged,
17301  const VmaDefragmentationInfo *pDefragmentationInfo,
17302  VmaDefragmentationStats* pDefragmentationStats)
17303 {
17304  // Deprecated interface, reimplemented using new one.
17305 
17306  VmaDefragmentationInfo2 info2 = {};
17307  info2.allocationCount = (uint32_t)allocationCount;
17308  info2.pAllocations = pAllocations;
17309  info2.pAllocationsChanged = pAllocationsChanged;
17310  if(pDefragmentationInfo != VMA_NULL)
17311  {
17312  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17313  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17314  }
17315  else
17316  {
17317  info2.maxCpuAllocationsToMove = UINT32_MAX;
17318  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
17319  }
17320  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
17321 
17323  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
17324  if(res == VK_NOT_READY)
17325  {
17326  res = vmaDefragmentationEnd( allocator, ctx);
17327  }
17328  return res;
17329 }
17330 
17331 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
17332  VmaAllocator allocator,
17333  const VmaDefragmentationInfo2* pInfo,
17334  VmaDefragmentationStats* pStats,
17335  VmaDefragmentationContext *pContext)
17336 {
17337  VMA_ASSERT(allocator && pInfo && pContext);
17338 
17339  // Degenerate case: Nothing to defragment.
17340  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
17341  {
17342  return VK_SUCCESS;
17343  }
17344 
17345  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
17346  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
17347  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
17348  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17349 
17350  VMA_DEBUG_LOG("vmaDefragmentationBegin");
17351 
17352  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17353 
17354  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
17355 
17356 #if VMA_RECORDING_ENABLED
17357  if(allocator->GetRecorder() != VMA_NULL)
17358  {
17359  allocator->GetRecorder()->RecordDefragmentationBegin(
17360  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
17361  }
17362 #endif
17363 
17364  return res;
17365 }
17366 
17367 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
17368  VmaAllocator allocator,
17369  VmaDefragmentationContext context)
17370 {
17371  VMA_ASSERT(allocator);
17372 
17373  VMA_DEBUG_LOG("vmaDefragmentationEnd");
17374 
17375  if(context != VK_NULL_HANDLE)
17376  {
17377  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17378 
17379 #if VMA_RECORDING_ENABLED
17380  if(allocator->GetRecorder() != VMA_NULL)
17381  {
17382  allocator->GetRecorder()->RecordDefragmentationEnd(
17383  allocator->GetCurrentFrameIndex(), context);
17384  }
17385 #endif
17386 
17387  return allocator->DefragmentationEnd(context);
17388  }
17389  else
17390  {
17391  return VK_SUCCESS;
17392  }
17393 }
17394 
17395 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
17396  VmaAllocator allocator,
17397  VmaAllocation allocation,
17398  VkBuffer buffer)
17399 {
17400  VMA_ASSERT(allocator && allocation && buffer);
17401 
17402  VMA_DEBUG_LOG("vmaBindBufferMemory");
17403 
17404  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17405 
17406  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
17407 }
17408 
17409 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
17410  VmaAllocator allocator,
17411  VmaAllocation allocation,
17412  VkDeviceSize allocationLocalOffset,
17413  VkBuffer buffer,
17414  const void* pNext)
17415 {
17416  VMA_ASSERT(allocator && allocation && buffer);
17417 
17418  VMA_DEBUG_LOG("vmaBindBufferMemory2");
17419 
17420  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17421 
17422  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
17423 }
17424 
17425 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
17426  VmaAllocator allocator,
17427  VmaAllocation allocation,
17428  VkImage image)
17429 {
17430  VMA_ASSERT(allocator && allocation && image);
17431 
17432  VMA_DEBUG_LOG("vmaBindImageMemory");
17433 
17434  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17435 
17436  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
17437 }
17438 
17439 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
17440  VmaAllocator allocator,
17441  VmaAllocation allocation,
17442  VkDeviceSize allocationLocalOffset,
17443  VkImage image,
17444  const void* pNext)
17445 {
17446  VMA_ASSERT(allocator && allocation && image);
17447 
17448  VMA_DEBUG_LOG("vmaBindImageMemory2");
17449 
17450  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17451 
17452  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
17453 }
17454 
/*
Creates a VkBuffer, allocates memory for it, and (unless DONT_BIND is
requested) binds them together. On any failure the partially created
objects are destroyed and both out-handles are reset to VK_NULL_HANDLE.
pAllocationInfo is optional.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Zero-size buffers are invalid per the Vulkan spec; reject early.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Reset out-handles so they are null on every failure path.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory, unless the caller opted out.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: unwind the allocation, then the buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: unwind the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    // Buffer creation failed: nothing to unwind.
    return res;
}
17567 
17568 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17569  VmaAllocator allocator,
17570  VkBuffer buffer,
17571  VmaAllocation allocation)
17572 {
17573  VMA_ASSERT(allocator);
17574 
17575  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17576  {
17577  return;
17578  }
17579 
17580  VMA_DEBUG_LOG("vmaDestroyBuffer");
17581 
17582  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17583 
17584 #if VMA_RECORDING_ENABLED
17585  if(allocator->GetRecorder() != VMA_NULL)
17586  {
17587  allocator->GetRecorder()->RecordDestroyBuffer(
17588  allocator->GetCurrentFrameIndex(),
17589  allocation);
17590  }
17591 #endif
17592 
17593  if(buffer != VK_NULL_HANDLE)
17594  {
17595  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17596  }
17597 
17598  if(allocation != VK_NULL_HANDLE)
17599  {
17600  allocator->FreeMemory(
17601  1, // allocationCount
17602  &allocation);
17603  }
17604 }
17605 
17606 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
17607  VmaAllocator allocator,
17608  const VkImageCreateInfo* pImageCreateInfo,
17609  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17610  VkImage* pImage,
17611  VmaAllocation* pAllocation,
17612  VmaAllocationInfo* pAllocationInfo)
17613 {
17614  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17615 
17616  if(pImageCreateInfo->extent.width == 0 ||
17617  pImageCreateInfo->extent.height == 0 ||
17618  pImageCreateInfo->extent.depth == 0 ||
17619  pImageCreateInfo->mipLevels == 0 ||
17620  pImageCreateInfo->arrayLayers == 0)
17621  {
17622  return VK_ERROR_VALIDATION_FAILED_EXT;
17623  }
17624 
17625  VMA_DEBUG_LOG("vmaCreateImage");
17626 
17627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17628 
17629  *pImage = VK_NULL_HANDLE;
17630  *pAllocation = VK_NULL_HANDLE;
17631 
17632  // 1. Create VkImage.
17633  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17634  allocator->m_hDevice,
17635  pImageCreateInfo,
17636  allocator->GetAllocationCallbacks(),
17637  pImage);
17638  if(res >= 0)
17639  {
17640  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17641  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17642  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17643 
17644  // 2. Allocate memory using allocator.
17645  VkMemoryRequirements vkMemReq = {};
17646  bool requiresDedicatedAllocation = false;
17647  bool prefersDedicatedAllocation = false;
17648  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17649  requiresDedicatedAllocation, prefersDedicatedAllocation);
17650 
17651  res = allocator->AllocateMemory(
17652  vkMemReq,
17653  requiresDedicatedAllocation,
17654  prefersDedicatedAllocation,
17655  VK_NULL_HANDLE, // dedicatedBuffer
17656  *pImage, // dedicatedImage
17657  *pAllocationCreateInfo,
17658  suballocType,
17659  1, // allocationCount
17660  pAllocation);
17661 
17662 #if VMA_RECORDING_ENABLED
17663  if(allocator->GetRecorder() != VMA_NULL)
17664  {
17665  allocator->GetRecorder()->RecordCreateImage(
17666  allocator->GetCurrentFrameIndex(),
17667  *pImageCreateInfo,
17668  *pAllocationCreateInfo,
17669  *pAllocation);
17670  }
17671 #endif
17672 
17673  if(res >= 0)
17674  {
17675  // 3. Bind image with memory.
17676  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17677  {
17678  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17679  }
17680  if(res >= 0)
17681  {
17682  // All steps succeeded.
17683  #if VMA_STATS_STRING_ENABLED
17684  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17685  #endif
17686  if(pAllocationInfo != VMA_NULL)
17687  {
17688  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17689  }
17690 
17691  return VK_SUCCESS;
17692  }
17693  allocator->FreeMemory(
17694  1, // allocationCount
17695  pAllocation);
17696  *pAllocation = VK_NULL_HANDLE;
17697  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17698  *pImage = VK_NULL_HANDLE;
17699  return res;
17700  }
17701  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17702  *pImage = VK_NULL_HANDLE;
17703  return res;
17704  }
17705  return res;
17706 }
17707 
17708 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17709  VmaAllocator allocator,
17710  VkImage image,
17711  VmaAllocation allocation)
17712 {
17713  VMA_ASSERT(allocator);
17714 
17715  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17716  {
17717  return;
17718  }
17719 
17720  VMA_DEBUG_LOG("vmaDestroyImage");
17721 
17722  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17723 
17724 #if VMA_RECORDING_ENABLED
17725  if(allocator->GetRecorder() != VMA_NULL)
17726  {
17727  allocator->GetRecorder()->RecordDestroyImage(
17728  allocator->GetCurrentFrameIndex(),
17729  allocation);
17730  }
17731 #endif
17732 
17733  if(image != VK_NULL_HANDLE)
17734  {
17735  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17736  }
17737  if(allocation != VK_NULL_HANDLE)
17738  {
17739  allocator->FreeMemory(
17740  1, // allocationCount
17741  &allocation);
17742  }
17743 }
17744 
17745 #endif // #ifdef VMA_IMPLEMENTATION
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1978
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1936
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1862
VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1973
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1934
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1839
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1999
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2575
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3112
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2025
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:1923
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2176
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2321
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2647
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2404
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1871
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2653
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2384
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1965
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1858
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2371
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2005
VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1971
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1939
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2790
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2144
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2478
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2150
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1938
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1765
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2415
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2201
VmaAllocator
Represents the main object of this library, once initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1950
VmaAllocatorCreateInfo
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1993
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2345
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3098
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2666
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2408
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1896
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3192
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2399
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2259
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2140
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2603
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2615
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Number of pools in pPools array.
Definition: vk_mem_alloc.h:3130
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2656
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1927
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1925
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3097
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2795
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2327
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1935
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2596
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2471
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2158
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2334
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:1911
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3109
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1943
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3161
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3196
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2650
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1937
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1975
VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2291
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3146
VmaAllocation
Represents single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2313
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2069
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1942
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1944
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3175
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1845
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2154
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2620
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2084
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2137
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3200
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2281
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2148
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2146
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2008
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1866
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2628
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2600
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1860
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaPool
Represents custom memory pool.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2307
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2378
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2606
VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2323
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2142
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1941
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3106
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3187
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2180
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2594
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2452
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2425
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2149
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2011
VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2429
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2002
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2050
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2212
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1933
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2809
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1996
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3100
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1948
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2150
VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2298
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2422
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2419
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2447
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2149
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2800
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1981
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1940
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2814
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2558
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2157
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:2062
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2669
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2436
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2156
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2075
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2264
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3166
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1947
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2642
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1949
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3151
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2781
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:2358
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1946
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2659
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2540
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3198
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2150
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2389
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2441
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1945
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2438
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2457
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3127
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3202
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2465
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3204
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1989
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2149
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2776
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2586
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2191
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2434
VmaDefragmentationContext
Represents an opaque object describing a started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3121
VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2590
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3156
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3182
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2395