Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

// Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// placed before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

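/*
Example (an illustrative sketch): these configuration macros are evaluated when
the header itself is compiled, so they must be defined before the #include,
e.g. in the one translation unit that also defines VMA_IMPLEMENTATION:

    #define VMA_RECORDING_ENABLED 1            // opt into the recording feature
    #define VMA_CALL_PRE __declspec(dllexport) // export public functions (Windows DLL build)
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
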
VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate; // Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;         // Optional, can be null.
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

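/*
Example (a minimal sketch; the function names are illustrative, not part of the API):
informative callbacks that log every vkAllocateMemory/vkFreeMemory performed by
the library, plugged in later via VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n", (unsigned long long)size, memoryType);
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freed %llu bytes of memory type %u\n", (unsigned long long)size, memoryType);
    }

    VmaDeviceMemoryCallbacks deviceMemoryCallbacks = { MyAllocateCallback, MyFreeCallback };
*/
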
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

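/*
Example (a minimal sketch, assuming statically linked Vulkan prototypes are
available): filling VmaVulkanFunctions by hand. This is only required when
VMA_STATIC_VULKAN_FUNCTIONS is 0; the struct is then passed as
VmaAllocatorCreateInfo::pVulkanFunctions.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = &vkFreeMemory;
    // ... and so on for every member; pointers can instead be fetched via
    // vkGetInstanceProcAddr/vkGetDeviceProcAddr when prototypes are not linked statically.
*/
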
typedef enum VmaRecordFlagBits {
    // Enables flush after recording every function call.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    // Flags for recording. Use VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    // Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* pHeapSizeLimit;

    const VmaVulkanFunctions* pVulkanFunctions;

    const VmaRecordSettings* pRecordSettings;

    VkInstance instance;

    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator);

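/*
Example (a minimal sketch, assuming `physicalDevice`, `device`, and `instance`
were created by the application): typical allocator creation and destruction.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
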
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;

    VkDeviceSize allocationBytes;

    VkDeviceSize usage;

    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget);

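/*
Example (a minimal sketch, assuming `allocator` is a valid VmaAllocator):
querying current usage and budget. pBudget should point to an array with one
VmaBudget entry per memory heap.

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);

    const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
    vmaGetMemoryProperties(allocator, &memProps);
    for(uint32_t heap = 0; heap < memProps->memoryHeapCount; ++heap)
    {
        printf("Heap %u: %llu used of %llu budgeted\n", heap,
            (unsigned long long)budgets[heap].usage,
            (unsigned long long)budgets[heap].budget);
    }
*/
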
#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

// Builds and returns statistics as a string in JSON format.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

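/*
Example (a minimal sketch): dumping the allocator state as a JSON string,
e.g. to be written to a file for offline inspection.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // ... save or log statsString ...
    vmaFreeStatsString(allocator, statsString);
*/
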
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

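/*
Example (a minimal sketch): finding the memory type index that would be used
for a uniform buffer that should be writable by the host.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/
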
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

// Describes parameters of a created VmaPool.
typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

// Describes the state of an existing VmaPool.
typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

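/*
Example (a minimal sketch, reusing `memTypeIndex` found above): creating a
custom pool with a fixed block size and destroying it when done.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
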
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char** ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName);

VK_DEFINE_HANDLE(VmaAllocation)

// Parameters of a VmaAllocation, retrieved with vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

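/*
Example (a minimal sketch, assuming `allocation` lives in host-visible memory
and `srcData`/`srcDataSize` are application-provided): uploading data through
a temporary mapping.

    void* mappedData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
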
VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    VmaAllocation* pAllocations;
    VkBool32* pAllocationsChanged;
    uint32_t poolCount;
    VmaPool* pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

// Deprecated. Compatibility parameters of the legacy defragmentation API.
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

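/*
Example (a minimal sketch): CPU-side defragmentation of a set of allocations.
`allocations`/`allocationCount` are application-provided; leaving commandBuffer
null restricts the pass to host-visible memory.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must be re-created and re-bound afterwards.
*/
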
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

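/*
Example (a minimal sketch): the typical way to create a buffer together with
its memory in a single call.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... later:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
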
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used by the stats-string helpers below
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

3492 
3493 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3494 //#define VMA_USE_STL_CONTAINERS 1
3495 
3496 /* Set this macro to 1 to make the library including and using STL containers:
3497 std::pair, std::vector, std::list, std::unordered_map.
3498 
3499 Set it to 0 or undefined to make the library using its own implementation of
3500 the containers.
3501 */
3502 #if VMA_USE_STL_CONTAINERS
3503  #define VMA_USE_STL_VECTOR 1
3504  #define VMA_USE_STL_UNORDERED_MAP 1
3505  #define VMA_USE_STL_LIST 1
3506 #endif
3507 
3508 #ifndef VMA_USE_STL_SHARED_MUTEX
3509  // Compiler conforms to C++17.
3510  #if __cplusplus >= 201703L
3511  #define VMA_USE_STL_SHARED_MUTEX 1
3512  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3513  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3514  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3515  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3516  #define VMA_USE_STL_SHARED_MUTEX 1
3517  #else
3518  #define VMA_USE_STL_SHARED_MUTEX 0
3519  #endif
3520 #endif
3521 
3522 /*
3523 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3524 Library has its own container implementation.
3525 */
3526 #if VMA_USE_STL_VECTOR
3527  #include <vector>
3528 #endif
3529 
3530 #if VMA_USE_STL_UNORDERED_MAP
3531  #include <unordered_map>
3532 #endif
3533 
3534 #if VMA_USE_STL_LIST
3535  #include <list>
3536 #endif
3537 
3538 /*
3539 Following headers are used in this CONFIGURATION section only, so feel free to
3540 remove them if not needed.
3541 */
3542 #include <cassert> // for assert
3543 #include <algorithm> // for min, max
3544 #include <mutex>
3545 
3546 #ifndef VMA_NULL
3547  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3548  #define VMA_NULL nullptr
3549 #endif
3550 
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Every allocation will have its own memory block. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all allocations, in bytes.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin before and after every allocation, in bytes.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    // Define this macro to 1 to automatically fill new and freed allocations with a bit pattern.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    // Define this macro to 1, together with a non-zero VMA_DEBUG_MARGIN, to write magic values
    // to the margins and validate them, so that memory corruptions (out-of-bounds writes) are detected.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex protecting all entry calls to the library.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    // Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns the number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if the given number is a power of two.
T must be an unsigned integer type, or a signed integer holding a nonnegative value.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns the smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns the largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

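/*
Worked example (illustrative): with pageSize = 4096, a resource at offset 0 and
size 1024 ends on page 0, and a resource starting at offset 2048 also starts on
page 0, so VmaBlocksOnSamePage(0, 1024, 2048, 4096) returns true. A resource
starting at offset 8192 begins on page 2, so the same call with offset 8192
returns false.
*/
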
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or a linear image and the other one is an optimal image. If the type is unknown,
behave conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

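/*
Example (an illustrative sketch): on a sorted array {1, 3, 3, 7},
VmaBinaryFindFirstNotLess with key 3 and cmp = operator< returns an iterator to
the first 3 (index 1), and with key 4 it returns the position of 7 (index 3),
i.e. where 4 would be inserted. VmaBinaryFindSorted returns end for key 4,
because no element compares equal to it.
*/
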
/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator)
        : VmaVector(count, allocator) {}

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded, because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(); // Explicit constructor call.
            return result;
        }
    }

    // No block has a free item: Create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result)T(); // Explicit constructor call.
    return result;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            ptr->~T(); // Explicit destructor call.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

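/*
Usage sketch (illustrative; `MyNode` is a hypothetical element type): the pool
hands out objects from growing blocks and recycles slots through a free list.

    VmaPoolAllocator<MyNode> nodeAllocator(pAllocationCallbacks, 32);
    MyNode* n = nodeAllocator.Alloc(); // default-constructs a MyNode in a pooled slot
    nodeAllocator.Free(n);             // destroys it and returns the slot to the free list
*/
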
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would spend unnecessary
    // computation on returning all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
5000  }
5001  ++m_Count;
5002  return newItem;
5003  }
5004  else
5005  return PushFront();
5006 }
5007 
5008 template<typename T>
5009 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5010 {
5011  ItemType* const newItem = InsertBefore(pItem);
5012  newItem->Value = value;
5013  return newItem;
5014 }
5015 
5016 template<typename T>
5017 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5018 {
5019  ItemType* const newItem = InsertAfter(pItem);
5020  newItem->Value = value;
5021  return newItem;
5022 }
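
// Sketch of how the raw-list primitives above compose (illustrative only):
#if 0
VmaRawList<int> list(pAllocationCallbacks);
VmaListItem<int>* const a = list.PushBack(1);  // list: 1
list.PushFront(0);                             // list: 0, 1
list.InsertAfter(a, 2);                        // list: 0, 1, 2
list.Remove(a);                                // list: 0, 2 - O(1), no traversal
list.Clear();                                  // returns remaining items to the pool
#endif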
5023 
5024 template<typename T, typename AllocatorT>
5025 class VmaList
5026 {
5027  VMA_CLASS_NO_COPY(VmaList)
5028 public:
5029  class iterator
5030  {
5031  public:
5032  iterator() :
5033  m_pList(VMA_NULL),
5034  m_pItem(VMA_NULL)
5035  {
5036  }
5037 
5038  T& operator*() const
5039  {
5040  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5041  return m_pItem->Value;
5042  }
5043  T* operator->() const
5044  {
5045  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5046  return &m_pItem->Value;
5047  }
5048 
5049  iterator& operator++()
5050  {
5051  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5052  m_pItem = m_pItem->pNext;
5053  return *this;
5054  }
5055  iterator& operator--()
5056  {
5057  if(m_pItem != VMA_NULL)
5058  {
5059  m_pItem = m_pItem->pPrev;
5060  }
5061  else
5062  {
5063  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5064  m_pItem = m_pList->Back();
5065  }
5066  return *this;
5067  }
5068 
5069  iterator operator++(int)
5070  {
5071  iterator result = *this;
5072  ++*this;
5073  return result;
5074  }
5075  iterator operator--(int)
5076  {
5077  iterator result = *this;
5078  --*this;
5079  return result;
5080  }
5081 
5082  bool operator==(const iterator& rhs) const
5083  {
5084  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5085  return m_pItem == rhs.m_pItem;
5086  }
5087  bool operator!=(const iterator& rhs) const
5088  {
5089  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5090  return m_pItem != rhs.m_pItem;
5091  }
5092 
5093  private:
5094  VmaRawList<T>* m_pList;
5095  VmaListItem<T>* m_pItem;
5096 
5097  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5098  m_pList(pList),
5099  m_pItem(pItem)
5100  {
5101  }
5102 
5103  friend class VmaList<T, AllocatorT>;
5104  };
5105 
5106  class const_iterator
5107  {
5108  public:
5109  const_iterator() :
5110  m_pList(VMA_NULL),
5111  m_pItem(VMA_NULL)
5112  {
5113  }
5114 
5115  const_iterator(const iterator& src) :
5116  m_pList(src.m_pList),
5117  m_pItem(src.m_pItem)
5118  {
5119  }
5120 
5121  const T& operator*() const
5122  {
5123  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5124  return m_pItem->Value;
5125  }
5126  const T* operator->() const
5127  {
5128  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5129  return &m_pItem->Value;
5130  }
5131 
5132  const_iterator& operator++()
5133  {
5134  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5135  m_pItem = m_pItem->pNext;
5136  return *this;
5137  }
5138  const_iterator& operator--()
5139  {
5140  if(m_pItem != VMA_NULL)
5141  {
5142  m_pItem = m_pItem->pPrev;
5143  }
5144  else
5145  {
5146  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5147  m_pItem = m_pList->Back();
5148  }
5149  return *this;
5150  }
5151 
5152  const_iterator operator++(int)
5153  {
5154  const_iterator result = *this;
5155  ++*this;
5156  return result;
5157  }
5158  const_iterator operator--(int)
5159  {
5160  const_iterator result = *this;
5161  --*this;
5162  return result;
5163  }
5164 
5165  bool operator==(const const_iterator& rhs) const
5166  {
5167  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5168  return m_pItem == rhs.m_pItem;
5169  }
5170  bool operator!=(const const_iterator& rhs) const
5171  {
5172  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5173  return m_pItem != rhs.m_pItem;
5174  }
5175 
5176  private:
5177  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5178  m_pList(pList),
5179  m_pItem(pItem)
5180  {
5181  }
5182 
5183  const VmaRawList<T>* m_pList;
5184  const VmaListItem<T>* m_pItem;
5185 
5186  friend class VmaList<T, AllocatorT>;
5187  };
5188 
5189  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5190 
5191  bool empty() const { return m_RawList.IsEmpty(); }
5192  size_t size() const { return m_RawList.GetCount(); }
5193 
5194  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5195  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5196 
5197  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5198  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5199 
5200  void clear() { m_RawList.Clear(); }
5201  void push_back(const T& value) { m_RawList.PushBack(value); }
5202  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5203  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5204 
5205 private:
5206  VmaRawList<T> m_RawList;
5207 };
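
// Illustrative usage of VmaList with VmaStlAllocator (a sketch, not library code):
#if 0
typedef VmaList< int, VmaStlAllocator<int> > IntList;
VmaStlAllocator<int> allocator(pAllocationCallbacks);
IntList list(allocator);
list.push_back(42);
for(IntList::iterator it = list.begin(); it != list.end(); ++it)
{
    // *it visits values in insertion order; erase() and insert() are O(1).
}
#endif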
5208 
5209 #endif // #if VMA_USE_STL_LIST
5210 
5211 ////////////////////////////////////////////////////////////////////////////////
5212 // class VmaMap
5213 
5214 // Unused in this version.
5215 #if 0
5216 
5217 #if VMA_USE_STL_UNORDERED_MAP
5218 
5219 #define VmaPair std::pair
5220 
5221 #define VMA_MAP_TYPE(KeyT, ValueT) \
5222  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5223 
5224 #else // #if VMA_USE_STL_UNORDERED_MAP
5225 
5226 template<typename T1, typename T2>
5227 struct VmaPair
5228 {
5229  T1 first;
5230  T2 second;
5231 
5232  VmaPair() : first(), second() { }
5233  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5234 };
5235 
5236 /* Class compatible with a subset of the interface of std::unordered_map.
5237 KeyT, ValueT must be POD because they will be stored in VmaVector.
5238 */
5239 template<typename KeyT, typename ValueT>
5240 class VmaMap
5241 {
5242 public:
5243  typedef VmaPair<KeyT, ValueT> PairType;
5244  typedef PairType* iterator;
5245 
5246  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5247 
5248  iterator begin() { return m_Vector.begin(); }
5249  iterator end() { return m_Vector.end(); }
5250 
5251  void insert(const PairType& pair);
5252  iterator find(const KeyT& key);
5253  void erase(iterator it);
5254 
5255 private:
5256  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5257 };
5258 
5259 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5260 
5261 template<typename FirstT, typename SecondT>
5262 struct VmaPairFirstLess
5263 {
5264  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5265  {
5266  return lhs.first < rhs.first;
5267  }
5268  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5269  {
5270  return lhs.first < rhsFirst;
5271  }
5272 };
5273 
5274 template<typename KeyT, typename ValueT>
5275 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5276 {
5277  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5278  m_Vector.data(),
5279  m_Vector.data() + m_Vector.size(),
5280  pair,
5281  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5282  VmaVectorInsert(m_Vector, indexToInsert, pair);
5283 }
5284 
5285 template<typename KeyT, typename ValueT>
5286 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5287 {
5288  PairType* it = VmaBinaryFindFirstNotLess(
5289  m_Vector.data(),
5290  m_Vector.data() + m_Vector.size(),
5291  key,
5292  VmaPairFirstLess<KeyT, ValueT>());
5293  if((it != m_Vector.end()) && (it->first == key))
5294  {
5295  return it;
5296  }
5297  else
5298  {
5299  return m_Vector.end();
5300  }
5301 }
5302 
5303 template<typename KeyT, typename ValueT>
5304 void VmaMap<KeyT, ValueT>::erase(iterator it)
5305 {
5306  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5307 }
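
// Note (illustrative): because insert() places each pair at the index returned by
// VmaBinaryFindFirstNotLess, m_Vector stays sorted by key at all times. That is what
// lets find() use binary search instead of hashing, despite the
// std::unordered_map-like interface.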
5308 
5309 #endif // #if VMA_USE_STL_UNORDERED_MAP
5310 
5311 #endif // #if 0
5312 
5313 ////////////////////////////////////////////////////////////////////////////////
5314 
5315 class VmaDeviceMemoryBlock;
5316 
5317 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5318 
5319 struct VmaAllocation_T
5320 {
5321 private:
5322  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5323 
5324  enum FLAGS
5325  {
5326  FLAG_USER_DATA_STRING = 0x01,
5327  };
5328 
5329 public:
5330  enum ALLOCATION_TYPE
5331  {
5332  ALLOCATION_TYPE_NONE,
5333  ALLOCATION_TYPE_BLOCK,
5334  ALLOCATION_TYPE_DEDICATED,
5335  };
5336 
5337  /*
5338  This struct is allocated using VmaPoolAllocator.
5339  */
5340 
5341  void Ctor(uint32_t currentFrameIndex, bool userDataString)
5342  {
5343  m_Alignment = 1;
5344  m_Size = 0;
5345  m_MemoryTypeIndex = 0;
5346  m_pUserData = VMA_NULL;
5347  m_LastUseFrameIndex = currentFrameIndex;
5348  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
5349  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
5350  m_MapCount = 0;
5351  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
5352 
5353 #if VMA_STATS_STRING_ENABLED
5354  m_CreationFrameIndex = currentFrameIndex;
5355  m_BufferImageUsage = 0;
5356 #endif
5357  }
5358 
5359  void Dtor()
5360  {
5361  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5362 
5363  // Check if owned string was freed.
5364  VMA_ASSERT(m_pUserData == VMA_NULL);
5365  }
5366 
5367  void InitBlockAllocation(
5368  VmaDeviceMemoryBlock* block,
5369  VkDeviceSize offset,
5370  VkDeviceSize alignment,
5371  VkDeviceSize size,
5372  uint32_t memoryTypeIndex,
5373  VmaSuballocationType suballocationType,
5374  bool mapped,
5375  bool canBecomeLost)
5376  {
5377  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5378  VMA_ASSERT(block != VMA_NULL);
5379  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5380  m_Alignment = alignment;
5381  m_Size = size;
5382  m_MemoryTypeIndex = memoryTypeIndex;
5383  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5384  m_SuballocationType = (uint8_t)suballocationType;
5385  m_BlockAllocation.m_Block = block;
5386  m_BlockAllocation.m_Offset = offset;
5387  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5388  }
5389 
5390  void InitLost()
5391  {
5392  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5393  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5394  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5395  m_MemoryTypeIndex = 0;
5396  m_BlockAllocation.m_Block = VMA_NULL;
5397  m_BlockAllocation.m_Offset = 0;
5398  m_BlockAllocation.m_CanBecomeLost = true;
5399  }
5400 
5401  void ChangeBlockAllocation(
5402  VmaAllocator hAllocator,
5403  VmaDeviceMemoryBlock* block,
5404  VkDeviceSize offset);
5405 
5406  void ChangeOffset(VkDeviceSize newOffset);
5407 
5408  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5409  void InitDedicatedAllocation(
5410  uint32_t memoryTypeIndex,
5411  VkDeviceMemory hMemory,
5412  VmaSuballocationType suballocationType,
5413  void* pMappedData,
5414  VkDeviceSize size)
5415  {
5416  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5417  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5418  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5419  m_Alignment = 0;
5420  m_Size = size;
5421  m_MemoryTypeIndex = memoryTypeIndex;
5422  m_SuballocationType = (uint8_t)suballocationType;
5423  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5424  m_DedicatedAllocation.m_hMemory = hMemory;
5425  m_DedicatedAllocation.m_pMappedData = pMappedData;
5426  }
5427 
5428  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5429  VkDeviceSize GetAlignment() const { return m_Alignment; }
5430  VkDeviceSize GetSize() const { return m_Size; }
5431  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5432  void* GetUserData() const { return m_pUserData; }
5433  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5434  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5435 
5436  VmaDeviceMemoryBlock* GetBlock() const
5437  {
5438  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5439  return m_BlockAllocation.m_Block;
5440  }
5441  VkDeviceSize GetOffset() const;
5442  VkDeviceMemory GetMemory() const;
5443  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5444  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5445  void* GetMappedData() const;
5446  bool CanBecomeLost() const;
5447 
5448  uint32_t GetLastUseFrameIndex() const
5449  {
5450  return m_LastUseFrameIndex.load();
5451  }
5452  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5453  {
5454  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5455  }
5456  /*
5457  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5458  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5459  - Else, returns false.
5460 
5461  If hAllocation is already lost, assert - you should not call it then.
5462  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5463  */
5464  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5465 
5466  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5467  {
5468  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5469  outInfo.blockCount = 1;
5470  outInfo.allocationCount = 1;
5471  outInfo.unusedRangeCount = 0;
5472  outInfo.usedBytes = m_Size;
5473  outInfo.unusedBytes = 0;
5474  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5475  outInfo.unusedRangeSizeMin = UINT64_MAX;
5476  outInfo.unusedRangeSizeMax = 0;
5477  }
5478 
5479  void BlockAllocMap();
5480  void BlockAllocUnmap();
5481  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5482  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5483 
5484 #if VMA_STATS_STRING_ENABLED
5485  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5486  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5487 
5488  void InitBufferImageUsage(uint32_t bufferImageUsage)
5489  {
5490  VMA_ASSERT(m_BufferImageUsage == 0);
5491  m_BufferImageUsage = bufferImageUsage;
5492  }
5493 
5494  void PrintParameters(class VmaJsonWriter& json) const;
5495 #endif
5496 
5497 private:
5498  VkDeviceSize m_Alignment;
5499  VkDeviceSize m_Size;
5500  void* m_pUserData;
5501  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5502  uint32_t m_MemoryTypeIndex;
5503  uint8_t m_Type; // ALLOCATION_TYPE
5504  uint8_t m_SuballocationType; // VmaSuballocationType
5505  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5506  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5507  uint8_t m_MapCount;
5508  uint8_t m_Flags; // enum FLAGS
5509 
5510  // Allocation out of VmaDeviceMemoryBlock.
5511  struct BlockAllocation
5512  {
5513  VmaDeviceMemoryBlock* m_Block;
5514  VkDeviceSize m_Offset;
5515  bool m_CanBecomeLost;
5516  };
5517 
5518  // Allocation for an object that has its own private VkDeviceMemory.
5519  struct DedicatedAllocation
5520  {
5521  VkDeviceMemory m_hMemory;
5522  void* m_pMappedData; // Not null means memory is mapped.
5523  };
5524 
5525  union
5526  {
5527  // Allocation out of VmaDeviceMemoryBlock.
5528  BlockAllocation m_BlockAllocation;
5529  // Allocation for an object that has its own private VkDeviceMemory.
5530  DedicatedAllocation m_DedicatedAllocation;
5531  };
5532 
5533 #if VMA_STATS_STRING_ENABLED
5534  uint32_t m_CreationFrameIndex;
5535  uint32_t m_BufferImageUsage; // 0 if unknown.
5536 #endif
5537 
5538  void FreeUserDataString(VmaAllocator hAllocator);
5539 };
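
/*
Illustration (a sketch) of how m_MapCount above packs two pieces of state into one byte:

  (m_MapCount & 0x80) != 0 -> created with VMA_ALLOCATION_CREATE_MAPPED_BIT,
                              i.e. persistently mapped for its whole lifetime.
  (m_MapCount & 0x7F)      -> reference count of outstanding vmaMapMemory() calls.

For example, a persistently mapped allocation that the user additionally mapped twice
would hold m_MapCount == 0x82.
*/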
5540 
5541 /*
5542 Represents a region of a VmaDeviceMemoryBlock that is either assigned as
5543 allocated memory and returned to the user, or free.
5544 */
5545 struct VmaSuballocation
5546 {
5547  VkDeviceSize offset;
5548  VkDeviceSize size;
5549  VmaAllocation hAllocation;
5550  VmaSuballocationType type;
5551 };
5552 
5553 // Comparator for offsets.
5554 struct VmaSuballocationOffsetLess
5555 {
5556  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5557  {
5558  return lhs.offset < rhs.offset;
5559  }
5560 };
5561 struct VmaSuballocationOffsetGreater
5562 {
5563  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5564  {
5565  return lhs.offset > rhs.offset;
5566  }
5567 };
5568 
5569 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5570 
5571 // Cost of making one more allocation lost, expressed as an equivalent number of bytes.
5572 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5573 
5574 enum class VmaAllocationRequestType
5575 {
5576  Normal,
5577  // Used by "Linear" algorithm.
5578  UpperAddress,
5579  EndOf1st,
5580  EndOf2nd,
5581 };
5582 
5583 /*
5584 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5585 
5586 If canMakeOtherLost was false:
5587 - item points to a FREE suballocation.
5588 - itemsToMakeLostCount is 0.
5589 
5590 If canMakeOtherLost was true:
5591 - item points to first of sequence of suballocations, which are either FREE,
5592  or point to VmaAllocations that can become lost.
5593 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5594  the requested allocation to succeed.
5595 */
5596 struct VmaAllocationRequest
5597 {
5598  VkDeviceSize offset;
5599  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5600  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5601  VmaSuballocationList::iterator item;
5602  size_t itemsToMakeLostCount;
5603  void* customData;
5604  VmaAllocationRequestType type;
5605 
5606  VkDeviceSize CalcCost() const
5607  {
5608  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5609  }
5610 };
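
// Worked example for CalcCost() (illustrative numbers): a request overlapping items
// that sum to sumItemSize == 500000 bytes and requiring 2 allocations to be made lost
// costs 500000 + 2 * 1048576 = 2597152 equivalent bytes, so it compares unfavorably
// against a candidate position whose overlapped ranges are all free (cost 0).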
5611 
5612 /*
5613 Data structure used for bookkeeping of allocations and unused ranges of memory
5614 in a single VkDeviceMemory block.
5615 */
5616 class VmaBlockMetadata
5617 {
5618 public:
5619  VmaBlockMetadata(VmaAllocator hAllocator);
5620  virtual ~VmaBlockMetadata() { }
5621  virtual void Init(VkDeviceSize size) { m_Size = size; }
5622 
5623  // Validates all data structures inside this object. If not valid, returns false.
5624  virtual bool Validate() const = 0;
5625  VkDeviceSize GetSize() const { return m_Size; }
5626  virtual size_t GetAllocationCount() const = 0;
5627  virtual VkDeviceSize GetSumFreeSize() const = 0;
5628  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5629  // Returns true if this block is empty - contains only a single free suballocation.
5630  virtual bool IsEmpty() const = 0;
5631 
5632  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5633  // Shouldn't modify blockCount.
5634  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5635 
5636 #if VMA_STATS_STRING_ENABLED
5637  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5638 #endif
5639 
5640  // Tries to find a place for suballocation with given parameters inside this block.
5641  // If succeeded, fills pAllocationRequest and returns true.
5642  // If failed, returns false.
5643  virtual bool CreateAllocationRequest(
5644  uint32_t currentFrameIndex,
5645  uint32_t frameInUseCount,
5646  VkDeviceSize bufferImageGranularity,
5647  VkDeviceSize allocSize,
5648  VkDeviceSize allocAlignment,
5649  bool upperAddress,
5650  VmaSuballocationType allocType,
5651  bool canMakeOtherLost,
5652  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5653  uint32_t strategy,
5654  VmaAllocationRequest* pAllocationRequest) = 0;
5655 
5656  virtual bool MakeRequestedAllocationsLost(
5657  uint32_t currentFrameIndex,
5658  uint32_t frameInUseCount,
5659  VmaAllocationRequest* pAllocationRequest) = 0;
5660 
5661  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5662 
5663  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5664 
5665  // Makes actual allocation based on request. Request must already be checked and valid.
5666  virtual void Alloc(
5667  const VmaAllocationRequest& request,
5668  VmaSuballocationType type,
5669  VkDeviceSize allocSize,
5670  VmaAllocation hAllocation) = 0;
5671 
5672  // Frees suballocation assigned to given memory region.
5673  virtual void Free(const VmaAllocation allocation) = 0;
5674  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5675 
5676 protected:
5677  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5678 
5679 #if VMA_STATS_STRING_ENABLED
5680  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5681  VkDeviceSize unusedBytes,
5682  size_t allocationCount,
5683  size_t unusedRangeCount) const;
5684  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5685  VkDeviceSize offset,
5686  VmaAllocation hAllocation) const;
5687  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5688  VkDeviceSize offset,
5689  VkDeviceSize size) const;
5690  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5691 #endif
5692 
5693 private:
5694  VkDeviceSize m_Size;
5695  const VkAllocationCallbacks* m_pAllocationCallbacks;
5696 };
5697 
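// The interface above is a two-phase protocol: a search that fills a request, then a
// commit. A hypothetical caller could look like this (sketch; the surrounding
// variables are assumptions):
#if 0
VmaAllocationRequest request = {};
if(pMetadata->CreateAllocationRequest(
    currentFrameIndex, frameInUseCount, bufferImageGranularity,
    allocSize, allocAlignment,
    false, // upperAddress
    allocType,
    false, // canMakeOtherLost
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    &request))
{
    // The request is only a plan - nothing is reserved until Alloc() commits it.
    pMetadata->Alloc(request, allocType, allocSize, hAllocation);
}
#endif
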
5698 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5699  VMA_ASSERT(0 && "Validation failed: " #cond); \
5700  return false; \
5701  } } while(false)
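
// Example of intended use (sketch): implementations of Validate() chain checks such as
//   VMA_VALIDATE(m_SumFreeSize <= GetSize());
// so the first failing condition asserts with its own message and makes Validate()
// return false.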
5702 
5703 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5704 {
5705  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5706 public:
5707  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5708  virtual ~VmaBlockMetadata_Generic();
5709  virtual void Init(VkDeviceSize size);
5710 
5711  virtual bool Validate() const;
5712  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5713  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5714  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5715  virtual bool IsEmpty() const;
5716 
5717  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5718  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5719 
5720 #if VMA_STATS_STRING_ENABLED
5721  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5722 #endif
5723 
5724  virtual bool CreateAllocationRequest(
5725  uint32_t currentFrameIndex,
5726  uint32_t frameInUseCount,
5727  VkDeviceSize bufferImageGranularity,
5728  VkDeviceSize allocSize,
5729  VkDeviceSize allocAlignment,
5730  bool upperAddress,
5731  VmaSuballocationType allocType,
5732  bool canMakeOtherLost,
5733  uint32_t strategy,
5734  VmaAllocationRequest* pAllocationRequest);
5735 
5736  virtual bool MakeRequestedAllocationsLost(
5737  uint32_t currentFrameIndex,
5738  uint32_t frameInUseCount,
5739  VmaAllocationRequest* pAllocationRequest);
5740 
5741  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5742 
5743  virtual VkResult CheckCorruption(const void* pBlockData);
5744 
5745  virtual void Alloc(
5746  const VmaAllocationRequest& request,
5747  VmaSuballocationType type,
5748  VkDeviceSize allocSize,
5749  VmaAllocation hAllocation);
5750 
5751  virtual void Free(const VmaAllocation allocation);
5752  virtual void FreeAtOffset(VkDeviceSize offset);
5753 
5754  ////////////////////////////////////////////////////////////////////////////////
5755  // For defragmentation
5756 
5757  bool IsBufferImageGranularityConflictPossible(
5758  VkDeviceSize bufferImageGranularity,
5759  VmaSuballocationType& inOutPrevSuballocType) const;
5760 
5761 private:
5762  friend class VmaDefragmentationAlgorithm_Generic;
5763  friend class VmaDefragmentationAlgorithm_Fast;
5764 
5765  uint32_t m_FreeCount;
5766  VkDeviceSize m_SumFreeSize;
5767  VmaSuballocationList m_Suballocations;
5768  // Suballocations that are free and have size greater than certain threshold.
5769  // Sorted by size, ascending.
5770  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5771 
5772  bool ValidateFreeSuballocationList() const;
5773 
5774  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
5775  // If yes, fills pOffset and returns true. If no, returns false.
5776  bool CheckAllocation(
5777  uint32_t currentFrameIndex,
5778  uint32_t frameInUseCount,
5779  VkDeviceSize bufferImageGranularity,
5780  VkDeviceSize allocSize,
5781  VkDeviceSize allocAlignment,
5782  VmaSuballocationType allocType,
5783  VmaSuballocationList::const_iterator suballocItem,
5784  bool canMakeOtherLost,
5785  VkDeviceSize* pOffset,
5786  size_t* itemsToMakeLostCount,
5787  VkDeviceSize* pSumFreeSize,
5788  VkDeviceSize* pSumItemSize) const;
5789  // Merges the given free suballocation with the following one, which must also be free.
5790  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5791  // Releases given suballocation, making it free.
5792  // Merges it with adjacent free suballocations if applicable.
5793  // Returns iterator to new free suballocation at this place.
5794  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5795  // Inserts the given free suballocation into the sorted list
5796  // m_FreeSuballocationsBySize, if it's suitable.
5797  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5798  // Removes the given free suballocation from the sorted list
5799  // m_FreeSuballocationsBySize, if applicable.
5800  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5801 };
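
// Note on the structure above (illustrative): because m_FreeSuballocationsBySize is
// kept sorted by size, ascending, a best-fit search is a binary search for the first
// free suballocation with size >= allocSize rather than a linear scan of all free
// ranges. Only ranges above a registration threshold are tracked there, so tiny
// leftovers don't bloat the vector.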
5802 
5803 /*
5804 Allocations and their references in internal data structure look like this:
5805 
5806 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5807 
5808  0 +-------+
5809  | |
5810  | |
5811  | |
5812  +-------+
5813  | Alloc | 1st[m_1stNullItemsBeginCount]
5814  +-------+
5815  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5816  +-------+
5817  | ... |
5818  +-------+
5819  | Alloc | 1st[1st.size() - 1]
5820  +-------+
5821  | |
5822  | |
5823  | |
5824 GetSize() +-------+
5825 
5826 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5827 
5828  0 +-------+
5829  | Alloc | 2nd[0]
5830  +-------+
5831  | Alloc | 2nd[1]
5832  +-------+
5833  | ... |
5834  +-------+
5835  | Alloc | 2nd[2nd.size() - 1]
5836  +-------+
5837  | |
5838  | |
5839  | |
5840  +-------+
5841  | Alloc | 1st[m_1stNullItemsBeginCount]
5842  +-------+
5843  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5844  +-------+
5845  | ... |
5846  +-------+
5847  | Alloc | 1st[1st.size() - 1]
5848  +-------+
5849  | |
5850 GetSize() +-------+
5851 
5852 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5853 
5854  0 +-------+
5855  | |
5856  | |
5857  | |
5858  +-------+
5859  | Alloc | 1st[m_1stNullItemsBeginCount]
5860  +-------+
5861  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5862  +-------+
5863  | ... |
5864  +-------+
5865  | Alloc | 1st[1st.size() - 1]
5866  +-------+
5867  | |
5868  | |
5869  | |
5870  +-------+
5871  | Alloc | 2nd[2nd.size() - 1]
5872  +-------+
5873  | ... |
5874  +-------+
5875  | Alloc | 2nd[1]
5876  +-------+
5877  | Alloc | 2nd[0]
5878 GetSize() +-------+
5879 
5880 */
5881 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5882 {
5883  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5884 public:
5885  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5886  virtual ~VmaBlockMetadata_Linear();
5887  virtual void Init(VkDeviceSize size);
5888 
5889  virtual bool Validate() const;
5890  virtual size_t GetAllocationCount() const;
5891  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5892  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5893  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5894 
5895  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5896  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5897 
5898 #if VMA_STATS_STRING_ENABLED
5899  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5900 #endif
5901 
5902  virtual bool CreateAllocationRequest(
5903  uint32_t currentFrameIndex,
5904  uint32_t frameInUseCount,
5905  VkDeviceSize bufferImageGranularity,
5906  VkDeviceSize allocSize,
5907  VkDeviceSize allocAlignment,
5908  bool upperAddress,
5909  VmaSuballocationType allocType,
5910  bool canMakeOtherLost,
5911  uint32_t strategy,
5912  VmaAllocationRequest* pAllocationRequest);
5913 
5914  virtual bool MakeRequestedAllocationsLost(
5915  uint32_t currentFrameIndex,
5916  uint32_t frameInUseCount,
5917  VmaAllocationRequest* pAllocationRequest);
5918 
5919  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5920 
5921  virtual VkResult CheckCorruption(const void* pBlockData);
5922 
5923  virtual void Alloc(
5924  const VmaAllocationRequest& request,
5925  VmaSuballocationType type,
5926  VkDeviceSize allocSize,
5927  VmaAllocation hAllocation);
5928 
5929  virtual void Free(const VmaAllocation allocation);
5930  virtual void FreeAtOffset(VkDeviceSize offset);
5931 
5932 private:
5933  /*
5934  There are two suballocation vectors, used in ping-pong way.
5935  The one with index m_1stVectorIndex is called 1st.
5936  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5937  2nd can be non-empty only when 1st is not empty.
5938  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5939  */
5940  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5941 
5942  enum SECOND_VECTOR_MODE
5943  {
5944  SECOND_VECTOR_EMPTY,
5945  /*
5946  Suballocations in the 2nd vector are created later than the ones in the 1st,
5947  but they all have smaller offsets.
5948  */
5949  SECOND_VECTOR_RING_BUFFER,
5950  /*
5951  Suballocations in 2nd vector are upper side of double stack.
5952  They all have offsets higher than those in 1st vector.
5953  Top of this stack means smaller offsets, but higher indices in this vector.
5954  */
5955  SECOND_VECTOR_DOUBLE_STACK,
5956  };
5957 
5958  VkDeviceSize m_SumFreeSize;
5959  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5960  uint32_t m_1stVectorIndex;
5961  SECOND_VECTOR_MODE m_2ndVectorMode;
5962 
5963  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5964  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5965  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5966  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5967 
5968  // Number of items in 1st vector with hAllocation = null at the beginning.
5969  size_t m_1stNullItemsBeginCount;
5970  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5971  size_t m_1stNullItemsMiddleCount;
5972  // Number of items in 2nd vector with hAllocation = null.
5973  size_t m_2ndNullItemsCount;
5974 
5975  bool ShouldCompact1st() const;
5976  void CleanupAfterFree();
5977 
5978  bool CreateAllocationRequest_LowerAddress(
5979  uint32_t currentFrameIndex,
5980  uint32_t frameInUseCount,
5981  VkDeviceSize bufferImageGranularity,
5982  VkDeviceSize allocSize,
5983  VkDeviceSize allocAlignment,
5984  VmaSuballocationType allocType,
5985  bool canMakeOtherLost,
5986  uint32_t strategy,
5987  VmaAllocationRequest* pAllocationRequest);
5988  bool CreateAllocationRequest_UpperAddress(
5989  uint32_t currentFrameIndex,
5990  uint32_t frameInUseCount,
5991  VkDeviceSize bufferImageGranularity,
5992  VkDeviceSize allocSize,
5993  VkDeviceSize allocAlignment,
5994  VmaSuballocationType allocType,
5995  bool canMakeOtherLost,
5996  uint32_t strategy,
5997  VmaAllocationRequest* pAllocationRequest);
5998 };
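
// Sketch of how the two vectors can evolve at runtime (illustrative sequence):
//
//   push A, B, C   -> 1st = [A, B, C], 2nd empty            (SECOND_VECTOR_EMPTY)
//   free A, push D -> D doesn't fit after C, so it wraps to offset 0:
//                     2nd = [D], mode = SECOND_VECTOR_RING_BUFFER
//
// Keep freeing from the front and pushing to the back and the block behaves as a ring
// buffer; once everything is freed, the metadata returns to the EMPTY mode. The
// DOUBLE_STACK mode is entered instead when an allocation is requested with
// upperAddress = true, growing the 2nd vector down from GetSize().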
5999 
6000 /*
6001 - GetSize() is the original size of allocated memory block.
6002 - m_UsableSize is this size aligned down to a power of two.
6003  All allocations and calculations happen relative to m_UsableSize.
6004 - GetUnusableSize() is the difference between them.
6005  It is reported as a separate, unused range, not available for allocations.
6006 
6007 Node at level 0 has size = m_UsableSize.
6008 Each subsequent level contains nodes half the size of those on the previous level.
6009 m_LevelCount is the maximum number of levels to use in the current object.
6010 */
6011 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6012 {
6013  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6014 public:
6015  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6016  virtual ~VmaBlockMetadata_Buddy();
6017  virtual void Init(VkDeviceSize size);
6018 
6019  virtual bool Validate() const;
6020  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6021  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6022  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6023  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6024 
6025  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6026  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6027 
6028 #if VMA_STATS_STRING_ENABLED
6029  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6030 #endif
6031 
6032  virtual bool CreateAllocationRequest(
6033  uint32_t currentFrameIndex,
6034  uint32_t frameInUseCount,
6035  VkDeviceSize bufferImageGranularity,
6036  VkDeviceSize allocSize,
6037  VkDeviceSize allocAlignment,
6038  bool upperAddress,
6039  VmaSuballocationType allocType,
6040  bool canMakeOtherLost,
6041  uint32_t strategy,
6042  VmaAllocationRequest* pAllocationRequest);
6043 
6044  virtual bool MakeRequestedAllocationsLost(
6045  uint32_t currentFrameIndex,
6046  uint32_t frameInUseCount,
6047  VmaAllocationRequest* pAllocationRequest);
6048 
6049  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6050 
6051  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6052 
6053  virtual void Alloc(
6054  const VmaAllocationRequest& request,
6055  VmaSuballocationType type,
6056  VkDeviceSize allocSize,
6057  VmaAllocation hAllocation);
6058 
6059  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6060  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6061 
6062 private:
6063  static const VkDeviceSize MIN_NODE_SIZE = 32;
6064  static const size_t MAX_LEVELS = 30;
6065 
6066  struct ValidationContext
6067  {
6068  size_t calculatedAllocationCount;
6069  size_t calculatedFreeCount;
6070  VkDeviceSize calculatedSumFreeSize;
6071 
6072  ValidationContext() :
6073  calculatedAllocationCount(0),
6074  calculatedFreeCount(0),
6075  calculatedSumFreeSize(0) { }
6076  };
6077 
6078  struct Node
6079  {
6080  VkDeviceSize offset;
6081  enum TYPE
6082  {
6083  TYPE_FREE,
6084  TYPE_ALLOCATION,
6085  TYPE_SPLIT,
6086  TYPE_COUNT
6087  } type;
6088  Node* parent;
6089  Node* buddy;
6090 
6091  union
6092  {
6093  struct
6094  {
6095  Node* prev;
6096  Node* next;
6097  } free;
6098  struct
6099  {
6100  VmaAllocation alloc;
6101  } allocation;
6102  struct
6103  {
6104  Node* leftChild;
6105  } split;
6106  };
6107  };
6108 
6109  // Size of the memory block aligned down to a power of two.
6110  VkDeviceSize m_UsableSize;
6111  uint32_t m_LevelCount;
6112 
6113  Node* m_Root;
6114  struct {
6115  Node* front;
6116  Node* back;
6117  } m_FreeList[MAX_LEVELS];
6118  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6119  size_t m_AllocationCount;
6120  // Number of nodes in the tree with type == TYPE_FREE.
6121  size_t m_FreeCount;
6122  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6123  VkDeviceSize m_SumFreeSize;
6124 
6125  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6126  void DeleteNode(Node* node);
6127  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6128  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6129  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6130  // Alloc passed just for validation. Can be null.
6131  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6132  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6133  // Adds node to the front of FreeList at given level.
6134  // node->type must be FREE.
6135  // node->free.prev, next can be undefined.
6136  void AddToFreeListFront(uint32_t level, Node* node);
6137  // Removes node from FreeList at given level.
6138  // node->type must be FREE.
6139  // node->free.prev, next stay untouched.
6140  void RemoveFromFreeList(uint32_t level, Node* node);
6141 
6142 #if VMA_STATS_STRING_ENABLED
6143  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6144 #endif
6145 };
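
// Worked example (illustrative): for a block with m_UsableSize == 256 MiB,
// LevelToNodeSize(0) == 256 MiB, LevelToNodeSize(1) == 128 MiB, ...,
// LevelToNodeSize(6) == 4 MiB. An allocation of 3 MiB is served from a level-6 node;
// the resulting 1 MiB of internal fragmentation stays counted in m_SumFreeSize, while
// a block whose original size wasn't a power of two additionally reports
// GetUnusableSize() as a permanently unused range.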
6146 
6147 /*
6148 Represents a single block of device memory (`VkDeviceMemory`) with all the
6149 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6150 
6151 Thread-safety: This class must be externally synchronized.
6152 */
6153 class VmaDeviceMemoryBlock
6154 {
6155  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6156 public:
6157  VmaBlockMetadata* m_pMetadata;
6158 
6159  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6160 
6161  ~VmaDeviceMemoryBlock()
6162  {
6163  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6164  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6165  }
6166 
6167  // Always call after construction.
6168  void Init(
6169  VmaAllocator hAllocator,
6170  VmaPool hParentPool,
6171  uint32_t newMemoryTypeIndex,
6172  VkDeviceMemory newMemory,
6173  VkDeviceSize newSize,
6174  uint32_t id,
6175  uint32_t algorithm);
6176  // Always call before destruction.
6177  void Destroy(VmaAllocator allocator);
6178 
6179  VmaPool GetParentPool() const { return m_hParentPool; }
6180  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6181  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6182  uint32_t GetId() const { return m_Id; }
6183  void* GetMappedData() const { return m_pMappedData; }
6184 
6185  // Validates all data structures inside this object. If not valid, returns false.
6186  bool Validate() const;
6187 
6188  VkResult CheckCorruption(VmaAllocator hAllocator);
6189 
6190  // ppData can be null.
6191  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6192  void Unmap(VmaAllocator hAllocator, uint32_t count);
6193 
6194  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6195  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6196 
6197  VkResult BindBufferMemory(
6198  const VmaAllocator hAllocator,
6199  const VmaAllocation hAllocation,
6200  VkDeviceSize allocationLocalOffset,
6201  VkBuffer hBuffer,
6202  const void* pNext);
6203  VkResult BindImageMemory(
6204  const VmaAllocator hAllocator,
6205  const VmaAllocation hAllocation,
6206  VkDeviceSize allocationLocalOffset,
6207  VkImage hImage,
6208  const void* pNext);
6209 
6210 private:
6211  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
6212  uint32_t m_MemoryTypeIndex;
6213  uint32_t m_Id;
6214  VkDeviceMemory m_hMemory;
6215 
6216  /*
6217  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6218  Also protects m_MapCount, m_pMappedData.
6219  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
6220  */
6221  VMA_MUTEX m_Mutex;
6222  uint32_t m_MapCount;
6223  void* m_pMappedData;
6224 };
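
// Mapping is reference-counted per block. A sketch (the 'block' object and arguments
// are assumptions, not library code):
#if 0
void* pData = VMA_NULL;
block.Map(hAllocator, 1, &pData); // 0 -> 1: performs the actual vkMapMemory
block.Map(hAllocator, 1, &pData); // 1 -> 2: only increments m_MapCount, same pointer
block.Unmap(hAllocator, 2);       // 2 -> 0: vkUnmapMemory happens only now
#endif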
6225 
6226 struct VmaPointerLess
6227 {
6228  bool operator()(const void* lhs, const void* rhs) const
6229  {
6230  return lhs < rhs;
6231  }
6232 };
6233 
6234 struct VmaDefragmentationMove
6235 {
6236  size_t srcBlockIndex;
6237  size_t dstBlockIndex;
6238  VkDeviceSize srcOffset;
6239  VkDeviceSize dstOffset;
6240  VkDeviceSize size;
6241 };
6242 
6243 class VmaDefragmentationAlgorithm;
6244 
6245 /*
6246 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6247 Vulkan memory type.
6248 
6249 Synchronized internally with a mutex.
6250 */
6251 struct VmaBlockVector
6252 {
6253  VMA_CLASS_NO_COPY(VmaBlockVector)
6254 public:
6255  VmaBlockVector(
6256  VmaAllocator hAllocator,
6257  VmaPool hParentPool,
6258  uint32_t memoryTypeIndex,
6259  VkDeviceSize preferredBlockSize,
6260  size_t minBlockCount,
6261  size_t maxBlockCount,
6262  VkDeviceSize bufferImageGranularity,
6263  uint32_t frameInUseCount,
6264  bool explicitBlockSize,
6265  uint32_t algorithm);
6266  ~VmaBlockVector();
6267 
6268  VkResult CreateMinBlocks();
6269 
6270  VmaAllocator GetAllocator() const { return m_hAllocator; }
6271  VmaPool GetParentPool() const { return m_hParentPool; }
6272  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6273  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6274  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6275  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6276  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6277  uint32_t GetAlgorithm() const { return m_Algorithm; }
6278 
6279  void GetPoolStats(VmaPoolStats* pStats);
6280 
6281  bool IsEmpty() const { return m_Blocks.empty(); }
6282  bool IsCorruptionDetectionEnabled() const;
6283 
6284  VkResult Allocate(
6285  uint32_t currentFrameIndex,
6286  VkDeviceSize size,
6287  VkDeviceSize alignment,
6288  const VmaAllocationCreateInfo& createInfo,
6289  VmaSuballocationType suballocType,
6290  size_t allocationCount,
6291  VmaAllocation* pAllocations);
6292 
6293  void Free(const VmaAllocation hAllocation);
6294 
6295  // Adds statistics of this BlockVector to pStats.
6296  void AddStats(VmaStats* pStats);
6297 
6298 #if VMA_STATS_STRING_ENABLED
6299  void PrintDetailedMap(class VmaJsonWriter& json);
6300 #endif
6301 
6302  void MakePoolAllocationsLost(
6303  uint32_t currentFrameIndex,
6304  size_t* pLostAllocationCount);
6305  VkResult CheckCorruption();
6306 
6307  // Saves results in pCtx->res.
6308  void Defragment(
6309  class VmaBlockVectorDefragmentationContext* pCtx,
6310  VmaDefragmentationStats* pStats,
6311  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6312  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6313  VkCommandBuffer commandBuffer);
6314  void DefragmentationEnd(
6315  class VmaBlockVectorDefragmentationContext* pCtx,
6316  VmaDefragmentationStats* pStats);
6317 
6318  ////////////////////////////////////////////////////////////////////////////////
6319  // To be used only while the m_Mutex is locked. Used during defragmentation.
6320 
6321  size_t GetBlockCount() const { return m_Blocks.size(); }
6322  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6323  size_t CalcAllocationCount() const;
6324  bool IsBufferImageGranularityConflictPossible() const;
6325 
6326 private:
6327  friend class VmaDefragmentationAlgorithm_Generic;
6328 
6329  const VmaAllocator m_hAllocator;
6330  const VmaPool m_hParentPool;
6331  const uint32_t m_MemoryTypeIndex;
6332  const VkDeviceSize m_PreferredBlockSize;
6333  const size_t m_MinBlockCount;
6334  const size_t m_MaxBlockCount;
6335  const VkDeviceSize m_BufferImageGranularity;
6336  const uint32_t m_FrameInUseCount;
6337  const bool m_ExplicitBlockSize;
6338  const uint32_t m_Algorithm;
6339  VMA_RW_MUTEX m_Mutex;
6340 
6341  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
6342  a hysteresis to avoid the pessimistic case of alternately creating and destroying a VkDeviceMemory. */
6343  bool m_HasEmptyBlock;
6344  // Incrementally sorted by sumFreeSize, ascending.
6345  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6346  uint32_t m_NextBlockId;
6347 
6348  VkDeviceSize CalcMaxBlockSize() const;
6349 
6350  // Finds and removes given block from vector.
6351  void Remove(VmaDeviceMemoryBlock* pBlock);
6352 
6353  // Performs single step in sorting m_Blocks. They may not be fully sorted
6354  // after this call.
6355  void IncrementallySortBlocks();
6356 
6357  VkResult AllocatePage(
6358  uint32_t currentFrameIndex,
6359  VkDeviceSize size,
6360  VkDeviceSize alignment,
6361  const VmaAllocationCreateInfo& createInfo,
6362  VmaSuballocationType suballocType,
6363  VmaAllocation* pAllocation);
6364 
6365  // To be used only without CAN_MAKE_OTHER_LOST flag.
6366  VkResult AllocateFromBlock(
6367  VmaDeviceMemoryBlock* pBlock,
6368  uint32_t currentFrameIndex,
6369  VkDeviceSize size,
6370  VkDeviceSize alignment,
6371  VmaAllocationCreateFlags allocFlags,
6372  void* pUserData,
6373  VmaSuballocationType suballocType,
6374  uint32_t strategy,
6375  VmaAllocation* pAllocation);
6376 
6377  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6378 
6379  // Saves result to pCtx->res.
6380  void ApplyDefragmentationMovesCpu(
6381  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6382  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6383  // Saves result to pCtx->res.
6384  void ApplyDefragmentationMovesGpu(
6385  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6386  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6387  VkCommandBuffer commandBuffer);
6388 
6389  /*
6390  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6391  - updated with new data.
6392  */
6393  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6394 
6395  void UpdateHasEmptyBlock();
6396 };
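
// Note on allocation order (illustrative summary): Allocate() first tries existing
// blocks - m_Blocks is kept incrementally sorted so blocks with less free space can be
// preferred and filled tighter - and only when no block can host the request does it
// create a new VkDeviceMemory block via CreateBlock(), up to m_MaxBlockCount.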
6397 
6398 struct VmaPool_T
6399 {
6400  VMA_CLASS_NO_COPY(VmaPool_T)
6401 public:
6402  VmaBlockVector m_BlockVector;
6403 
6404  VmaPool_T(
6405  VmaAllocator hAllocator,
6406  const VmaPoolCreateInfo& createInfo,
6407  VkDeviceSize preferredBlockSize);
6408  ~VmaPool_T();
6409 
6410  uint32_t GetId() const { return m_Id; }
6411  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6412 
6413  const char* GetName() const { return m_Name; }
6414  void SetName(const char* pName);
6415 
6416 #if VMA_STATS_STRING_ENABLED
6417  //void PrintDetailedMap(class VmaStringBuilder& sb);
6418 #endif
6419 
6420 private:
6421  uint32_t m_Id;
6422  char* m_Name;
6423 };
6424 
6425 /*
6426 Performs defragmentation:
6427 
6428 - Updates `pBlockVector->m_pMetadata`.
6429 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6430 - Does not move actual data, only returns requested moves as `moves`.
6431 */
6432 class VmaDefragmentationAlgorithm
6433 {
6434  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6435 public:
6436  VmaDefragmentationAlgorithm(
6437  VmaAllocator hAllocator,
6438  VmaBlockVector* pBlockVector,
6439  uint32_t currentFrameIndex) :
6440  m_hAllocator(hAllocator),
6441  m_pBlockVector(pBlockVector),
6442  m_CurrentFrameIndex(currentFrameIndex)
6443  {
6444  }
6445  virtual ~VmaDefragmentationAlgorithm()
6446  {
6447  }
6448 
6449  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6450  virtual void AddAll() = 0;
6451 
6452  virtual VkResult Defragment(
6453  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6454  VkDeviceSize maxBytesToMove,
6455  uint32_t maxAllocationsToMove) = 0;
6456 
6457  virtual VkDeviceSize GetBytesMoved() const = 0;
6458  virtual uint32_t GetAllocationsMoved() const = 0;
6459 
6460 protected:
6461  VmaAllocator const m_hAllocator;
6462  VmaBlockVector* const m_pBlockVector;
6463  const uint32_t m_CurrentFrameIndex;
6464 
6465  struct AllocationInfo
6466  {
6467  VmaAllocation m_hAllocation;
6468  VkBool32* m_pChanged;
6469 
6470  AllocationInfo() :
6471  m_hAllocation(VK_NULL_HANDLE),
6472  m_pChanged(VMA_NULL)
6473  {
6474  }
6475  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6476  m_hAllocation(hAlloc),
6477  m_pChanged(pChanged)
6478  {
6479  }
6480  };
6481 };
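
// A hypothetical driver of this interface (sketch; the variable names are assumptions):
#if 0
VmaStlAllocator<VmaDefragmentationMove> movesAllocator(pAllocationCallbacks);
VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves(movesAllocator);
pAlgorithm->AddAll(); // or AddAllocation() per allocation, with an optional pChanged flag
VkResult res = pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
// 'moves' now holds source/destination block indices and offsets; the actual data copy
// is performed later by the owning VmaBlockVector (ApplyDefragmentationMovesCpu/Gpu).
#endif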
6482 
6483 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6484 {
6485  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6486 public:
6487  VmaDefragmentationAlgorithm_Generic(
6488  VmaAllocator hAllocator,
6489  VmaBlockVector* pBlockVector,
6490  uint32_t currentFrameIndex,
6491  bool overlappingMoveSupported);
6492  virtual ~VmaDefragmentationAlgorithm_Generic();
6493 
6494  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6495  virtual void AddAll() { m_AllAllocations = true; }
6496 
6497  virtual VkResult Defragment(
6498  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6499  VkDeviceSize maxBytesToMove,
6500  uint32_t maxAllocationsToMove);
6501 
6502  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6503  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6504 
6505 private:
6506  uint32_t m_AllocationCount;
6507  bool m_AllAllocations;
6508 
6509  VkDeviceSize m_BytesMoved;
6510  uint32_t m_AllocationsMoved;
6511 
6512  struct AllocationInfoSizeGreater
6513  {
6514  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6515  {
6516  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6517  }
6518  };
6519 
6520  struct AllocationInfoOffsetGreater
6521  {
6522  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6523  {
6524  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6525  }
6526  };
6527 
6528  struct BlockInfo
6529  {
6530  size_t m_OriginalBlockIndex;
6531  VmaDeviceMemoryBlock* m_pBlock;
6532  bool m_HasNonMovableAllocations;
6533  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6534 
6535  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6536  m_OriginalBlockIndex(SIZE_MAX),
6537  m_pBlock(VMA_NULL),
6538  m_HasNonMovableAllocations(true),
6539  m_Allocations(pAllocationCallbacks)
6540  {
6541  }
6542 
6543  void CalcHasNonMovableAllocations()
6544  {
6545  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6546  const size_t defragmentAllocCount = m_Allocations.size();
6547  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6548  }
6549 
6550  void SortAllocationsBySizeDescending()
6551  {
6552  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6553  }
6554 
6555  void SortAllocationsByOffsetDescending()
6556  {
6557  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6558  }
6559  };
6560 
6561  struct BlockPointerLess
6562  {
6563  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6564  {
6565  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6566  }
6567  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6568  {
6569  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6570  }
6571  };
6572 
6573  // 1. Blocks with some non-movable allocations go first.
6574  // 2. Blocks with smaller sumFreeSize go first.
6575  struct BlockInfoCompareMoveDestination
6576  {
6577  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6578  {
6579  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6580  {
6581  return true;
6582  }
6583  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6584  {
6585  return false;
6586  }
6587  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6588  {
6589  return true;
6590  }
6591  return false;
6592  }
6593  };
6594 
6595  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6596  BlockInfoVector m_Blocks;
6597 
6598  VkResult DefragmentRound(
6599  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6600  VkDeviceSize maxBytesToMove,
6601  uint32_t maxAllocationsToMove);
6602 
6603  size_t CalcBlocksWithNonMovableCount() const;
6604 
6605  static bool MoveMakesSense(
6606  size_t dstBlockIndex, VkDeviceSize dstOffset,
6607  size_t srcBlockIndex, VkDeviceSize srcOffset);
6608 };
6609 
6610 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6611 {
6612  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6613 public:
6614  VmaDefragmentationAlgorithm_Fast(
6615  VmaAllocator hAllocator,
6616  VmaBlockVector* pBlockVector,
6617  uint32_t currentFrameIndex,
6618  bool overlappingMoveSupported);
6619  virtual ~VmaDefragmentationAlgorithm_Fast();
6620 
6621  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6622  virtual void AddAll() { m_AllAllocations = true; }
6623 
6624  virtual VkResult Defragment(
6625  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6626  VkDeviceSize maxBytesToMove,
6627  uint32_t maxAllocationsToMove);
6628 
6629  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6630  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6631 
6632 private:
6633  struct BlockInfo
6634  {
6635  size_t origBlockIndex;
6636  };
6637 
6638  class FreeSpaceDatabase
6639  {
6640  public:
6641  FreeSpaceDatabase()
6642  {
6643  FreeSpace s = {};
6644  s.blockInfoIndex = SIZE_MAX;
6645  for(size_t i = 0; i < MAX_COUNT; ++i)
6646  {
6647  m_FreeSpaces[i] = s;
6648  }
6649  }
6650 
6651  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6652  {
6653  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6654  {
6655  return;
6656  }
6657 
6659  // Find the first invalid structure, or else the smallest one.
6659  size_t bestIndex = SIZE_MAX;
6660  for(size_t i = 0; i < MAX_COUNT; ++i)
6661  {
6662  // Empty structure.
6663  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6664  {
6665  bestIndex = i;
6666  break;
6667  }
6668  if(m_FreeSpaces[i].size < size &&
6669  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6670  {
6671  bestIndex = i;
6672  }
6673  }
6674 
6675  if(bestIndex != SIZE_MAX)
6676  {
6677  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6678  m_FreeSpaces[bestIndex].offset = offset;
6679  m_FreeSpaces[bestIndex].size = size;
6680  }
6681  }
6682 
6683  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6684  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6685  {
6686  size_t bestIndex = SIZE_MAX;
6687  VkDeviceSize bestFreeSpaceAfter = 0;
6688  for(size_t i = 0; i < MAX_COUNT; ++i)
6689  {
6690  // Structure is valid.
6691  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6692  {
6693  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6694  // Allocation fits into this structure.
6695  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6696  {
6697  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6698  (dstOffset + size);
6699  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6700  {
6701  bestIndex = i;
6702  bestFreeSpaceAfter = freeSpaceAfter;
6703  }
6704  }
6705  }
6706  }
6707 
6708  if(bestIndex != SIZE_MAX)
6709  {
6710  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6711  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6712 
6713  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6714  {
6715  // Leave this structure for remaining empty space.
6716  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6717  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6718  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6719  }
6720  else
6721  {
6722  // This structure becomes invalid.
6723  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6724  }
6725 
6726  return true;
6727  }
6728 
6729  return false;
6730  }
6731 
6732  private:
6733  static const size_t MAX_COUNT = 4;
6734 
6735  struct FreeSpace
6736  {
6737  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6738  VkDeviceSize offset;
6739  VkDeviceSize size;
6740  } m_FreeSpaces[MAX_COUNT];
6741  };
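/* Editor's note - illustrative sketch, not part of the library. How the
FreeSpaceDatabase above is typically driven during fast defragmentation; the
offsets and sizes below are made up:

    FreeSpaceDatabase db;
    db.Register(0, 1000, 4096);  // block 0: 4096 B hole starting at offset 1000

    size_t dstBlock;
    VkDeviceSize dstOffset;
    if(db.Fetch(256, 512, dstBlock, dstOffset))
    {
        // dstBlock == 0, dstOffset == 1024 (1000 rounded up to alignment 256).
        // The 3560 B tail of the hole stays registered because it is still
        // >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.
    }
*/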
6742 
6743  const bool m_OverlappingMoveSupported;
6744 
6745  uint32_t m_AllocationCount;
6746  bool m_AllAllocations;
6747 
6748  VkDeviceSize m_BytesMoved;
6749  uint32_t m_AllocationsMoved;
6750 
6751  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6752 
6753  void PreprocessMetadata();
6754  void PostprocessMetadata();
6755  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6756 };
6757 
6758 struct VmaBlockDefragmentationContext
6759 {
6760  enum BLOCK_FLAG
6761  {
6762  BLOCK_FLAG_USED = 0x00000001,
6763  };
6764  uint32_t flags;
6765  VkBuffer hBuffer;
6766 };
6767 
6768 class VmaBlockVectorDefragmentationContext
6769 {
6770  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6771 public:
6772  VkResult res;
6773  bool mutexLocked;
6774  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6775 
6776  VmaBlockVectorDefragmentationContext(
6777  VmaAllocator hAllocator,
6778  VmaPool hCustomPool, // Optional.
6779  VmaBlockVector* pBlockVector,
6780  uint32_t currFrameIndex);
6781  ~VmaBlockVectorDefragmentationContext();
6782 
6783  VmaPool GetCustomPool() const { return m_hCustomPool; }
6784  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6785  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6786 
6787  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6788  void AddAll() { m_AllAllocations = true; }
6789 
6790  void Begin(bool overlappingMoveSupported);
6791 
6792 private:
6793  const VmaAllocator m_hAllocator;
6794  // Null if not from custom pool.
6795  const VmaPool m_hCustomPool;
6796  // Redundant, kept for convenience so we don't have to fetch it from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6797  VmaBlockVector* const m_pBlockVector;
6798  const uint32_t m_CurrFrameIndex;
6799  // Owner of this object.
6800  VmaDefragmentationAlgorithm* m_pAlgorithm;
6801 
6802  struct AllocInfo
6803  {
6804  VmaAllocation hAlloc;
6805  VkBool32* pChanged;
6806  };
6807  // Used between constructor and Begin.
6808  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6809  bool m_AllAllocations;
6810 };
6811 
6812 struct VmaDefragmentationContext_T
6813 {
6814 private:
6815  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6816 public:
6817  VmaDefragmentationContext_T(
6818  VmaAllocator hAllocator,
6819  uint32_t currFrameIndex,
6820  uint32_t flags,
6821  VmaDefragmentationStats* pStats);
6822  ~VmaDefragmentationContext_T();
6823 
6824  void AddPools(uint32_t poolCount, VmaPool* pPools);
6825  void AddAllocations(
6826  uint32_t allocationCount,
6827  VmaAllocation* pAllocations,
6828  VkBool32* pAllocationsChanged);
6829 
6830  /*
6831  Returns:
6832  - `VK_SUCCESS` if succeeded and the object can be destroyed immediately.
6833  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6834  - Negative value if an error occurred and the object can be destroyed immediately.
6835  */
6836  VkResult Defragment(
6837  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6838  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6839  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6840 
6841 private:
6842  const VmaAllocator m_hAllocator;
6843  const uint32_t m_CurrFrameIndex;
6844  const uint32_t m_Flags;
6845  VmaDefragmentationStats* const m_pStats;
6846  // Owner of these objects.
6847  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6848  // Owner of these objects.
6849  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6850 };
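/* Editor's note - illustrative sketch, not part of the library. The
Defragment() contract above is what backs the public
vmaDefragmentationBegin()/vmaDefragmentationEnd() pair. A CPU-only run over a
caller-provided set of allocations might look like this ('allocs' and 'changed'
are assumed to be std::vector<VmaAllocation> / std::vector<VkBool32>):

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocs.size();
    info.pAllocations = allocs.data();
    info.pAllocationsChanged = changed.data();
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;      // no CPU byte limit
    info.maxCpuAllocationsToMove = UINT32_MAX;   // no CPU move-count limit
    // info.commandBuffer stays VK_NULL_HANDLE => no GPU-side moves.

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    // Per the return-value contract above: VK_SUCCESS or VK_NOT_READY should
    // both be finished with vmaDefragmentationEnd().
    if(res >= 0)
    {
        vmaDefragmentationEnd(allocator, ctx);
    }
*/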
6851 
6852 #if VMA_RECORDING_ENABLED
6853 
6854 class VmaRecorder
6855 {
6856 public:
6857  VmaRecorder();
6858  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6859  void WriteConfiguration(
6860  const VkPhysicalDeviceProperties& devProps,
6861  const VkPhysicalDeviceMemoryProperties& memProps,
6862  uint32_t vulkanApiVersion,
6863  bool dedicatedAllocationExtensionEnabled,
6864  bool bindMemory2ExtensionEnabled,
6865  bool memoryBudgetExtensionEnabled);
6866  ~VmaRecorder();
6867 
6868  void RecordCreateAllocator(uint32_t frameIndex);
6869  void RecordDestroyAllocator(uint32_t frameIndex);
6870  void RecordCreatePool(uint32_t frameIndex,
6871  const VmaPoolCreateInfo& createInfo,
6872  VmaPool pool);
6873  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6874  void RecordAllocateMemory(uint32_t frameIndex,
6875  const VkMemoryRequirements& vkMemReq,
6876  const VmaAllocationCreateInfo& createInfo,
6877  VmaAllocation allocation);
6878  void RecordAllocateMemoryPages(uint32_t frameIndex,
6879  const VkMemoryRequirements& vkMemReq,
6880  const VmaAllocationCreateInfo& createInfo,
6881  uint64_t allocationCount,
6882  const VmaAllocation* pAllocations);
6883  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6884  const VkMemoryRequirements& vkMemReq,
6885  bool requiresDedicatedAllocation,
6886  bool prefersDedicatedAllocation,
6887  const VmaAllocationCreateInfo& createInfo,
6888  VmaAllocation allocation);
6889  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6890  const VkMemoryRequirements& vkMemReq,
6891  bool requiresDedicatedAllocation,
6892  bool prefersDedicatedAllocation,
6893  const VmaAllocationCreateInfo& createInfo,
6894  VmaAllocation allocation);
6895  void RecordFreeMemory(uint32_t frameIndex,
6896  VmaAllocation allocation);
6897  void RecordFreeMemoryPages(uint32_t frameIndex,
6898  uint64_t allocationCount,
6899  const VmaAllocation* pAllocations);
6900  void RecordSetAllocationUserData(uint32_t frameIndex,
6901  VmaAllocation allocation,
6902  const void* pUserData);
6903  void RecordCreateLostAllocation(uint32_t frameIndex,
6904  VmaAllocation allocation);
6905  void RecordMapMemory(uint32_t frameIndex,
6906  VmaAllocation allocation);
6907  void RecordUnmapMemory(uint32_t frameIndex,
6908  VmaAllocation allocation);
6909  void RecordFlushAllocation(uint32_t frameIndex,
6910  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6911  void RecordInvalidateAllocation(uint32_t frameIndex,
6912  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6913  void RecordCreateBuffer(uint32_t frameIndex,
6914  const VkBufferCreateInfo& bufCreateInfo,
6915  const VmaAllocationCreateInfo& allocCreateInfo,
6916  VmaAllocation allocation);
6917  void RecordCreateImage(uint32_t frameIndex,
6918  const VkImageCreateInfo& imageCreateInfo,
6919  const VmaAllocationCreateInfo& allocCreateInfo,
6920  VmaAllocation allocation);
6921  void RecordDestroyBuffer(uint32_t frameIndex,
6922  VmaAllocation allocation);
6923  void RecordDestroyImage(uint32_t frameIndex,
6924  VmaAllocation allocation);
6925  void RecordTouchAllocation(uint32_t frameIndex,
6926  VmaAllocation allocation);
6927  void RecordGetAllocationInfo(uint32_t frameIndex,
6928  VmaAllocation allocation);
6929  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6930  VmaPool pool);
6931  void RecordDefragmentationBegin(uint32_t frameIndex,
6932  const VmaDefragmentationInfo2& info,
6933  VmaDefragmentationContext ctx);
6934  void RecordDefragmentationEnd(uint32_t frameIndex,
6935  VmaDefragmentationContext ctx);
6936  void RecordSetPoolName(uint32_t frameIndex,
6937  VmaPool pool,
6938  const char* name);
6939 
6940 private:
6941  struct CallParams
6942  {
6943  uint32_t threadId;
6944  double time;
6945  };
6946 
6947  class UserDataString
6948  {
6949  public:
6950  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6951  const char* GetString() const { return m_Str; }
6952 
6953  private:
6954  char m_PtrStr[17];
6955  const char* m_Str;
6956  };
6957 
6958  bool m_UseMutex;
6959  VmaRecordFlags m_Flags;
6960  FILE* m_File;
6961  VMA_MUTEX m_FileMutex;
6962  int64_t m_Freq;
6963  int64_t m_StartCounter;
6964 
6965  void GetBasicParams(CallParams& outParams);
6966 
6967  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6968  template<typename T>
6969  void PrintPointerList(uint64_t count, const T* pItems)
6970  {
6971  if(count)
6972  {
6973  fprintf(m_File, "%p", pItems[0]);
6974  for(uint64_t i = 1; i < count; ++i)
6975  {
6976  fprintf(m_File, " %p", pItems[i]);
6977  }
6978  }
6979  }
6980 
6981  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6982  void Flush();
6983 };
6984 
6985 #endif // #if VMA_RECORDING_ENABLED
6986 
6987 /*
6988 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6989 */
6990 class VmaAllocationObjectAllocator
6991 {
6992  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6993 public:
6994  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6995 
6996  VmaAllocation Allocate();
6997  void Free(VmaAllocation hAlloc);
6998 
6999 private:
7000  VMA_MUTEX m_Mutex;
7001  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7002 };
7003 
7004 struct VmaCurrentBudgetData
7005 {
7006  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7007  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7008 
7009 #if VMA_MEMORY_BUDGET
7010  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7011  VMA_RW_MUTEX m_BudgetMutex;
7012  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7013  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7014  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7015 #endif // #if VMA_MEMORY_BUDGET
7016 
7017  VmaCurrentBudgetData()
7018  {
7019  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7020  {
7021  m_BlockBytes[heapIndex] = 0;
7022  m_AllocationBytes[heapIndex] = 0;
7023 #if VMA_MEMORY_BUDGET
7024  m_VulkanUsage[heapIndex] = 0;
7025  m_VulkanBudget[heapIndex] = 0;
7026  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7027 #endif
7028  }
7029 
7030 #if VMA_MEMORY_BUDGET
7031  m_OperationsSinceBudgetFetch = 0;
7032 #endif
7033  }
7034 
7035  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7036  {
7037  m_AllocationBytes[heapIndex] += allocationSize;
7038 #if VMA_MEMORY_BUDGET
7039  ++m_OperationsSinceBudgetFetch;
7040 #endif
7041  }
7042 
7043  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7044  {
7045  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
7046  m_AllocationBytes[heapIndex] -= allocationSize;
7047 #if VMA_MEMORY_BUDGET
7048  ++m_OperationsSinceBudgetFetch;
7049 #endif
7050  }
7051 };
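/* Editor's note - illustrative sketch, not part of the library.
VmaCurrentBudgetData tracks two per-heap atomic counters: bytes owned by
VkDeviceMemory blocks (m_BlockBytes) and bytes occupied by live allocations
(m_AllocationBytes); their difference is block space not currently handed out.
With VMA_MEMORY_BUDGET, m_OperationsSinceBudgetFetch counts allocations/frees
since the VK_EXT_memory_budget numbers were last fetched, so the cached
usage/budget can be refreshed once it grows stale. Toy usage:

    VmaCurrentBudgetData budget;
    budget.m_BlockBytes[0] = 64ull << 20;  // pretend heap 0 owns one 64 MiB block
    budget.AddAllocation(0, 4096);         // one 4 KiB suballocation in heap 0
    uint64_t unused = budget.m_BlockBytes[0] - budget.m_AllocationBytes[0];
    // unused == 64 MiB - 4 KiB of block space not yet suballocated
*/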
7052 
7053 // Main allocator object.
7054 struct VmaAllocator_T
7055 {
7056  VMA_CLASS_NO_COPY(VmaAllocator_T)
7057 public:
7058  bool m_UseMutex;
7059  uint32_t m_VulkanApiVersion;
7060  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7061  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7062  bool m_UseExtMemoryBudget;
7063  VkDevice m_hDevice;
7064  VkInstance m_hInstance;
7065  bool m_AllocationCallbacksSpecified;
7066  VkAllocationCallbacks m_AllocationCallbacks;
7067  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7068  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7069 
7070  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so that no more than the heap size can be allocated from it.
7071  uint32_t m_HeapSizeLimitMask;
7072 
7073  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7074  VkPhysicalDeviceMemoryProperties m_MemProps;
7075 
7076  // Default pools.
7077  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7078 
7079  // Each vector is sorted by memory (handle value).
7080  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7081  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7082  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7083 
7084  VmaCurrentBudgetData m_Budget;
7085 
7086  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7087  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7088  ~VmaAllocator_T();
7089 
7090  const VkAllocationCallbacks* GetAllocationCallbacks() const
7091  {
7092  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7093  }
7094  const VmaVulkanFunctions& GetVulkanFunctions() const
7095  {
7096  return m_VulkanFunctions;
7097  }
7098 
7099  VkDeviceSize GetBufferImageGranularity() const
7100  {
7101  return VMA_MAX(
7102  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7103  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7104  }
7105 
7106  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7107  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7108 
7109  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7110  {
7111  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7112  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7113  }
7114  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7115  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7116  {
7117  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7118  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7119  }
7120  // Minimum alignment for all allocations in specific memory type.
7121  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7122  {
7123  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7124  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7125  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7126  }
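/* Editor's note - illustrative note, not part of the library. The flag test in
IsMemoryTypeNonCoherent() masks with (HOST_VISIBLE | HOST_COHERENT) and
compares against HOST_VISIBLE alone, which is true exactly for mappable memory
that needs explicit vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges().
GetMemoryTypeMinAlignment() then pads allocations in such types to
nonCoherentAtomSize so a flush of one allocation can never overlap a neighbor.
The test in isolation:

    VkMemoryPropertyFlags f = memProps.memoryTypes[i].propertyFlags;
    const bool nonCoherent =
        (f & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
              VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
*/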
7127 
7128  bool IsIntegratedGpu() const
7129  {
7130  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7131  }
7132 
7133 #if VMA_RECORDING_ENABLED
7134  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7135 #endif
7136 
7137  void GetBufferMemoryRequirements(
7138  VkBuffer hBuffer,
7139  VkMemoryRequirements& memReq,
7140  bool& requiresDedicatedAllocation,
7141  bool& prefersDedicatedAllocation) const;
7142  void GetImageMemoryRequirements(
7143  VkImage hImage,
7144  VkMemoryRequirements& memReq,
7145  bool& requiresDedicatedAllocation,
7146  bool& prefersDedicatedAllocation) const;
7147 
7148  // Main allocation function.
7149  VkResult AllocateMemory(
7150  const VkMemoryRequirements& vkMemReq,
7151  bool requiresDedicatedAllocation,
7152  bool prefersDedicatedAllocation,
7153  VkBuffer dedicatedBuffer,
7154  VkImage dedicatedImage,
7155  const VmaAllocationCreateInfo& createInfo,
7156  VmaSuballocationType suballocType,
7157  size_t allocationCount,
7158  VmaAllocation* pAllocations);
7159 
7160  // Main deallocation function.
7161  void FreeMemory(
7162  size_t allocationCount,
7163  const VmaAllocation* pAllocations);
7164 
7165  VkResult ResizeAllocation(
7166  const VmaAllocation alloc,
7167  VkDeviceSize newSize);
7168 
7169  void CalculateStats(VmaStats* pStats);
7170 
7171  void GetBudget(
7172  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7173 
7174 #if VMA_STATS_STRING_ENABLED
7175  void PrintDetailedMap(class VmaJsonWriter& json);
7176 #endif
7177 
7178  VkResult DefragmentationBegin(
7179  const VmaDefragmentationInfo2& info,
7180  VmaDefragmentationStats* pStats,
7181  VmaDefragmentationContext* pContext);
7182  VkResult DefragmentationEnd(
7183  VmaDefragmentationContext context);
7184 
7185  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7186  bool TouchAllocation(VmaAllocation hAllocation);
7187 
7188  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7189  void DestroyPool(VmaPool pool);
7190  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7191 
7192  void SetCurrentFrameIndex(uint32_t frameIndex);
7193  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7194 
7195  void MakePoolAllocationsLost(
7196  VmaPool hPool,
7197  size_t* pLostAllocationCount);
7198  VkResult CheckPoolCorruption(VmaPool hPool);
7199  VkResult CheckCorruption(uint32_t memoryTypeBits);
7200 
7201  void CreateLostAllocation(VmaAllocation* pAllocation);
7202 
7203  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7204  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7205  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7206  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7207  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7208  VkResult BindVulkanBuffer(
7209  VkDeviceMemory memory,
7210  VkDeviceSize memoryOffset,
7211  VkBuffer buffer,
7212  const void* pNext);
7213  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7214  VkResult BindVulkanImage(
7215  VkDeviceMemory memory,
7216  VkDeviceSize memoryOffset,
7217  VkImage image,
7218  const void* pNext);
7219 
7220  VkResult Map(VmaAllocation hAllocation, void** ppData);
7221  void Unmap(VmaAllocation hAllocation);
7222 
7223  VkResult BindBufferMemory(
7224  VmaAllocation hAllocation,
7225  VkDeviceSize allocationLocalOffset,
7226  VkBuffer hBuffer,
7227  const void* pNext);
7228  VkResult BindImageMemory(
7229  VmaAllocation hAllocation,
7230  VkDeviceSize allocationLocalOffset,
7231  VkImage hImage,
7232  const void* pNext);
7233 
7234  void FlushOrInvalidateAllocation(
7235  VmaAllocation hAllocation,
7236  VkDeviceSize offset, VkDeviceSize size,
7237  VMA_CACHE_OPERATION op);
7238 
7239  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7240 
7241  /*
7242  Returns bit mask of memory types that can support defragmentation on GPU as
7243  they support creation of required buffer for copy operations.
7244  */
7245  uint32_t GetGpuDefragmentationMemoryTypeBits();
7246 
7247 private:
7248  VkDeviceSize m_PreferredLargeHeapBlockSize;
7249 
7250  VkPhysicalDevice m_PhysicalDevice;
7251  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7252  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7253 
7254  VMA_RW_MUTEX m_PoolsMutex;
7255  // Protected by m_PoolsMutex. Sorted by pointer value.
7256  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7257  uint32_t m_NextPoolId;
7258 
7259  VmaVulkanFunctions m_VulkanFunctions;
7260 
7261 #if VMA_RECORDING_ENABLED
7262  VmaRecorder* m_pRecorder;
7263 #endif
7264 
7265  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7266 
7267  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7268 
7269  VkResult AllocateMemoryOfType(
7270  VkDeviceSize size,
7271  VkDeviceSize alignment,
7272  bool dedicatedAllocation,
7273  VkBuffer dedicatedBuffer,
7274  VkImage dedicatedImage,
7275  const VmaAllocationCreateInfo& createInfo,
7276  uint32_t memTypeIndex,
7277  VmaSuballocationType suballocType,
7278  size_t allocationCount,
7279  VmaAllocation* pAllocations);
7280 
7281  // Helper function only to be used inside AllocateDedicatedMemory.
7282  VkResult AllocateDedicatedMemoryPage(
7283  VkDeviceSize size,
7284  VmaSuballocationType suballocType,
7285  uint32_t memTypeIndex,
7286  const VkMemoryAllocateInfo& allocInfo,
7287  bool map,
7288  bool isUserDataString,
7289  void* pUserData,
7290  VmaAllocation* pAllocation);
7291 
7292  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7293  VkResult AllocateDedicatedMemory(
7294  VkDeviceSize size,
7295  VmaSuballocationType suballocType,
7296  uint32_t memTypeIndex,
7297  bool withinBudget,
7298  bool map,
7299  bool isUserDataString,
7300  void* pUserData,
7301  VkBuffer dedicatedBuffer,
7302  VkImage dedicatedImage,
7303  size_t allocationCount,
7304  VmaAllocation* pAllocations);
7305 
7306  void FreeDedicatedMemory(const VmaAllocation allocation);
7307 
7308  /*
7309  Calculates and returns bit mask of memory types that can support defragmentation
7310  on GPU as they support creation of required buffer for copy operations.
7311  */
7312  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7313 
7314 #if VMA_MEMORY_BUDGET
7315  void UpdateVulkanBudget();
7316 #endif // #if VMA_MEMORY_BUDGET
7317 };
7318 
7319 ////////////////////////////////////////////////////////////////////////////////
7320 // Memory allocation #2 after VmaAllocator_T definition
7321 
7322 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7323 {
7324  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7325 }
7326 
7327 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7328 {
7329  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7330 }
7331 
7332 template<typename T>
7333 static T* VmaAllocate(VmaAllocator hAllocator)
7334 {
7335  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7336 }
7337 
7338 template<typename T>
7339 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7340 {
7341  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7342 }
7343 
7344 template<typename T>
7345 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7346 {
7347  if(ptr != VMA_NULL)
7348  {
7349  ptr->~T();
7350  VmaFree(hAllocator, ptr);
7351  }
7352 }
7353 
7354 template<typename T>
7355 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7356 {
7357  if(ptr != VMA_NULL)
7358  {
7359  for(size_t i = count; i--; )
7360  ptr[i].~T();
7361  VmaFree(hAllocator, ptr);
7362  }
7363 }
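/* Editor's note - illustrative sketch, not part of the library.
VmaAllocate<T>()/VmaAllocateArray<T>() return raw, suitably aligned storage and
run no constructors, while vma_delete()/vma_delete_array() run destructors
(arrays in reverse order) before freeing. The usual pairing is placement-new;
MyType here is a placeholder:

    #include <new>

    MyType* p = new(VmaAllocate<MyType>(hAllocator)) MyType();
    // ... use *p ...
    vma_delete(hAllocator, p);   // ~MyType(), then VmaFree()
*/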
7364 
7365 ////////////////////////////////////////////////////////////////////////////////
7366 // VmaStringBuilder
7367 
7368 #if VMA_STATS_STRING_ENABLED
7369 
7370 class VmaStringBuilder
7371 {
7372 public:
7373  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7374  size_t GetLength() const { return m_Data.size(); }
7375  const char* GetData() const { return m_Data.data(); }
7376 
7377  void Add(char ch) { m_Data.push_back(ch); }
7378  void Add(const char* pStr);
7379  void AddNewLine() { Add('\n'); }
7380  void AddNumber(uint32_t num);
7381  void AddNumber(uint64_t num);
7382  void AddPointer(const void* ptr);
7383 
7384 private:
7385  VmaVector< char, VmaStlAllocator<char> > m_Data;
7386 };
7387 
7388 void VmaStringBuilder::Add(const char* pStr)
7389 {
7390  const size_t strLen = strlen(pStr);
7391  if(strLen > 0)
7392  {
7393  const size_t oldCount = m_Data.size();
7394  m_Data.resize(oldCount + strLen);
7395  memcpy(m_Data.data() + oldCount, pStr, strLen);
7396  }
7397 }
7398 
7399 void VmaStringBuilder::AddNumber(uint32_t num)
7400 {
7401  char buf[11];
7402  buf[10] = '\0';
7403  char *p = &buf[10];
7404  do
7405  {
7406  *--p = '0' + (num % 10);
7407  num /= 10;
7408  }
7409  while(num);
7410  Add(p);
7411 }
7412 
7413 void VmaStringBuilder::AddNumber(uint64_t num)
7414 {
7415  char buf[21];
7416  buf[20] = '\0';
7417  char *p = &buf[20];
7418  do
7419  {
7420  *--p = '0' + (num % 10);
7421  num /= 10;
7422  }
7423  while(num);
7424  Add(p);
7425 }
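/* Editor's note - illustrative note, not part of the library. Both AddNumber()
overloads size their buffer for the worst case plus a terminating NUL:
UINT32_MAX (4294967295) has 10 decimal digits, hence char buf[11]; UINT64_MAX
has 20, hence char buf[21]. Digits are produced least-significant first while p
walks backwards from the NUL, so no reversal pass is needed and p ends up
pointing at the most significant digit:

    // num = 407: writes '7', then '0', then '4' -> p points at "407".
*/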
7426 
7427 void VmaStringBuilder::AddPointer(const void* ptr)
7428 {
7429  char buf[21];
7430  VmaPtrToStr(buf, sizeof(buf), ptr);
7431  Add(buf);
7432 }
7433 
7434 #endif // #if VMA_STATS_STRING_ENABLED
7435 
7436 ////////////////////////////////////////////////////////////////////////////////
7437 // VmaJsonWriter
7438 
7439 #if VMA_STATS_STRING_ENABLED
7440 
7441 class VmaJsonWriter
7442 {
7443  VMA_CLASS_NO_COPY(VmaJsonWriter)
7444 public:
7445  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7446  ~VmaJsonWriter();
7447 
7448  void BeginObject(bool singleLine = false);
7449  void EndObject();
7450 
7451  void BeginArray(bool singleLine = false);
7452  void EndArray();
7453 
7454  void WriteString(const char* pStr);
7455  void BeginString(const char* pStr = VMA_NULL);
7456  void ContinueString(const char* pStr);
7457  void ContinueString(uint32_t n);
7458  void ContinueString(uint64_t n);
7459  void ContinueString_Pointer(const void* ptr);
7460  void EndString(const char* pStr = VMA_NULL);
7461 
7462  void WriteNumber(uint32_t n);
7463  void WriteNumber(uint64_t n);
7464  void WriteBool(bool b);
7465  void WriteNull();
7466 
7467 private:
7468  static const char* const INDENT;
7469 
7470  enum COLLECTION_TYPE
7471  {
7472  COLLECTION_TYPE_OBJECT,
7473  COLLECTION_TYPE_ARRAY,
7474  };
7475  struct StackItem
7476  {
7477  COLLECTION_TYPE type;
7478  uint32_t valueCount;
7479  bool singleLineMode;
7480  };
7481 
7482  VmaStringBuilder& m_SB;
7483  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7484  bool m_InsideString;
7485 
7486  void BeginValue(bool isString);
7487  void WriteIndent(bool oneLess = false);
7488 };
7489 
7490 const char* const VmaJsonWriter::INDENT = " ";
7491 
7492 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7493  m_SB(sb),
7494  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7495  m_InsideString(false)
7496 {
7497 }
7498 
7499 VmaJsonWriter::~VmaJsonWriter()
7500 {
7501  VMA_ASSERT(!m_InsideString);
7502  VMA_ASSERT(m_Stack.empty());
7503 }
7504 
7505 void VmaJsonWriter::BeginObject(bool singleLine)
7506 {
7507  VMA_ASSERT(!m_InsideString);
7508 
7509  BeginValue(false);
7510  m_SB.Add('{');
7511 
7512  StackItem item;
7513  item.type = COLLECTION_TYPE_OBJECT;
7514  item.valueCount = 0;
7515  item.singleLineMode = singleLine;
7516  m_Stack.push_back(item);
7517 }
7518 
7519 void VmaJsonWriter::EndObject()
7520 {
7521  VMA_ASSERT(!m_InsideString);
7522 
7523  WriteIndent(true);
7524  m_SB.Add('}');
7525 
7526  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7527  m_Stack.pop_back();
7528 }
7529 
7530 void VmaJsonWriter::BeginArray(bool singleLine)
7531 {
7532  VMA_ASSERT(!m_InsideString);
7533 
7534  BeginValue(false);
7535  m_SB.Add('[');
7536 
7537  StackItem item;
7538  item.type = COLLECTION_TYPE_ARRAY;
7539  item.valueCount = 0;
7540  item.singleLineMode = singleLine;
7541  m_Stack.push_back(item);
7542 }
7543 
7544 void VmaJsonWriter::EndArray()
7545 {
7546  VMA_ASSERT(!m_InsideString);
7547 
7548  WriteIndent(true);
7549  m_SB.Add(']');
7550 
7551  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7552  m_Stack.pop_back();
7553 }
7554 
7555 void VmaJsonWriter::WriteString(const char* pStr)
7556 {
7557  BeginString(pStr);
7558  EndString();
7559 }
7560 
7561 void VmaJsonWriter::BeginString(const char* pStr)
7562 {
7563  VMA_ASSERT(!m_InsideString);
7564 
7565  BeginValue(true);
7566  m_SB.Add('"');
7567  m_InsideString = true;
7568  if(pStr != VMA_NULL && pStr[0] != '\0')
7569  {
7570  ContinueString(pStr);
7571  }
7572 }
7573 
7574 void VmaJsonWriter::ContinueString(const char* pStr)
7575 {
7576  VMA_ASSERT(m_InsideString);
7577 
7578  const size_t strLen = strlen(pStr);
7579  for(size_t i = 0; i < strLen; ++i)
7580  {
7581  char ch = pStr[i];
7582  if(ch == '\\')
7583  {
7584  m_SB.Add("\\\\");
7585  }
7586  else if(ch == '"')
7587  {
7588  m_SB.Add("\\\"");
7589  }
7590  else if(ch >= 32)
7591  {
7592  m_SB.Add(ch);
7593  }
7594  else switch(ch)
7595  {
7596  case '\b':
7597  m_SB.Add("\\b");
7598  break;
7599  case '\f':
7600  m_SB.Add("\\f");
7601  break;
7602  case '\n':
7603  m_SB.Add("\\n");
7604  break;
7605  case '\r':
7606  m_SB.Add("\\r");
7607  break;
7608  case '\t':
7609  m_SB.Add("\\t");
7610  break;
7611  default:
7612  VMA_ASSERT(0 && "Character not currently supported.");
7613  break;
7614  }
7615  }
7616 }
7617 
7618 void VmaJsonWriter::ContinueString(uint32_t n)
7619 {
7620  VMA_ASSERT(m_InsideString);
7621  m_SB.AddNumber(n);
7622 }
7623 
7624 void VmaJsonWriter::ContinueString(uint64_t n)
7625 {
7626  VMA_ASSERT(m_InsideString);
7627  m_SB.AddNumber(n);
7628 }
7629 
7630 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7631 {
7632  VMA_ASSERT(m_InsideString);
7633  m_SB.AddPointer(ptr);
7634 }
7635 
7636 void VmaJsonWriter::EndString(const char* pStr)
7637 {
7638  VMA_ASSERT(m_InsideString);
7639  if(pStr != VMA_NULL && pStr[0] != '\0')
7640  {
7641  ContinueString(pStr);
7642  }
7643  m_SB.Add('"');
7644  m_InsideString = false;
7645 }
7646 
7647 void VmaJsonWriter::WriteNumber(uint32_t n)
7648 {
7649  VMA_ASSERT(!m_InsideString);
7650  BeginValue(false);
7651  m_SB.AddNumber(n);
7652 }
7653 
7654 void VmaJsonWriter::WriteNumber(uint64_t n)
7655 {
7656  VMA_ASSERT(!m_InsideString);
7657  BeginValue(false);
7658  m_SB.AddNumber(n);
7659 }
7660 
7661 void VmaJsonWriter::WriteBool(bool b)
7662 {
7663  VMA_ASSERT(!m_InsideString);
7664  BeginValue(false);
7665  m_SB.Add(b ? "true" : "false");
7666 }
7667 
7668 void VmaJsonWriter::WriteNull()
7669 {
7670  VMA_ASSERT(!m_InsideString);
7671  BeginValue(false);
7672  m_SB.Add("null");
7673 }
7674 
7675 void VmaJsonWriter::BeginValue(bool isString)
7676 {
7677  if(!m_Stack.empty())
7678  {
7679  StackItem& currItem = m_Stack.back();
7680  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7681  currItem.valueCount % 2 == 0)
7682  {
7683  VMA_ASSERT(isString);
7684  }
7685 
7686  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7687  currItem.valueCount % 2 != 0)
7688  {
7689  m_SB.Add(": ");
7690  }
7691  else if(currItem.valueCount > 0)
7692  {
7693  m_SB.Add(", ");
7694  WriteIndent();
7695  }
7696  else
7697  {
7698  WriteIndent();
7699  }
7700  ++currItem.valueCount;
7701  }
7702 }
7703 
7704 void VmaJsonWriter::WriteIndent(bool oneLess)
7705 {
7706  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7707  {
7708  m_SB.AddNewLine();
7709 
7710  size_t count = m_Stack.size();
7711  if(count > 0 && oneLess)
7712  {
7713  --count;
7714  }
7715  for(size_t i = 0; i < count; ++i)
7716  {
7717  m_SB.Add(INDENT);
7718  }
7719  }
7720 }
7721 
7722 #endif // #if VMA_STATS_STRING_ENABLED
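/* Editor's note - illustrative sketch, not part of the library. VmaJsonWriter
enforces JSON shape through its stack: inside an object, an even
StackItem::valueCount means a string key is expected next (asserted in
BeginValue), and an odd one makes BeginValue emit ": " before the value. A
minimal emission, assuming 'allocator' is a valid VmaAllocator:

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Size");   // key - must be a string
        json.WriteNumber(256u);     // value
        json.EndObject();
    }   // ~VmaJsonWriter() asserts the stack is empty and no string is open
    // sb now holds: {\n "Size": 256\n}
*/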
7723 
7724 ////////////////////////////////////////////////////////////////////////////////
7725 
7726 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7727 {
7728  if(IsUserDataString())
7729  {
7730  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7731 
7732  FreeUserDataString(hAllocator);
7733 
7734  if(pUserData != VMA_NULL)
7735  {
7736  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
7737  }
7738  }
7739  else
7740  {
7741  m_pUserData = pUserData;
7742  }
7743 }
7744 
7745 void VmaAllocation_T::ChangeBlockAllocation(
7746  VmaAllocator hAllocator,
7747  VmaDeviceMemoryBlock* block,
7748  VkDeviceSize offset)
7749 {
7750  VMA_ASSERT(block != VMA_NULL);
7751  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7752 
7753  // Move mapping reference counter from old block to new block.
7754  if(block != m_BlockAllocation.m_Block)
7755  {
7756  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7757  if(IsPersistentMap())
7758  ++mapRefCount;
7759  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7760  block->Map(hAllocator, mapRefCount, VMA_NULL);
7761  }
7762 
7763  m_BlockAllocation.m_Block = block;
7764  m_BlockAllocation.m_Offset = offset;
7765 }
7766 
7767 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7768 {
7769  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7770  m_BlockAllocation.m_Offset = newOffset;
7771 }
7772 
7773 VkDeviceSize VmaAllocation_T::GetOffset() const
7774 {
7775  switch(m_Type)
7776  {
7777  case ALLOCATION_TYPE_BLOCK:
7778  return m_BlockAllocation.m_Offset;
7779  case ALLOCATION_TYPE_DEDICATED:
7780  return 0;
7781  default:
7782  VMA_ASSERT(0);
7783  return 0;
7784  }
7785 }
7786 
7787 VkDeviceMemory VmaAllocation_T::GetMemory() const
7788 {
7789  switch(m_Type)
7790  {
7791  case ALLOCATION_TYPE_BLOCK:
7792  return m_BlockAllocation.m_Block->GetDeviceMemory();
7793  case ALLOCATION_TYPE_DEDICATED:
7794  return m_DedicatedAllocation.m_hMemory;
7795  default:
7796  VMA_ASSERT(0);
7797  return VK_NULL_HANDLE;
7798  }
7799 }
7800 
7801 void* VmaAllocation_T::GetMappedData() const
7802 {
7803  switch(m_Type)
7804  {
7805  case ALLOCATION_TYPE_BLOCK:
7806  if(m_MapCount != 0)
7807  {
7808  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7809  VMA_ASSERT(pBlockData != VMA_NULL);
7810  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7811  }
7812  else
7813  {
7814  return VMA_NULL;
7815  }
7816  break;
7817  case ALLOCATION_TYPE_DEDICATED:
7818  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7819  return m_DedicatedAllocation.m_pMappedData;
7820  default:
7821  VMA_ASSERT(0);
7822  return VMA_NULL;
7823  }
7824 }
7825 
7826 bool VmaAllocation_T::CanBecomeLost() const
7827 {
7828  switch(m_Type)
7829  {
7830  case ALLOCATION_TYPE_BLOCK:
7831  return m_BlockAllocation.m_CanBecomeLost;
7832  case ALLOCATION_TYPE_DEDICATED:
7833  return false;
7834  default:
7835  VMA_ASSERT(0);
7836  return false;
7837  }
7838 }
7839 
7840 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7841 {
7842  VMA_ASSERT(CanBecomeLost());
7843 
7844  /*
7845  Warning: This is a carefully designed algorithm.
7846  Do not modify unless you really know what you're doing :)
7847  */
7848  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7849  for(;;)
7850  {
7851  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7852  {
7853  VMA_ASSERT(0);
7854  return false;
7855  }
7856  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7857  {
7858  return false;
7859  }
7860  else // Last use time earlier than current time.
7861  {
7862  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7863  {
7864  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7865  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7866  return true;
7867  }
7868  }
7869  }
7870 }
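/* Editor's note - illustrative note, not part of the library. MakeLost() is a
classic compare-and-swap retry loop: it bails out if the allocation is already
lost or was used within the last frameInUseCount frames, and otherwise tries to
atomically publish VMA_FRAME_INDEX_LOST. If a concurrent touch changed the
index in the meantime, the CAS fails, the local copy is refreshed, and the
conditions are re-checked - so an allocation touched "just now" is never marked
lost. The same shape in std::atomic terms (all names are placeholders):

    uint32_t seen = lastUse.load();
    for(;;)
    {
        if(seen == LOST_SENTINEL)
            return false;                            // already lost
        if(seen + frameInUseCount >= currentFrame)
            return false;                            // used too recently
        if(lastUse.compare_exchange_weak(seen, LOST_SENTINEL))
            return true;                             // successfully marked lost
        // CAS failure refreshed 'seen'; loop and re-check.
    }
*/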
7871 
7872 #if VMA_STATS_STRING_ENABLED
7873 
7874 // Correspond to values of enum VmaSuballocationType.
7875 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7876  "FREE",
7877  "UNKNOWN",
7878  "BUFFER",
7879  "IMAGE_UNKNOWN",
7880  "IMAGE_LINEAR",
7881  "IMAGE_OPTIMAL",
7882 };
7883 
7884 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7885 {
7886  json.WriteString("Type");
7887  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7888 
7889  json.WriteString("Size");
7890  json.WriteNumber(m_Size);
7891 
7892  if(m_pUserData != VMA_NULL)
7893  {
7894  json.WriteString("UserData");
7895  if(IsUserDataString())
7896  {
7897  json.WriteString((const char*)m_pUserData);
7898  }
7899  else
7900  {
7901  json.BeginString();
7902  json.ContinueString_Pointer(m_pUserData);
7903  json.EndString();
7904  }
7905  }
7906 
7907  json.WriteString("CreationFrameIndex");
7908  json.WriteNumber(m_CreationFrameIndex);
7909 
7910  json.WriteString("LastUseFrameIndex");
7911  json.WriteNumber(GetLastUseFrameIndex());
7912 
7913  if(m_BufferImageUsage != 0)
7914  {
7915  json.WriteString("Usage");
7916  json.WriteNumber(m_BufferImageUsage);
7917  }
7918 }
7919 
7920 #endif
7921 
7922 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7923 {
7924  VMA_ASSERT(IsUserDataString());
7925  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
7926  m_pUserData = VMA_NULL;
7927 }
7928 
7929 void VmaAllocation_T::BlockAllocMap()
7930 {
7931  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7932 
7933  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7934  {
7935  ++m_MapCount;
7936  }
7937  else
7938  {
7939  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7940  }
7941 }
7942 
7943 void VmaAllocation_T::BlockAllocUnmap()
7944 {
7945  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7946 
7947  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7948  {
7949  --m_MapCount;
7950  }
7951  else
7952  {
7953  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7954  }
7955 }
7956 
7957 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7958 {
7959  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7960 
7961  if(m_MapCount != 0)
7962  {
7963  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7964  {
7965  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7966  *ppData = m_DedicatedAllocation.m_pMappedData;
7967  ++m_MapCount;
7968  return VK_SUCCESS;
7969  }
7970  else
7971  {
7972  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7973  return VK_ERROR_MEMORY_MAP_FAILED;
7974  }
7975  }
7976  else
7977  {
7978  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7979  hAllocator->m_hDevice,
7980  m_DedicatedAllocation.m_hMemory,
7981  0, // offset
7982  VK_WHOLE_SIZE,
7983  0, // flags
7984  ppData);
7985  if(result == VK_SUCCESS)
7986  {
7987  m_DedicatedAllocation.m_pMappedData = *ppData;
7988  m_MapCount = 1;
7989  }
7990  return result;
7991  }
7992 }
7993 
7994 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7995 {
7996  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7997 
7998  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7999  {
8000  --m_MapCount;
8001  if(m_MapCount == 0)
8002  {
8003  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8004  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8005  hAllocator->m_hDevice,
8006  m_DedicatedAllocation.m_hMemory);
8007  }
8008  }
8009  else
8010  {
8011  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8012  }
8013 }
8014 
8015 #if VMA_STATS_STRING_ENABLED
8016 
8017 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8018 {
8019  json.BeginObject();
8020 
8021  json.WriteString("Blocks");
8022  json.WriteNumber(stat.blockCount);
8023 
8024  json.WriteString("Allocations");
8025  json.WriteNumber(stat.allocationCount);
8026 
8027  json.WriteString("UnusedRanges");
8028  json.WriteNumber(stat.unusedRangeCount);
8029 
8030  json.WriteString("UsedBytes");
8031  json.WriteNumber(stat.usedBytes);
8032 
8033  json.WriteString("UnusedBytes");
8034  json.WriteNumber(stat.unusedBytes);
8035 
8036  if(stat.allocationCount > 1)
8037  {
8038  json.WriteString("AllocationSize");
8039  json.BeginObject(true);
8040  json.WriteString("Min");
8041  json.WriteNumber(stat.allocationSizeMin);
8042  json.WriteString("Avg");
8043  json.WriteNumber(stat.allocationSizeAvg);
8044  json.WriteString("Max");
8045  json.WriteNumber(stat.allocationSizeMax);
8046  json.EndObject();
8047  }
8048 
8049  if(stat.unusedRangeCount > 1)
8050  {
8051  json.WriteString("UnusedRangeSize");
8052  json.BeginObject(true);
8053  json.WriteString("Min");
8054  json.WriteNumber(stat.unusedRangeSizeMin);
8055  json.WriteString("Avg");
8056  json.WriteNumber(stat.unusedRangeSizeAvg);
8057  json.WriteString("Max");
8058  json.WriteNumber(stat.unusedRangeSizeMax);
8059  json.EndObject();
8060  }
8061 
8062  json.EndObject();
8063 }
8064 
8065 #endif // #if VMA_STATS_STRING_ENABLED
8066 
8067 struct VmaSuballocationItemSizeLess
8068 {
8069  bool operator()(
8070  const VmaSuballocationList::iterator lhs,
8071  const VmaSuballocationList::iterator rhs) const
8072  {
8073  return lhs->size < rhs->size;
8074  }
8075  bool operator()(
8076  const VmaSuballocationList::iterator lhs,
8077  VkDeviceSize rhsSize) const
8078  {
8079  return lhs->size < rhsSize;
8080  }
8081 };
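/* Editor's note - illustrative note, not part of the library. The second
operator() overload compares an iterator against a bare VkDeviceSize, which is
what lets CreateAllocationRequest() below run a lower_bound-style binary search
over the size-ascending m_FreeSuballocationsBySize array. Conceptually:

    // auto it = std::lower_bound(bySize.begin(), bySize.end(),
    //     requiredSize, VmaSuballocationItemSizeLess());
    // *it is the first free suballocation with size >= requiredSize.
*/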
8082 
8083 
8084 ////////////////////////////////////////////////////////////////////////////////
8085 // class VmaBlockMetadata
8086 
8087 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8088  m_Size(0),
8089  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8090 {
8091 }
8092 
8093 #if VMA_STATS_STRING_ENABLED
8094 
8095 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8096  VkDeviceSize unusedBytes,
8097  size_t allocationCount,
8098  size_t unusedRangeCount) const
8099 {
8100  json.BeginObject();
8101 
8102  json.WriteString("TotalBytes");
8103  json.WriteNumber(GetSize());
8104 
8105  json.WriteString("UnusedBytes");
8106  json.WriteNumber(unusedBytes);
8107 
8108  json.WriteString("Allocations");
8109  json.WriteNumber((uint64_t)allocationCount);
8110 
8111  json.WriteString("UnusedRanges");
8112  json.WriteNumber((uint64_t)unusedRangeCount);
8113 
8114  json.WriteString("Suballocations");
8115  json.BeginArray();
8116 }
8117 
8118 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8119  VkDeviceSize offset,
8120  VmaAllocation hAllocation) const
8121 {
8122  json.BeginObject(true);
8123 
8124  json.WriteString("Offset");
8125  json.WriteNumber(offset);
8126 
8127  hAllocation->PrintParameters(json);
8128 
8129  json.EndObject();
8130 }
8131 
8132 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8133  VkDeviceSize offset,
8134  VkDeviceSize size) const
8135 {
8136  json.BeginObject(true);
8137 
8138  json.WriteString("Offset");
8139  json.WriteNumber(offset);
8140 
8141  json.WriteString("Type");
8142  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8143 
8144  json.WriteString("Size");
8145  json.WriteNumber(size);
8146 
8147  json.EndObject();
8148 }
8149 
8150 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8151 {
8152  json.EndArray();
8153  json.EndObject();
8154 }
8155 
8156 #endif // #if VMA_STATS_STRING_ENABLED
8157 
8158 ////////////////////////////////////////////////////////////////////////////////
8159 // class VmaBlockMetadata_Generic
8160 
8161 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8162  VmaBlockMetadata(hAllocator),
8163  m_FreeCount(0),
8164  m_SumFreeSize(0),
8165  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8166  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8167 {
8168 }
8169 
8170 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8171 {
8172 }
8173 
8174 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8175 {
8176  VmaBlockMetadata::Init(size);
8177 
8178  m_FreeCount = 1;
8179  m_SumFreeSize = size;
8180 
8181  VmaSuballocation suballoc = {};
8182  suballoc.offset = 0;
8183  suballoc.size = size;
8184  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8185  suballoc.hAllocation = VK_NULL_HANDLE;
8186 
8187  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8188  m_Suballocations.push_back(suballoc);
8189  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8190  --suballocItem;
8191  m_FreeSuballocationsBySize.push_back(suballocItem);
8192 }
8193 
8194 bool VmaBlockMetadata_Generic::Validate() const
8195 {
8196  VMA_VALIDATE(!m_Suballocations.empty());
8197 
8198  // Expected offset of new suballocation as calculated from previous ones.
8199  VkDeviceSize calculatedOffset = 0;
8200  // Expected number of free suballocations as calculated from traversing their list.
8201  uint32_t calculatedFreeCount = 0;
8202  // Expected sum size of free suballocations as calculated from traversing their list.
8203  VkDeviceSize calculatedSumFreeSize = 0;
8204  // Expected number of free suballocations that should be registered in
8205  // m_FreeSuballocationsBySize calculated from traversing their list.
8206  size_t freeSuballocationsToRegister = 0;
8207  // True if previous visited suballocation was free.
8208  bool prevFree = false;
8209 
8210  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8211  suballocItem != m_Suballocations.cend();
8212  ++suballocItem)
8213  {
8214  const VmaSuballocation& subAlloc = *suballocItem;
8215 
8216  // Actual offset of this suballocation doesn't match the expected one.
8217  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8218 
8219  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8220  // Two adjacent free suballocations are invalid. They should be merged.
8221  VMA_VALIDATE(!prevFree || !currFree);
8222 
8223  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8224 
8225  if(currFree)
8226  {
8227  calculatedSumFreeSize += subAlloc.size;
8228  ++calculatedFreeCount;
8229  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8230  {
8231  ++freeSuballocationsToRegister;
8232  }
8233 
8234  // Margin required between allocations - every free space must be at least that large.
8235  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8236  }
8237  else
8238  {
8239  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8240  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8241 
8242  // Margin required between allocations - previous allocation must be free.
8243  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8244  }
8245 
8246  calculatedOffset += subAlloc.size;
8247  prevFree = currFree;
8248  }
8249 
8250  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8251  // match expected one.
8252  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8253 
8254  VkDeviceSize lastSize = 0;
8255  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8256  {
8257  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8258 
8259  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8260  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8261  // They must be sorted by size ascending.
8262  VMA_VALIDATE(suballocItem->size >= lastSize);
8263 
8264  lastSize = suballocItem->size;
8265  }
8266 
8267  // Check if totals match calculated values.
8268  VMA_VALIDATE(ValidateFreeSuballocationList());
8269  VMA_VALIDATE(calculatedOffset == GetSize());
8270  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8271  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8272 
8273  return true;
8274 }
8275 
8276 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8277 {
8278  if(!m_FreeSuballocationsBySize.empty())
8279  {
8280  return m_FreeSuballocationsBySize.back()->size;
8281  }
8282  else
8283  {
8284  return 0;
8285  }
8286 }
8287 
8288 bool VmaBlockMetadata_Generic::IsEmpty() const
8289 {
8290  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8291 }
8292 
8293 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8294 {
8295  outInfo.blockCount = 1;
8296 
8297  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8298  outInfo.allocationCount = rangeCount - m_FreeCount;
8299  outInfo.unusedRangeCount = m_FreeCount;
8300 
8301  outInfo.unusedBytes = m_SumFreeSize;
8302  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8303 
8304  outInfo.allocationSizeMin = UINT64_MAX;
8305  outInfo.allocationSizeMax = 0;
8306  outInfo.unusedRangeSizeMin = UINT64_MAX;
8307  outInfo.unusedRangeSizeMax = 0;
8308 
8309  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8310  suballocItem != m_Suballocations.cend();
8311  ++suballocItem)
8312  {
8313  const VmaSuballocation& suballoc = *suballocItem;
8314  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8315  {
8316  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8317  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8318  }
8319  else
8320  {
8321  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8322  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8323  }
8324  }
8325 }
8326 
8327 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8328 {
8329  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8330 
8331  inoutStats.size += GetSize();
8332  inoutStats.unusedSize += m_SumFreeSize;
8333  inoutStats.allocationCount += rangeCount - m_FreeCount;
8334  inoutStats.unusedRangeCount += m_FreeCount;
8335  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8336 }
8337 
8338 #if VMA_STATS_STRING_ENABLED
8339 
8340 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8341 {
8342  PrintDetailedMap_Begin(json,
8343  m_SumFreeSize, // unusedBytes
8344  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8345  m_FreeCount); // unusedRangeCount
8346 
8347  size_t i = 0;
8348  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8349  suballocItem != m_Suballocations.cend();
8350  ++suballocItem, ++i)
8351  {
8352  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8353  {
8354  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8355  }
8356  else
8357  {
8358  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8359  }
8360  }
8361 
8362  PrintDetailedMap_End(json);
8363 }
8364 
8365 #endif // #if VMA_STATS_STRING_ENABLED
8366 
8367 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8368  uint32_t currentFrameIndex,
8369  uint32_t frameInUseCount,
8370  VkDeviceSize bufferImageGranularity,
8371  VkDeviceSize allocSize,
8372  VkDeviceSize allocAlignment,
8373  bool upperAddress,
8374  VmaSuballocationType allocType,
8375  bool canMakeOtherLost,
8376  uint32_t strategy,
8377  VmaAllocationRequest* pAllocationRequest)
8378 {
8379  VMA_ASSERT(allocSize > 0);
8380  VMA_ASSERT(!upperAddress);
8381  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8382  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8383  VMA_HEAVY_ASSERT(Validate());
8384 
8385  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8386 
8387  // There is not enough total free space in this block to fulfill the request: early return.
8388  if(canMakeOtherLost == false &&
8389  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8390  {
8391  return false;
8392  }
8393 
8394  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
8395  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8396  if(freeSuballocCount > 0)
8397  {
8398  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
8399  {
8400  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8401  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8402  m_FreeSuballocationsBySize.data(),
8403  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8404  allocSize + 2 * VMA_DEBUG_MARGIN,
8405  VmaSuballocationItemSizeLess());
8406  size_t index = it - m_FreeSuballocationsBySize.data();
8407  for(; index < freeSuballocCount; ++index)
8408  {
8409  if(CheckAllocation(
8410  currentFrameIndex,
8411  frameInUseCount,
8412  bufferImageGranularity,
8413  allocSize,
8414  allocAlignment,
8415  allocType,
8416  m_FreeSuballocationsBySize[index],
8417  false, // canMakeOtherLost
8418  &pAllocationRequest->offset,
8419  &pAllocationRequest->itemsToMakeLostCount,
8420  &pAllocationRequest->sumFreeSize,
8421  &pAllocationRequest->sumItemSize))
8422  {
8423  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8424  return true;
8425  }
8426  }
8427  }
8428  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8429  {
8430  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8431  it != m_Suballocations.end();
8432  ++it)
8433  {
8434  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8435  currentFrameIndex,
8436  frameInUseCount,
8437  bufferImageGranularity,
8438  allocSize,
8439  allocAlignment,
8440  allocType,
8441  it,
8442  false, // canMakeOtherLost
8443  &pAllocationRequest->offset,
8444  &pAllocationRequest->itemsToMakeLostCount,
8445  &pAllocationRequest->sumFreeSize,
8446  &pAllocationRequest->sumItemSize))
8447  {
8448  pAllocationRequest->item = it;
8449  return true;
8450  }
8451  }
8452  }
8453  else // WORST_FIT, FIRST_FIT
8454  {
8455  // Search starting from the biggest suballocations.
8456  for(size_t index = freeSuballocCount; index--; )
8457  {
8458  if(CheckAllocation(
8459  currentFrameIndex,
8460  frameInUseCount,
8461  bufferImageGranularity,
8462  allocSize,
8463  allocAlignment,
8464  allocType,
8465  m_FreeSuballocationsBySize[index],
8466  false, // canMakeOtherLost
8467  &pAllocationRequest->offset,
8468  &pAllocationRequest->itemsToMakeLostCount,
8469  &pAllocationRequest->sumFreeSize,
8470  &pAllocationRequest->sumItemSize))
8471  {
8472  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8473  return true;
8474  }
8475  }
8476  }
8477  }
8478 
8479  if(canMakeOtherLost)
8480  {
8481  // Brute-force algorithm. TODO: Come up with something better.
8482 
8483  bool found = false;
8484  VmaAllocationRequest tmpAllocRequest = {};
8485  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8486  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8487  suballocIt != m_Suballocations.end();
8488  ++suballocIt)
8489  {
8490  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8491  suballocIt->hAllocation->CanBecomeLost())
8492  {
8493  if(CheckAllocation(
8494  currentFrameIndex,
8495  frameInUseCount,
8496  bufferImageGranularity,
8497  allocSize,
8498  allocAlignment,
8499  allocType,
8500  suballocIt,
8501  canMakeOtherLost,
8502  &tmpAllocRequest.offset,
8503  &tmpAllocRequest.itemsToMakeLostCount,
8504  &tmpAllocRequest.sumFreeSize,
8505  &tmpAllocRequest.sumItemSize))
8506  {
8507  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8508  {
8509  *pAllocationRequest = tmpAllocRequest;
8510  pAllocationRequest->item = suballocIt;
8511  break;
8512  }
8513  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8514  {
8515  *pAllocationRequest = tmpAllocRequest;
8516  pAllocationRequest->item = suballocIt;
8517  found = true;
8518  }
8519  }
8520  }
8521  }
8522 
8523  return found;
8524  }
8525 
8526  return false;
8527 }
8528 
8529 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8530  uint32_t currentFrameIndex,
8531  uint32_t frameInUseCount,
8532  VmaAllocationRequest* pAllocationRequest)
8533 {
8534  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8535 
8536  while(pAllocationRequest->itemsToMakeLostCount > 0)
8537  {
8538  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8539  {
8540  ++pAllocationRequest->item;
8541  }
8542  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8543  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8544  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8545  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8546  {
8547  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8548  --pAllocationRequest->itemsToMakeLostCount;
8549  }
8550  else
8551  {
8552  return false;
8553  }
8554  }
8555 
8556  VMA_HEAVY_ASSERT(Validate());
8557  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8558  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8559 
8560  return true;
8561 }
8562 
8563 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8564 {
8565  uint32_t lostAllocationCount = 0;
8566  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8567  it != m_Suballocations.end();
8568  ++it)
8569  {
8570  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8571  it->hAllocation->CanBecomeLost() &&
8572  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8573  {
8574  it = FreeSuballocation(it);
8575  ++lostAllocationCount;
8576  }
8577  }
8578  return lostAllocationCount;
8579 }
8580 
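// CheckCorruption() relies on the VMA_DEBUG_MARGIN scheme: when the margin is enabled,
// a known magic value is written into the margin before each used suballocation (at
// offset - VMA_DEBUG_MARGIN) and right after it (at offset + size). An overwritten
// magic value indicates that some write ran outside its allocation's bounds.
// VmaValidateMagicValue() is defined earlier in this file.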
8581 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8582 {
8583  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8584  it != m_Suballocations.end();
8585  ++it)
8586  {
8587  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8588  {
8589  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8590  {
8591  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8592  return VK_ERROR_VALIDATION_FAILED_EXT;
8593  }
8594  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8595  {
8596  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8597  return VK_ERROR_VALIDATION_FAILED_EXT;
8598  }
8599  }
8600  }
8601 
8602  return VK_SUCCESS;
8603 }
8604 
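// Alloc() carves the requested range out of a single free suballocation. The chosen
// free range may be larger than needed and the final offset may be aligned past its
// start, so up to two new free suballocations are created around the used one:
//
//   |<-- paddingBegin -->|<-------- allocSize -------->|<-- paddingEnd -->|
//    new free suballoc     request.item becomes used     new free suballoc
//
// m_FreeCount and m_SumFreeSize are adjusted accordingly.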
8605 void VmaBlockMetadata_Generic::Alloc(
8606  const VmaAllocationRequest& request,
8607  VmaSuballocationType type,
8608  VkDeviceSize allocSize,
8609  VmaAllocation hAllocation)
8610 {
8611  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8612  VMA_ASSERT(request.item != m_Suballocations.end());
8613  VmaSuballocation& suballoc = *request.item;
8614  // Given suballocation is a free block.
8615  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8616  // Given offset is inside this suballocation.
8617  VMA_ASSERT(request.offset >= suballoc.offset);
8618  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8619  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8620  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8621 
8622  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8623  // it to become used.
8624  UnregisterFreeSuballocation(request.item);
8625 
8626  suballoc.offset = request.offset;
8627  suballoc.size = allocSize;
8628  suballoc.type = type;
8629  suballoc.hAllocation = hAllocation;
8630 
8631  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8632  if(paddingEnd)
8633  {
8634  VmaSuballocation paddingSuballoc = {};
8635  paddingSuballoc.offset = request.offset + allocSize;
8636  paddingSuballoc.size = paddingEnd;
8637  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8638  VmaSuballocationList::iterator next = request.item;
8639  ++next;
8640  const VmaSuballocationList::iterator paddingEndItem =
8641  m_Suballocations.insert(next, paddingSuballoc);
8642  RegisterFreeSuballocation(paddingEndItem);
8643  }
8644 
8645  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8646  if(paddingBegin)
8647  {
8648  VmaSuballocation paddingSuballoc = {};
8649  paddingSuballoc.offset = request.offset - paddingBegin;
8650  paddingSuballoc.size = paddingBegin;
8651  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8652  const VmaSuballocationList::iterator paddingBeginItem =
8653  m_Suballocations.insert(request.item, paddingSuballoc);
8654  RegisterFreeSuballocation(paddingBeginItem);
8655  }
8656 
8657  // Update totals.
8658  m_FreeCount = m_FreeCount - 1;
8659  if(paddingBegin > 0)
8660  {
8661  ++m_FreeCount;
8662  }
8663  if(paddingEnd > 0)
8664  {
8665  ++m_FreeCount;
8666  }
8667  m_SumFreeSize -= allocSize;
8668 }
8669 
8670 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8671 {
8672  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8673  suballocItem != m_Suballocations.end();
8674  ++suballocItem)
8675  {
8676  VmaSuballocation& suballoc = *suballocItem;
8677  if(suballoc.hAllocation == allocation)
8678  {
8679  FreeSuballocation(suballocItem);
8680  VMA_HEAVY_ASSERT(Validate());
8681  return;
8682  }
8683  }
8684  VMA_ASSERT(0 && "Not found!");
8685 }
8686 
8687 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8688 {
8689  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8690  suballocItem != m_Suballocations.end();
8691  ++suballocItem)
8692  {
8693  VmaSuballocation& suballoc = *suballocItem;
8694  if(suballoc.offset == offset)
8695  {
8696  FreeSuballocation(suballocItem);
8697  return;
8698  }
8699  }
8700  VMA_ASSERT(0 && "Not found!");
8701 }
8702 
8703 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8704 {
8705  VkDeviceSize lastSize = 0;
8706  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8707  {
8708  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8709 
8710  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8711  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8712  VMA_VALIDATE(it->size >= lastSize);
8713  lastSize = it->size;
8714  }
8715  return true;
8716 }
8717 
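// CheckAllocation() is the core fitting test used by CreateAllocationRequest(). It has
// two paths: with canMakeOtherLost == false it only tests a single free suballocation;
// with canMakeOtherLost == true it may span a run of consecutive suballocations,
// counting how many lost-capable allocations would have to be evicted
// (*itemsToMakeLostCount) and their total size (*pSumItemSize). In both paths the
// candidate offset is bumped for VMA_DEBUG_MARGIN, the requested alignment, and
// bufferImageGranularity conflicts with neighboring suballocations of a different
// resource kind, as checked by VmaIsBufferImageGranularityConflict().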
8718 bool VmaBlockMetadata_Generic::CheckAllocation(
8719  uint32_t currentFrameIndex,
8720  uint32_t frameInUseCount,
8721  VkDeviceSize bufferImageGranularity,
8722  VkDeviceSize allocSize,
8723  VkDeviceSize allocAlignment,
8724  VmaSuballocationType allocType,
8725  VmaSuballocationList::const_iterator suballocItem,
8726  bool canMakeOtherLost,
8727  VkDeviceSize* pOffset,
8728  size_t* itemsToMakeLostCount,
8729  VkDeviceSize* pSumFreeSize,
8730  VkDeviceSize* pSumItemSize) const
8731 {
8732  VMA_ASSERT(allocSize > 0);
8733  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8734  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8735  VMA_ASSERT(pOffset != VMA_NULL);
8736 
8737  *itemsToMakeLostCount = 0;
8738  *pSumFreeSize = 0;
8739  *pSumItemSize = 0;
8740 
8741  if(canMakeOtherLost)
8742  {
8743  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8744  {
8745  *pSumFreeSize = suballocItem->size;
8746  }
8747  else
8748  {
8749  if(suballocItem->hAllocation->CanBecomeLost() &&
8750  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8751  {
8752  ++*itemsToMakeLostCount;
8753  *pSumItemSize = suballocItem->size;
8754  }
8755  else
8756  {
8757  return false;
8758  }
8759  }
8760 
8761  // Remaining size is too small for this request: Early return.
8762  if(GetSize() - suballocItem->offset < allocSize)
8763  {
8764  return false;
8765  }
8766 
8767  // Start from offset equal to beginning of this suballocation.
8768  *pOffset = suballocItem->offset;
8769 
8770  // Apply VMA_DEBUG_MARGIN at the beginning.
8771  if(VMA_DEBUG_MARGIN > 0)
8772  {
8773  *pOffset += VMA_DEBUG_MARGIN;
8774  }
8775 
8776  // Apply alignment.
8777  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8778 
8779  // Check previous suballocations for BufferImageGranularity conflicts.
8780  // Make bigger alignment if necessary.
8781  if(bufferImageGranularity > 1)
8782  {
8783  bool bufferImageGranularityConflict = false;
8784  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8785  while(prevSuballocItem != m_Suballocations.cbegin())
8786  {
8787  --prevSuballocItem;
8788  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8789  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8790  {
8791  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8792  {
8793  bufferImageGranularityConflict = true;
8794  break;
8795  }
8796  }
8797  else
8798  // Already on previous page.
8799  break;
8800  }
8801  if(bufferImageGranularityConflict)
8802  {
8803  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8804  }
8805  }
8806 
8807  // Now that we have final *pOffset, check if we are past suballocItem.
8808  // If yes, return false - this function should be called for another suballocItem as starting point.
8809  if(*pOffset >= suballocItem->offset + suballocItem->size)
8810  {
8811  return false;
8812  }
8813 
8814  // Calculate padding at the beginning based on current offset.
8815  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8816 
8817  // Calculate required margin at the end.
8818  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8819 
8820  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8821  // Another early return check.
8822  if(suballocItem->offset + totalSize > GetSize())
8823  {
8824  return false;
8825  }
8826 
8827  // Advance lastSuballocItem until desired size is reached.
8828  // Update itemsToMakeLostCount.
8829  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8830  if(totalSize > suballocItem->size)
8831  {
8832  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8833  while(remainingSize > 0)
8834  {
8835  ++lastSuballocItem;
8836  if(lastSuballocItem == m_Suballocations.cend())
8837  {
8838  return false;
8839  }
8840  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8841  {
8842  *pSumFreeSize += lastSuballocItem->size;
8843  }
8844  else
8845  {
8846  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8847  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8848  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8849  {
8850  ++*itemsToMakeLostCount;
8851  *pSumItemSize += lastSuballocItem->size;
8852  }
8853  else
8854  {
8855  return false;
8856  }
8857  }
8858  remainingSize = (lastSuballocItem->size < remainingSize) ?
8859  remainingSize - lastSuballocItem->size : 0;
8860  }
8861  }
8862 
8863  // Check next suballocations for BufferImageGranularity conflicts.
8864  // If conflict exists, we must mark more allocations lost or fail.
8865  if(bufferImageGranularity > 1)
8866  {
8867  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8868  ++nextSuballocItem;
8869  while(nextSuballocItem != m_Suballocations.cend())
8870  {
8871  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8872  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8873  {
8874  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8875  {
8876  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8877  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8878  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8879  {
8880  ++*itemsToMakeLostCount;
8881  }
8882  else
8883  {
8884  return false;
8885  }
8886  }
8887  }
8888  else
8889  {
8890  // Already on next page.
8891  break;
8892  }
8893  ++nextSuballocItem;
8894  }
8895  }
8896  }
8897  else
8898  {
8899  const VmaSuballocation& suballoc = *suballocItem;
8900  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8901 
8902  *pSumFreeSize = suballoc.size;
8903 
8904  // Size of this suballocation is too small for this request: Early return.
8905  if(suballoc.size < allocSize)
8906  {
8907  return false;
8908  }
8909 
8910  // Start from offset equal to beginning of this suballocation.
8911  *pOffset = suballoc.offset;
8912 
8913  // Apply VMA_DEBUG_MARGIN at the beginning.
8914  if(VMA_DEBUG_MARGIN > 0)
8915  {
8916  *pOffset += VMA_DEBUG_MARGIN;
8917  }
8918 
8919  // Apply alignment.
8920  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8921 
8922  // Check previous suballocations for BufferImageGranularity conflicts.
8923  // Make bigger alignment if necessary.
8924  if(bufferImageGranularity > 1)
8925  {
8926  bool bufferImageGranularityConflict = false;
8927  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8928  while(prevSuballocItem != m_Suballocations.cbegin())
8929  {
8930  --prevSuballocItem;
8931  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8932  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8933  {
8934  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8935  {
8936  bufferImageGranularityConflict = true;
8937  break;
8938  }
8939  }
8940  else
8941  // Already on previous page.
8942  break;
8943  }
8944  if(bufferImageGranularityConflict)
8945  {
8946  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8947  }
8948  }
8949 
8950  // Calculate padding at the beginning based on current offset.
8951  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8952 
8953  // Calculate required margin at the end.
8954  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8955 
8956  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8957  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8958  {
8959  return false;
8960  }
8961 
8962  // Check next suballocations for BufferImageGranularity conflicts.
8963  // If conflict exists, allocation cannot be made here.
8964  if(bufferImageGranularity > 1)
8965  {
8966  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8967  ++nextSuballocItem;
8968  while(nextSuballocItem != m_Suballocations.cend())
8969  {
8970  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8971  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8972  {
8973  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8974  {
8975  return false;
8976  }
8977  }
8978  else
8979  {
8980  // Already on next page.
8981  break;
8982  }
8983  ++nextSuballocItem;
8984  }
8985  }
8986  }
8987 
8988  // All tests passed: Success. pOffset is already filled.
8989  return true;
8990 }
8991 
8992 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8993 {
8994  VMA_ASSERT(item != m_Suballocations.end());
8995  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8996 
8997  VmaSuballocationList::iterator nextItem = item;
8998  ++nextItem;
8999  VMA_ASSERT(nextItem != m_Suballocations.end());
9000  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9001 
9002  item->size += nextItem->size;
9003  --m_FreeCount;
9004  m_Suballocations.erase(nextItem);
9005 }
9006 
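// FreeSuballocation() marks an item free, updates the totals, then coalesces it with a
// free predecessor and/or successor so the suballocation list never contains two
// adjacent free items. It returns the iterator of the resulting (possibly merged) free
// item, already re-registered in m_FreeSuballocationsBySize.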
9007 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9008 {
9009  // Change this suballocation to be marked as free.
9010  VmaSuballocation& suballoc = *suballocItem;
9011  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9012  suballoc.hAllocation = VK_NULL_HANDLE;
9013 
9014  // Update totals.
9015  ++m_FreeCount;
9016  m_SumFreeSize += suballoc.size;
9017 
9018  // Merge with previous and/or next suballocation if it's also free.
9019  bool mergeWithNext = false;
9020  bool mergeWithPrev = false;
9021 
9022  VmaSuballocationList::iterator nextItem = suballocItem;
9023  ++nextItem;
9024  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9025  {
9026  mergeWithNext = true;
9027  }
9028 
9029  VmaSuballocationList::iterator prevItem = suballocItem;
9030  if(suballocItem != m_Suballocations.begin())
9031  {
9032  --prevItem;
9033  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9034  {
9035  mergeWithPrev = true;
9036  }
9037  }
9038 
9039  if(mergeWithNext)
9040  {
9041  UnregisterFreeSuballocation(nextItem);
9042  MergeFreeWithNext(suballocItem);
9043  }
9044 
9045  if(mergeWithPrev)
9046  {
9047  UnregisterFreeSuballocation(prevItem);
9048  MergeFreeWithNext(prevItem);
9049  RegisterFreeSuballocation(prevItem);
9050  return prevItem;
9051  }
9052  else
9053  {
9054  RegisterFreeSuballocation(suballocItem);
9055  return suballocItem;
9056  }
9057 }
9058 
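// m_FreeSuballocationsBySize holds iterators to free suballocations sorted by
// ascending size, but only items of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER
// bytes; smaller fragments are deliberately kept out of the best-fit search structure.
// Register/Unregister below maintain that sorted order.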
9059 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9060 {
9061  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9062  VMA_ASSERT(item->size > 0);
9063 
9064  // You may want to enable this validation at the beginning or at the end of
9065  // this function, depending on what you want to check.
9066  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9067 
9068  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9069  {
9070  if(m_FreeSuballocationsBySize.empty())
9071  {
9072  m_FreeSuballocationsBySize.push_back(item);
9073  }
9074  else
9075  {
9076  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9077  }
9078  }
9079 
9080  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9081 }
9082 
9083 
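// Because many free suballocations can share the same size, Unregister first
// binary-searches for the first entry of equal size (VmaBinaryFindFirstNotLess), then
// scans forward through the equal-sized entries until it finds the exact iterator to
// remove.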
9084 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9085 {
9086  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9087  VMA_ASSERT(item->size > 0);
9088 
9089  // You may want to enable this validation at the beginning or at the end of
9090  // this function, depending on what you want to check.
9091  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9092 
9093  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9094  {
9095  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9096  m_FreeSuballocationsBySize.data(),
9097  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9098  item,
9099  VmaSuballocationItemSizeLess());
9100  for(size_t index = it - m_FreeSuballocationsBySize.data();
9101  index < m_FreeSuballocationsBySize.size();
9102  ++index)
9103  {
9104  if(m_FreeSuballocationsBySize[index] == item)
9105  {
9106  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9107  return;
9108  }
9109  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9110  }
9111  VMA_ASSERT(0 && "Not found.");
9112  }
9113 
9114  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9115 }
9116 
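// Scans used suballocations in address order, tracking the smallest allocation
// alignment seen and whether any two consecutive used suballocations have types that
// conflict across bufferImageGranularity; the caller (the defragmentation code later
// in this file) uses this to decide whether granularity conflicts can occur at all.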
9117 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9118  VkDeviceSize bufferImageGranularity,
9119  VmaSuballocationType& inOutPrevSuballocType) const
9120 {
9121  if(bufferImageGranularity == 1 || IsEmpty())
9122  {
9123  return false;
9124  }
9125 
9126  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9127  bool typeConflictFound = false;
9128  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9129  it != m_Suballocations.cend();
9130  ++it)
9131  {
9132  const VmaSuballocationType suballocType = it->type;
9133  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9134  {
9135  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9136  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9137  {
9138  typeConflictFound = true;
9139  }
9140  inOutPrevSuballocType = suballocType;
9141  }
9142  }
9143 
9144  return typeConflictFound || minAlignment >= bufferImageGranularity;
9145 }
9146 
9147 ////////////////////////////////////////////////////////////////////////////////
9148 // class VmaBlockMetadata_Linear
9149 
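// VmaBlockMetadata_Linear keeps its suballocations in two vectors instead of a list
// and works in one of three modes, tracked by m_2ndVectorMode:
//
//   SECOND_VECTOR_EMPTY:        |--- 1st --->             free                      |
//   SECOND_VECTOR_RING_BUFFER:  |--- 2nd --->    free    |--- 1st --->              |
//   SECOND_VECTOR_DOUBLE_STACK: |--- 1st --->            free           <--- 2nd ---|
//
// 1st always grows toward higher addresses; 2nd either wraps around behind it (ring
// buffer) or grows down from the end of the block (double stack, for upper-address
// allocations). A pool using this metadata is created through the public API declared
// earlier in this header, e.g. (a sketch; memTypeIndex and blockSize stand for values
// chosen by the application):
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   poolCreateInfo.blockSize = blockSize;
//   poolCreateInfo.maxBlockCount = 1;
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);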
9150 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9151  VmaBlockMetadata(hAllocator),
9152  m_SumFreeSize(0),
9153  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9154  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9155  m_1stVectorIndex(0),
9156  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9157  m_1stNullItemsBeginCount(0),
9158  m_1stNullItemsMiddleCount(0),
9159  m_2ndNullItemsCount(0)
9160 {
9161 }
9162 
9163 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9164 {
9165 }
9166 
9167 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9168 {
9169  VmaBlockMetadata::Init(size);
9170  m_SumFreeSize = size;
9171 }
9172 
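// Note on "null items": when an allocation from the beginning or middle of a vector is
// freed, its entry is not erased immediately; it simply gets hAllocation ==
// VK_NULL_HANDLE. m_1stNullItemsBeginCount, m_1stNullItemsMiddleCount and
// m_2ndNullItemsCount track these holes so Validate() and the iteration code can
// account for them; compaction happens lazily elsewhere in this class.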
9173 bool VmaBlockMetadata_Linear::Validate() const
9174 {
9175  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9176  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9177 
9178  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9179  VMA_VALIDATE(!suballocations1st.empty() ||
9180  suballocations2nd.empty() ||
9181  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9182 
9183  if(!suballocations1st.empty())
9184  {
9185  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
9186  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9187  // Null item at the end should be just pop_back().
9188  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9189  }
9190  if(!suballocations2nd.empty())
9191  {
9192  // Null item at the end should be just pop_back().
9193  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9194  }
9195 
9196  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9197  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9198 
9199  VkDeviceSize sumUsedSize = 0;
9200  const size_t suballoc1stCount = suballocations1st.size();
9201  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9202 
9203  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9204  {
9205  const size_t suballoc2ndCount = suballocations2nd.size();
9206  size_t nullItem2ndCount = 0;
9207  for(size_t i = 0; i < suballoc2ndCount; ++i)
9208  {
9209  const VmaSuballocation& suballoc = suballocations2nd[i];
9210  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9211 
9212  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9213  VMA_VALIDATE(suballoc.offset >= offset);
9214 
9215  if(!currFree)
9216  {
9217  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9218  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9219  sumUsedSize += suballoc.size;
9220  }
9221  else
9222  {
9223  ++nullItem2ndCount;
9224  }
9225 
9226  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9227  }
9228 
9229  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9230  }
9231 
9232  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9233  {
9234  const VmaSuballocation& suballoc = suballocations1st[i];
9235  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9236  suballoc.hAllocation == VK_NULL_HANDLE);
9237  }
9238 
9239  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9240 
9241  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9242  {
9243  const VmaSuballocation& suballoc = suballocations1st[i];
9244  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9245 
9246  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9247  VMA_VALIDATE(suballoc.offset >= offset);
9248  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9249 
9250  if(!currFree)
9251  {
9252  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9253  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9254  sumUsedSize += suballoc.size;
9255  }
9256  else
9257  {
9258  ++nullItem1stCount;
9259  }
9260 
9261  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9262  }
9263  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9264 
9265  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9266  {
9267  const size_t suballoc2ndCount = suballocations2nd.size();
9268  size_t nullItem2ndCount = 0;
9269  for(size_t i = suballoc2ndCount; i--; )
9270  {
9271  const VmaSuballocation& suballoc = suballocations2nd[i];
9272  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9273 
9274  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9275  VMA_VALIDATE(suballoc.offset >= offset);
9276 
9277  if(!currFree)
9278  {
9279  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9280  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9281  sumUsedSize += suballoc.size;
9282  }
9283  else
9284  {
9285  ++nullItem2ndCount;
9286  }
9287 
9288  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9289  }
9290 
9291  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9292  }
9293 
9294  VMA_VALIDATE(offset <= GetSize());
9295  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9296 
9297  return true;
9298 }
9299 
9300 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9301 {
9302  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9303  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9304 }
9305 
9306 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9307 {
9308  const VkDeviceSize size = GetSize();
9309 
9310  /*
9311  We don't consider gaps inside allocation vectors with freed allocations because
9312  they are not suitable for reuse in linear allocator. We consider only space that
9313  is available for new allocations.
9314  */
9315  if(IsEmpty())
9316  {
9317  return size;
9318  }
9319 
9320  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9321 
9322  switch(m_2ndVectorMode)
9323  {
9324  case SECOND_VECTOR_EMPTY:
9325  /*
9326  Available space is after end of 1st, as well as before beginning of 1st (which
9327  would make it a ring buffer).
9328  */
9329  {
9330  const size_t suballocations1stCount = suballocations1st.size();
9331  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9332  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9333  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9334  return VMA_MAX(
9335  firstSuballoc.offset,
9336  size - (lastSuballoc.offset + lastSuballoc.size));
9337  }
9338  break;
9339 
9340  case SECOND_VECTOR_RING_BUFFER:
9341  /*
9342  Available space is only between end of 2nd and beginning of 1st.
9343  */
9344  {
9345  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9346  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9347  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9348  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9349  }
9350  break;
9351 
9352  case SECOND_VECTOR_DOUBLE_STACK:
9353  /*
9354  Available space is only between end of 1st and top of 2nd.
9355  */
9356  {
9357  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9358  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9359  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9360  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9361  }
9362  break;
9363 
9364  default:
9365  VMA_ASSERT(0);
9366  return 0;
9367  }
9368 }
9369 
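// CalcAllocationStatInfo(), AddPoolStats() and PrintDetailedMap() below all walk the
// block in the same address order: first the 2nd vector when it acts as a ring buffer
// (lowest addresses), then the 1st vector, then the 2nd vector again when it acts as
// the upper stack. Gaps between consecutive non-null allocations are reported as
// unused ranges.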
9370 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9371 {
9372  const VkDeviceSize size = GetSize();
9373  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9374  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9375  const size_t suballoc1stCount = suballocations1st.size();
9376  const size_t suballoc2ndCount = suballocations2nd.size();
9377 
9378  outInfo.blockCount = 1;
9379  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9380  outInfo.unusedRangeCount = 0;
9381  outInfo.usedBytes = 0;
9382  outInfo.allocationSizeMin = UINT64_MAX;
9383  outInfo.allocationSizeMax = 0;
9384  outInfo.unusedRangeSizeMin = UINT64_MAX;
9385  outInfo.unusedRangeSizeMax = 0;
9386 
9387  VkDeviceSize lastOffset = 0;
9388 
9389  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9390  {
9391  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9392  size_t nextAlloc2ndIndex = 0;
9393  while(lastOffset < freeSpace2ndTo1stEnd)
9394  {
9395  // Find next non-null allocation or move nextAllocIndex to the end.
9396  while(nextAlloc2ndIndex < suballoc2ndCount &&
9397  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9398  {
9399  ++nextAlloc2ndIndex;
9400  }
9401 
9402  // Found non-null allocation.
9403  if(nextAlloc2ndIndex < suballoc2ndCount)
9404  {
9405  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9406 
9407  // 1. Process free space before this allocation.
9408  if(lastOffset < suballoc.offset)
9409  {
9410  // There is free space from lastOffset to suballoc.offset.
9411  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9412  ++outInfo.unusedRangeCount;
9413  outInfo.unusedBytes += unusedRangeSize;
9414  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9415  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9416  }
9417 
9418  // 2. Process this allocation.
9419  // There is allocation with suballoc.offset, suballoc.size.
9420  outInfo.usedBytes += suballoc.size;
9421  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9422  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9423 
9424  // 3. Prepare for next iteration.
9425  lastOffset = suballoc.offset + suballoc.size;
9426  ++nextAlloc2ndIndex;
9427  }
9428  // We are at the end.
9429  else
9430  {
9431  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9432  if(lastOffset < freeSpace2ndTo1stEnd)
9433  {
9434  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9435  ++outInfo.unusedRangeCount;
9436  outInfo.unusedBytes += unusedRangeSize;
9437  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9438  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9439  }
9440 
9441  // End of loop.
9442  lastOffset = freeSpace2ndTo1stEnd;
9443  }
9444  }
9445  }
9446 
9447  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9448  const VkDeviceSize freeSpace1stTo2ndEnd =
9449  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9450  while(lastOffset < freeSpace1stTo2ndEnd)
9451  {
9452  // Find next non-null allocation or move nextAllocIndex to the end.
9453  while(nextAlloc1stIndex < suballoc1stCount &&
9454  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9455  {
9456  ++nextAlloc1stIndex;
9457  }
9458 
9459  // Found non-null allocation.
9460  if(nextAlloc1stIndex < suballoc1stCount)
9461  {
9462  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9463 
9464  // 1. Process free space before this allocation.
9465  if(lastOffset < suballoc.offset)
9466  {
9467  // There is free space from lastOffset to suballoc.offset.
9468  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9469  ++outInfo.unusedRangeCount;
9470  outInfo.unusedBytes += unusedRangeSize;
9471  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9472  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9473  }
9474 
9475  // 2. Process this allocation.
9476  // There is allocation with suballoc.offset, suballoc.size.
9477  outInfo.usedBytes += suballoc.size;
9478  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9479  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9480 
9481  // 3. Prepare for next iteration.
9482  lastOffset = suballoc.offset + suballoc.size;
9483  ++nextAlloc1stIndex;
9484  }
9485  // We are at the end.
9486  else
9487  {
9488  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9489  if(lastOffset < freeSpace1stTo2ndEnd)
9490  {
9491  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9492  ++outInfo.unusedRangeCount;
9493  outInfo.unusedBytes += unusedRangeSize;
9494  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9495  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9496  }
9497 
9498  // End of loop.
9499  lastOffset = freeSpace1stTo2ndEnd;
9500  }
9501  }
9502 
9503  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9504  {
9505  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9506  while(lastOffset < size)
9507  {
9508  // Find next non-null allocation or move nextAllocIndex to the end.
9509  while(nextAlloc2ndIndex != SIZE_MAX &&
9510  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9511  {
9512  --nextAlloc2ndIndex;
9513  }
9514 
9515  // Found non-null allocation.
9516  if(nextAlloc2ndIndex != SIZE_MAX)
9517  {
9518  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9519 
9520  // 1. Process free space before this allocation.
9521  if(lastOffset < suballoc.offset)
9522  {
9523  // There is free space from lastOffset to suballoc.offset.
9524  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9525  ++outInfo.unusedRangeCount;
9526  outInfo.unusedBytes += unusedRangeSize;
9527  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9528  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9529  }
9530 
9531  // 2. Process this allocation.
9532  // There is allocation with suballoc.offset, suballoc.size.
9533  outInfo.usedBytes += suballoc.size;
9534  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9535  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9536 
9537  // 3. Prepare for next iteration.
9538  lastOffset = suballoc.offset + suballoc.size;
9539  --nextAlloc2ndIndex;
9540  }
9541  // We are at the end.
9542  else
9543  {
9544  // There is free space from lastOffset to size.
9545  if(lastOffset < size)
9546  {
9547  const VkDeviceSize unusedRangeSize = size - lastOffset;
9548  ++outInfo.unusedRangeCount;
9549  outInfo.unusedBytes += unusedRangeSize;
9550  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9551  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9552  }
9553 
9554  // End of loop.
9555  lastOffset = size;
9556  }
9557  }
9558  }
9559 
9560  outInfo.unusedBytes = size - outInfo.usedBytes;
9561 }
9562 
9563 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9564 {
9565  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9566  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9567  const VkDeviceSize size = GetSize();
9568  const size_t suballoc1stCount = suballocations1st.size();
9569  const size_t suballoc2ndCount = suballocations2nd.size();
9570 
9571  inoutStats.size += size;
9572 
9573  VkDeviceSize lastOffset = 0;
9574 
9575  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9576  {
9577  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9578  size_t nextAlloc2ndIndex = 0;
9579  while(lastOffset < freeSpace2ndTo1stEnd)
9580  {
9581  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9582  while(nextAlloc2ndIndex < suballoc2ndCount &&
9583  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9584  {
9585  ++nextAlloc2ndIndex;
9586  }
9587 
9588  // Found non-null allocation.
9589  if(nextAlloc2ndIndex < suballoc2ndCount)
9590  {
9591  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9592 
9593  // 1. Process free space before this allocation.
9594  if(lastOffset < suballoc.offset)
9595  {
9596  // There is free space from lastOffset to suballoc.offset.
9597  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9598  inoutStats.unusedSize += unusedRangeSize;
9599  ++inoutStats.unusedRangeCount;
9600  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9601  }
9602 
9603  // 2. Process this allocation.
9604  // There is allocation with suballoc.offset, suballoc.size.
9605  ++inoutStats.allocationCount;
9606 
9607  // 3. Prepare for next iteration.
9608  lastOffset = suballoc.offset + suballoc.size;
9609  ++nextAlloc2ndIndex;
9610  }
9611  // We are at the end.
9612  else
9613  {
9614  if(lastOffset < freeSpace2ndTo1stEnd)
9615  {
9616  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9617  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9618  inoutStats.unusedSize += unusedRangeSize;
9619  ++inoutStats.unusedRangeCount;
9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9621  }
9622 
9623  // End of loop.
9624  lastOffset = freeSpace2ndTo1stEnd;
9625  }
9626  }
9627  }
9628 
9629  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9630  const VkDeviceSize freeSpace1stTo2ndEnd =
9631  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9632  while(lastOffset < freeSpace1stTo2ndEnd)
9633  {
9634  // Find next non-null allocation or move nextAllocIndex to the end.
9635  while(nextAlloc1stIndex < suballoc1stCount &&
9636  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9637  {
9638  ++nextAlloc1stIndex;
9639  }
9640 
9641  // Found non-null allocation.
9642  if(nextAlloc1stIndex < suballoc1stCount)
9643  {
9644  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9645 
9646  // 1. Process free space before this allocation.
9647  if(lastOffset < suballoc.offset)
9648  {
9649  // There is free space from lastOffset to suballoc.offset.
9650  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9651  inoutStats.unusedSize += unusedRangeSize;
9652  ++inoutStats.unusedRangeCount;
9653  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9654  }
9655 
9656  // 2. Process this allocation.
9657  // There is allocation with suballoc.offset, suballoc.size.
9658  ++inoutStats.allocationCount;
9659 
9660  // 3. Prepare for next iteration.
9661  lastOffset = suballoc.offset + suballoc.size;
9662  ++nextAlloc1stIndex;
9663  }
9664  // We are at the end.
9665  else
9666  {
9667  if(lastOffset < freeSpace1stTo2ndEnd)
9668  {
9669  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9670  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9671  inoutStats.unusedSize += unusedRangeSize;
9672  ++inoutStats.unusedRangeCount;
9673  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9674  }
9675 
9676  // End of loop.
9677  lastOffset = freeSpace1stTo2ndEnd;
9678  }
9679  }
9680 
9681  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9682  {
9683  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9684  while(lastOffset < size)
9685  {
9686  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9687  while(nextAlloc2ndIndex != SIZE_MAX &&
9688  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9689  {
9690  --nextAlloc2ndIndex;
9691  }
9692 
9693  // Found non-null allocation.
9694  if(nextAlloc2ndIndex != SIZE_MAX)
9695  {
9696  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9697 
9698  // 1. Process free space before this allocation.
9699  if(lastOffset < suballoc.offset)
9700  {
9701  // There is free space from lastOffset to suballoc.offset.
9702  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9703  inoutStats.unusedSize += unusedRangeSize;
9704  ++inoutStats.unusedRangeCount;
9705  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9706  }
9707 
9708  // 2. Process this allocation.
9709  // There is allocation with suballoc.offset, suballoc.size.
9710  ++inoutStats.allocationCount;
9711 
9712  // 3. Prepare for next iteration.
9713  lastOffset = suballoc.offset + suballoc.size;
9714  --nextAlloc2ndIndex;
9715  }
9716  // We are at the end.
9717  else
9718  {
9719  if(lastOffset < size)
9720  {
9721  // There is free space from lastOffset to size.
9722  const VkDeviceSize unusedRangeSize = size - lastOffset;
9723  inoutStats.unusedSize += unusedRangeSize;
9724  ++inoutStats.unusedRangeCount;
9725  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9726  }
9727 
9728  // End of loop.
9729  lastOffset = size;
9730  }
9731  }
9732  }
9733 }
9734 
9735 #if VMA_STATS_STRING_ENABLED
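// PrintDetailedMap() makes two passes over the same traversal: the first pass only
// counts allocations, used bytes and unused ranges so PrintDetailedMap_Begin() can
// emit the JSON header, and the second pass emits one JSON entry per allocation and
// per unused range.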
9736 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9737 {
9738  const VkDeviceSize size = GetSize();
9739  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9740  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9741  const size_t suballoc1stCount = suballocations1st.size();
9742  const size_t suballoc2ndCount = suballocations2nd.size();
9743 
9744  // FIRST PASS
9745 
9746  size_t unusedRangeCount = 0;
9747  VkDeviceSize usedBytes = 0;
9748 
9749  VkDeviceSize lastOffset = 0;
9750 
9751  size_t alloc2ndCount = 0;
9752  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9753  {
9754  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9755  size_t nextAlloc2ndIndex = 0;
9756  while(lastOffset < freeSpace2ndTo1stEnd)
9757  {
9758  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9759  while(nextAlloc2ndIndex < suballoc2ndCount &&
9760  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9761  {
9762  ++nextAlloc2ndIndex;
9763  }
9764 
9765  // Found non-null allocation.
9766  if(nextAlloc2ndIndex < suballoc2ndCount)
9767  {
9768  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9769 
9770  // 1. Process free space before this allocation.
9771  if(lastOffset < suballoc.offset)
9772  {
9773  // There is free space from lastOffset to suballoc.offset.
9774  ++unusedRangeCount;
9775  }
9776 
9777  // 2. Process this allocation.
9778  // There is allocation with suballoc.offset, suballoc.size.
9779  ++alloc2ndCount;
9780  usedBytes += suballoc.size;
9781 
9782  // 3. Prepare for next iteration.
9783  lastOffset = suballoc.offset + suballoc.size;
9784  ++nextAlloc2ndIndex;
9785  }
9786  // We are at the end.
9787  else
9788  {
9789  if(lastOffset < freeSpace2ndTo1stEnd)
9790  {
9791  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9792  ++unusedRangeCount;
9793  }
9794 
9795  // End of loop.
9796  lastOffset = freeSpace2ndTo1stEnd;
9797  }
9798  }
9799  }
9800 
9801  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9802  size_t alloc1stCount = 0;
9803  const VkDeviceSize freeSpace1stTo2ndEnd =
9804  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9805  while(lastOffset < freeSpace1stTo2ndEnd)
9806  {
9807  // Find next non-null allocation or move nextAllocIndex to the end.
9808  while(nextAlloc1stIndex < suballoc1stCount &&
9809  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9810  {
9811  ++nextAlloc1stIndex;
9812  }
9813 
9814  // Found non-null allocation.
9815  if(nextAlloc1stIndex < suballoc1stCount)
9816  {
9817  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9818 
9819  // 1. Process free space before this allocation.
9820  if(lastOffset < suballoc.offset)
9821  {
9822  // There is free space from lastOffset to suballoc.offset.
9823  ++unusedRangeCount;
9824  }
9825 
9826  // 2. Process this allocation.
9827  // There is allocation with suballoc.offset, suballoc.size.
9828  ++alloc1stCount;
9829  usedBytes += suballoc.size;
9830 
9831  // 3. Prepare for next iteration.
9832  lastOffset = suballoc.offset + suballoc.size;
9833  ++nextAlloc1stIndex;
9834  }
9835  // We are at the end.
9836  else
9837  {
9838  if(lastOffset < freeSpace1stTo2ndEnd)
9839  {
9840  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9841  ++unusedRangeCount;
9842  }
9843 
9844  // End of loop.
9845  lastOffset = freeSpace1stTo2ndEnd;
9846  }
9847  }
9848 
9849  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9850  {
9851  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9852  while(lastOffset < size)
9853  {
9854  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9855  while(nextAlloc2ndIndex != SIZE_MAX &&
9856  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9857  {
9858  --nextAlloc2ndIndex;
9859  }
9860 
9861  // Found non-null allocation.
9862  if(nextAlloc2ndIndex != SIZE_MAX)
9863  {
9864  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9865 
9866  // 1. Process free space before this allocation.
9867  if(lastOffset < suballoc.offset)
9868  {
9869  // There is free space from lastOffset to suballoc.offset.
9870  ++unusedRangeCount;
9871  }
9872 
9873  // 2. Process this allocation.
9874  // There is allocation with suballoc.offset, suballoc.size.
9875  ++alloc2ndCount;
9876  usedBytes += suballoc.size;
9877 
9878  // 3. Prepare for next iteration.
9879  lastOffset = suballoc.offset + suballoc.size;
9880  --nextAlloc2ndIndex;
9881  }
9882  // We are at the end.
9883  else
9884  {
9885  if(lastOffset < size)
9886  {
9887  // There is free space from lastOffset to size.
9888  ++unusedRangeCount;
9889  }
9890 
9891  // End of loop.
9892  lastOffset = size;
9893  }
9894  }
9895  }
9896 
9897  const VkDeviceSize unusedBytes = size - usedBytes;
9898  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9899 
9900  // SECOND PASS
9901  lastOffset = 0;
9902 
9903  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9904  {
9905  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9906  size_t nextAlloc2ndIndex = 0;
9907  while(lastOffset < freeSpace2ndTo1stEnd)
9908  {
9909  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9910  while(nextAlloc2ndIndex < suballoc2ndCount &&
9911  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9912  {
9913  ++nextAlloc2ndIndex;
9914  }
9915 
9916  // Found non-null allocation.
9917  if(nextAlloc2ndIndex < suballoc2ndCount)
9918  {
9919  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9920 
9921  // 1. Process free space before this allocation.
9922  if(lastOffset < suballoc.offset)
9923  {
9924  // There is free space from lastOffset to suballoc.offset.
9925  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9926  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9927  }
9928 
9929  // 2. Process this allocation.
9930  // There is allocation with suballoc.offset, suballoc.size.
9931  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9932 
9933  // 3. Prepare for next iteration.
9934  lastOffset = suballoc.offset + suballoc.size;
9935  ++nextAlloc2ndIndex;
9936  }
9937  // We are at the end.
9938  else
9939  {
9940  if(lastOffset < freeSpace2ndTo1stEnd)
9941  {
9942  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9943  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9944  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9945  }
9946 
9947  // End of loop.
9948  lastOffset = freeSpace2ndTo1stEnd;
9949  }
9950  }
9951  }
9952 
9953  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9954  while(lastOffset < freeSpace1stTo2ndEnd)
9955  {
9956  // Find next non-null allocation or move nextAllocIndex to the end.
9957  while(nextAlloc1stIndex < suballoc1stCount &&
9958  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9959  {
9960  ++nextAlloc1stIndex;
9961  }
9962 
9963  // Found non-null allocation.
9964  if(nextAlloc1stIndex < suballoc1stCount)
9965  {
9966  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9967 
9968  // 1. Process free space before this allocation.
9969  if(lastOffset < suballoc.offset)
9970  {
9971  // There is free space from lastOffset to suballoc.offset.
9972  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9973  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9974  }
9975 
9976  // 2. Process this allocation.
9977  // There is allocation with suballoc.offset, suballoc.size.
9978  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9979 
9980  // 3. Prepare for next iteration.
9981  lastOffset = suballoc.offset + suballoc.size;
9982  ++nextAlloc1stIndex;
9983  }
9984  // We are at the end.
9985  else
9986  {
9987  if(lastOffset < freeSpace1stTo2ndEnd)
9988  {
9989  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9990  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9991  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9992  }
9993 
9994  // End of loop.
9995  lastOffset = freeSpace1stTo2ndEnd;
9996  }
9997  }
9998 
9999  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10000  {
10001  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10002  while(lastOffset < size)
10003  {
10004  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10005  while(nextAlloc2ndIndex != SIZE_MAX &&
10006  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10007  {
10008  --nextAlloc2ndIndex;
10009  }
10010 
10011  // Found non-null allocation.
10012  if(nextAlloc2ndIndex != SIZE_MAX)
10013  {
10014  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10015 
10016  // 1. Process free space before this allocation.
10017  if(lastOffset < suballoc.offset)
10018  {
10019  // There is free space from lastOffset to suballoc.offset.
10020  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10021  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10022  }
10023 
10024  // 2. Process this allocation.
10025  // There is allocation with suballoc.offset, suballoc.size.
10026  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10027 
10028  // 3. Prepare for next iteration.
10029  lastOffset = suballoc.offset + suballoc.size;
10030  --nextAlloc2ndIndex;
10031  }
10032  // We are at the end.
10033  else
10034  {
10035  if(lastOffset < size)
10036  {
10037  // There is free space from lastOffset to size.
10038  const VkDeviceSize unusedRangeSize = size - lastOffset;
10039  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10040  }
10041 
10042  // End of loop.
10043  lastOffset = size;
10044  }
10045  }
10046  }
10047 
10048  PrintDetailedMap_End(json);
10049 }
10050 #endif // #if VMA_STATS_STRING_ENABLED
10051 
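// Allocation requests are dispatched by address direction: upperAddress comes from
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT and allocates from the top of the block
// (turning the 2nd vector into a double stack), while the default lower-address path
// appends to the end of the 1st vector or wraps around into a ring buffer.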
10052 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10053  uint32_t currentFrameIndex,
10054  uint32_t frameInUseCount,
10055  VkDeviceSize bufferImageGranularity,
10056  VkDeviceSize allocSize,
10057  VkDeviceSize allocAlignment,
10058  bool upperAddress,
10059  VmaSuballocationType allocType,
10060  bool canMakeOtherLost,
10061  uint32_t strategy,
10062  VmaAllocationRequest* pAllocationRequest)
10063 {
10064  VMA_ASSERT(allocSize > 0);
10065  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10066  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10067  VMA_HEAVY_ASSERT(Validate());
10068  return upperAddress ?
10069  CreateAllocationRequest_UpperAddress(
10070  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10071  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10072  CreateAllocationRequest_LowerAddress(
10073  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10074  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10075 }
10076 
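// Upper-address strategy (double stack): the candidate offset starts at
// (top of free space - allocSize), is lowered further for VMA_DEBUG_MARGIN, alignment
// (VmaAlignDown) and granularity conflicts, and the allocation succeeds only if it
// still ends above the end of the 1st vector plus the debug margin.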
10077 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10078  uint32_t currentFrameIndex,
10079  uint32_t frameInUseCount,
10080  VkDeviceSize bufferImageGranularity,
10081  VkDeviceSize allocSize,
10082  VkDeviceSize allocAlignment,
10083  VmaSuballocationType allocType,
10084  bool canMakeOtherLost,
10085  uint32_t strategy,
10086  VmaAllocationRequest* pAllocationRequest)
10087 {
10088  const VkDeviceSize size = GetSize();
10089  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10090  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10091 
10092  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10093  {
10094  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10095  return false;
10096  }
10097 
10098  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10099  if(allocSize > size)
10100  {
10101  return false;
10102  }
10103  VkDeviceSize resultBaseOffset = size - allocSize;
10104  if(!suballocations2nd.empty())
10105  {
10106  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10107  resultBaseOffset = lastSuballoc.offset - allocSize;
10108  if(allocSize > lastSuballoc.offset)
10109  {
10110  return false;
10111  }
10112  }
10113 
10114  // Start from offset equal to end of free space.
10115  VkDeviceSize resultOffset = resultBaseOffset;
10116 
10117  // Apply VMA_DEBUG_MARGIN at the end.
10118  if(VMA_DEBUG_MARGIN > 0)
10119  {
10120  if(resultOffset < VMA_DEBUG_MARGIN)
10121  {
10122  return false;
10123  }
10124  resultOffset -= VMA_DEBUG_MARGIN;
10125  }
10126 
10127  // Apply alignment.
10128  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10129 
10130  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10131  // Make bigger alignment if necessary.
10132  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10133  {
10134  bool bufferImageGranularityConflict = false;
10135  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10136  {
10137  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10138  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10139  {
10140  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10141  {
10142  bufferImageGranularityConflict = true;
10143  break;
10144  }
10145  }
10146  else
10147  // Already on previous page.
10148  break;
10149  }
10150  if(bufferImageGranularityConflict)
10151  {
10152  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10153  }
10154  }
10155 
10156  // There is enough free space.
10157  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10158  suballocations1st.back().offset + suballocations1st.back().size :
10159  0;
10160  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10161  {
10162  // Check previous suballocations for BufferImageGranularity conflicts.
10163  // If conflict exists, allocation cannot be made here.
10164  if(bufferImageGranularity > 1)
10165  {
10166  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10167  {
10168  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10169  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10170  {
10171  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10172  {
10173  return false;
10174  }
10175  }
10176  else
10177  {
10178  // Already on next page.
10179  break;
10180  }
10181  }
10182  }
10183 
10184  // All tests passed: Success.
10185  pAllocationRequest->offset = resultOffset;
10186  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10187  pAllocationRequest->sumItemSize = 0;
10188  // pAllocationRequest->item unused.
10189  pAllocationRequest->itemsToMakeLostCount = 0;
10190  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10191  return true;
10192  }
10193 
10194  return false;
10195 }
10196 
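// Lower-address strategy: first try to append after the last item of the 1st vector
// (bounded by the bottom of the 2nd stack in double-stack mode, or by the block size);
// if that fails, the code after this point attempts to wrap around and start or extend
// the 2nd vector as a ring buffer, possibly making old allocations lost.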
10197 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10198  uint32_t currentFrameIndex,
10199  uint32_t frameInUseCount,
10200  VkDeviceSize bufferImageGranularity,
10201  VkDeviceSize allocSize,
10202  VkDeviceSize allocAlignment,
10203  VmaSuballocationType allocType,
10204  bool canMakeOtherLost,
10205  uint32_t strategy,
10206  VmaAllocationRequest* pAllocationRequest)
10207 {
10208  const VkDeviceSize size = GetSize();
10209  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10210  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10211 
10212  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10213  {
10214  // Try to allocate at the end of 1st vector.
10215 
10216  VkDeviceSize resultBaseOffset = 0;
10217  if(!suballocations1st.empty())
10218  {
10219  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10220  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10221  }
10222 
10223  // Start from offset equal to beginning of free space.
10224  VkDeviceSize resultOffset = resultBaseOffset;
10225 
10226  // Apply VMA_DEBUG_MARGIN at the beginning.
10227  if(VMA_DEBUG_MARGIN > 0)
10228  {
10229  resultOffset += VMA_DEBUG_MARGIN;
10230  }
10231 
10232  // Apply alignment.
10233  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10234 
10235  // Check previous suballocations for BufferImageGranularity conflicts.
10236  // Make bigger alignment if necessary.
10237  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10238  {
10239  bool bufferImageGranularityConflict = false;
10240  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10241  {
10242  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10243  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10244  {
10245  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10246  {
10247  bufferImageGranularityConflict = true;
10248  break;
10249  }
10250  }
10251  else
10252  // Already on previous page.
10253  break;
10254  }
10255  if(bufferImageGranularityConflict)
10256  {
10257  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10258  }
10259  }
10260 
10261  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10262  suballocations2nd.back().offset : size;
10263 
10264  // There is enough free space at the end after alignment.
10265  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10266  {
10267  // Check next suballocations for BufferImageGranularity conflicts.
10268  // If conflict exists, allocation cannot be made here.
10269  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10270  {
10271  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10272  {
10273  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10274  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10275  {
10276  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10277  {
10278  return false;
10279  }
10280  }
10281  else
10282  {
10283  // Already on previous page.
10284  break;
10285  }
10286  }
10287  }
10288 
10289  // All tests passed: Success.
10290  pAllocationRequest->offset = resultOffset;
10291  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10292  pAllocationRequest->sumItemSize = 0;
10293  // pAllocationRequest->item, customData unused.
10294  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10295  pAllocationRequest->itemsToMakeLostCount = 0;
10296  return true;
10297  }
10298  }
10299 
10300  // Wrap around to the end of the 2nd vector and try to allocate there,
10301  // treating the beginning of the 1st vector as the end of the free space.
10302  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10303  {
10304  VMA_ASSERT(!suballocations1st.empty());
10305 
10306  VkDeviceSize resultBaseOffset = 0;
10307  if(!suballocations2nd.empty())
10308  {
10309  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10310  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10311  }
10312 
10313  // Start from offset equal to beginning of free space.
10314  VkDeviceSize resultOffset = resultBaseOffset;
10315 
10316  // Apply VMA_DEBUG_MARGIN at the beginning.
10317  if(VMA_DEBUG_MARGIN > 0)
10318  {
10319  resultOffset += VMA_DEBUG_MARGIN;
10320  }
10321 
10322  // Apply alignment.
10323  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10324 
10325  // Check previous suballocations for BufferImageGranularity conflicts.
10326  // Make bigger alignment if necessary.
10327  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10328  {
10329  bool bufferImageGranularityConflict = false;
10330  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10331  {
10332  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10333  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10334  {
10335  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10336  {
10337  bufferImageGranularityConflict = true;
10338  break;
10339  }
10340  }
10341  else
10342  // Already on previous page.
10343  break;
10344  }
10345  if(bufferImageGranularityConflict)
10346  {
10347  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10348  }
10349  }
10350 
10351  pAllocationRequest->itemsToMakeLostCount = 0;
10352  pAllocationRequest->sumItemSize = 0;
10353  size_t index1st = m_1stNullItemsBeginCount;
10354 
10355  if(canMakeOtherLost)
10356  {
10357  while(index1st < suballocations1st.size() &&
10358  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10359  {
10360  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10361  const VmaSuballocation& suballoc = suballocations1st[index1st];
10362  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10363  {
10364  // No problem.
10365  }
10366  else
10367  {
10368  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10369  if(suballoc.hAllocation->CanBecomeLost() &&
10370  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10371  {
10372  ++pAllocationRequest->itemsToMakeLostCount;
10373  pAllocationRequest->sumItemSize += suballoc.size;
10374  }
10375  else
10376  {
10377  return false;
10378  }
10379  }
10380  ++index1st;
10381  }
10382 
10383  // Check next suballocations for BufferImageGranularity conflicts.
10384  // If conflict exists, we must mark more allocations lost or fail.
10385  if(bufferImageGranularity > 1)
10386  {
10387  while(index1st < suballocations1st.size())
10388  {
10389  const VmaSuballocation& suballoc = suballocations1st[index1st];
10390  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10391  {
10392  if(suballoc.hAllocation != VK_NULL_HANDLE)
10393  {
10394  // Conservative: any allocation on the same page must be made lost, without checking the actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10395  if(suballoc.hAllocation->CanBecomeLost() &&
10396  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10397  {
10398  ++pAllocationRequest->itemsToMakeLostCount;
10399  pAllocationRequest->sumItemSize += suballoc.size;
10400  }
10401  else
10402  {
10403  return false;
10404  }
10405  }
10406  }
10407  else
10408  {
10409  // Already on next page.
10410  break;
10411  }
10412  ++index1st;
10413  }
10414  }
10415 
10416  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10417  if(index1st == suballocations1st.size() &&
10418  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10419  {
10420  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
10421  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10422  }
10423  }
10424 
10425  // There is enough free space at the end after alignment.
10426  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10427  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10428  {
10429  // Check next suballocations for BufferImageGranularity conflicts.
10430  // If conflict exists, allocation cannot be made here.
10431  if(bufferImageGranularity > 1)
10432  {
10433  for(size_t nextSuballocIndex = index1st;
10434  nextSuballocIndex < suballocations1st.size();
10435  nextSuballocIndex++)
10436  {
10437  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10438  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10439  {
10440  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10441  {
10442  return false;
10443  }
10444  }
10445  else
10446  {
10447  // Already on next page.
10448  break;
10449  }
10450  }
10451  }
10452 
10453  // All tests passed: Success.
10454  pAllocationRequest->offset = resultOffset;
10455  pAllocationRequest->sumFreeSize =
10456  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10457  - resultBaseOffset
10458  - pAllocationRequest->sumItemSize;
10459  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10460  // pAllocationRequest->item, customData unused.
10461  return true;
10462  }
10463  }
10464 
10465  return false;
10466 }
10467 
10468 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10469  uint32_t currentFrameIndex,
10470  uint32_t frameInUseCount,
10471  VmaAllocationRequest* pAllocationRequest)
10472 {
10473  if(pAllocationRequest->itemsToMakeLostCount == 0)
10474  {
10475  return true;
10476  }
10477 
10478  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10479 
10480  // We always start from 1st.
10481  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10482  size_t index = m_1stNullItemsBeginCount;
10483  size_t madeLostCount = 0;
10484  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10485  {
10486  if(index == suballocations->size())
10487  {
10488  index = 0;
10489  // If we reach the end of the current vector, wrap to the beginning: of the 2nd vector in ring-buffer mode, otherwise back to the 1st.
10490  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10491  {
10492  suballocations = &AccessSuballocations2nd();
10493  }
10494  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10495  // suballocations continues pointing at AccessSuballocations1st().
10496  VMA_ASSERT(!suballocations->empty());
10497  }
10498  VmaSuballocation& suballoc = (*suballocations)[index];
10499  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10500  {
10501  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10502  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10503  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10504  {
10505  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10506  suballoc.hAllocation = VK_NULL_HANDLE;
10507  m_SumFreeSize += suballoc.size;
10508  if(suballocations == &AccessSuballocations1st())
10509  {
10510  ++m_1stNullItemsMiddleCount;
10511  }
10512  else
10513  {
10514  ++m_2ndNullItemsCount;
10515  }
10516  ++madeLostCount;
10517  }
10518  else
10519  {
10520  return false;
10521  }
10522  }
10523  ++index;
10524  }
10525 
10526  CleanupAfterFree();
10527  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10528 
10529  return true;
10530 }
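
// Illustrative sketch (not part of the library): an allocation may be made
// lost only if it opted in (CanBecomeLost()) and has not been used for more
// than frameInUseCount frames. The eligibility predicate used above, as a
// hypothetical standalone function (disabled, never compiled):
#if 0
#include <cstdint>
static bool CanMakeLostNowDemo(uint32_t lastUseFrame, uint32_t frameInUseCount, uint32_t currentFrame)
{
    // E.g. lastUseFrame = 10, frameInUseCount = 2, currentFrame = 13:
    // 10 + 2 < 13, so the allocation is stale enough to be sacrificed.
    return lastUseFrame + frameInUseCount < currentFrame;
}
#endif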
10531 
10532 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10533 {
10534  uint32_t lostAllocationCount = 0;
10535 
10536  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10537  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10538  {
10539  VmaSuballocation& suballoc = suballocations1st[i];
10540  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10541  suballoc.hAllocation->CanBecomeLost() &&
10542  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10543  {
10544  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10545  suballoc.hAllocation = VK_NULL_HANDLE;
10546  ++m_1stNullItemsMiddleCount;
10547  m_SumFreeSize += suballoc.size;
10548  ++lostAllocationCount;
10549  }
10550  }
10551 
10552  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10553  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10554  {
10555  VmaSuballocation& suballoc = suballocations2nd[i];
10556  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10557  suballoc.hAllocation->CanBecomeLost() &&
10558  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10559  {
10560  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10561  suballoc.hAllocation = VK_NULL_HANDLE;
10562  ++m_2ndNullItemsCount;
10563  m_SumFreeSize += suballoc.size;
10564  ++lostAllocationCount;
10565  }
10566  }
10567 
10568  if(lostAllocationCount)
10569  {
10570  CleanupAfterFree();
10571  }
10572 
10573  return lostAllocationCount;
10574 }
10575 
10576 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10577 {
10578  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10579  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10580  {
10581  const VmaSuballocation& suballoc = suballocations1st[i];
10582  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10583  {
10584  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10585  {
10586  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10587  return VK_ERROR_VALIDATION_FAILED_EXT;
10588  }
10589  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10590  {
10591  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10592  return VK_ERROR_VALIDATION_FAILED_EXT;
10593  }
10594  }
10595  }
10596 
10597  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10598  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10599  {
10600  const VmaSuballocation& suballoc = suballocations2nd[i];
10601  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10602  {
10603  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10604  {
10605  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10606  return VK_ERROR_VALIDATION_FAILED_EXT;
10607  }
10608  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10609  {
10610  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10611  return VK_ERROR_VALIDATION_FAILED_EXT;
10612  }
10613  }
10614  }
10615 
10616  return VK_SUCCESS;
10617 }
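
// Note on the checks above: with VMA_DEBUG_MARGIN > 0 and
// VMA_DEBUG_DETECT_CORRUPTION enabled, the margins around every allocation
// are filled with a known magic pattern (see
// VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation), so an overrun by a
// neighboring write is caught here. Layout within the block:
//
//   [magic @ offset - VMA_DEBUG_MARGIN][allocation @ offset, size bytes][magic @ offset + size]
//
// CheckCorruption() revalidates the pattern on both sides of every live
// suballocation in both vectors.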
10618 
10619 void VmaBlockMetadata_Linear::Alloc(
10620  const VmaAllocationRequest& request,
10621  VmaSuballocationType type,
10622  VkDeviceSize allocSize,
10623  VmaAllocation hAllocation)
10624 {
10625  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10626 
10627  switch(request.type)
10628  {
10629  case VmaAllocationRequestType::UpperAddress:
10630  {
10631  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10632  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10633  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10634  suballocations2nd.push_back(newSuballoc);
10635  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10636  }
10637  break;
10638  case VmaAllocationRequestType::EndOf1st:
10639  {
10640  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10641 
10642  VMA_ASSERT(suballocations1st.empty() ||
10643  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10644  // Check if it fits before the end of the block.
10645  VMA_ASSERT(request.offset + allocSize <= GetSize());
10646 
10647  suballocations1st.push_back(newSuballoc);
10648  }
10649  break;
10650  case VmaAllocationRequestType::EndOf2nd:
10651  {
10652  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10653  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10654  VMA_ASSERT(!suballocations1st.empty() &&
10655  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10656  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10657 
10658  switch(m_2ndVectorMode)
10659  {
10660  case SECOND_VECTOR_EMPTY:
10661  // First allocation from second part ring buffer.
10662  VMA_ASSERT(suballocations2nd.empty());
10663  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10664  break;
10665  case SECOND_VECTOR_RING_BUFFER:
10666  // 2-part ring buffer is already started.
10667  VMA_ASSERT(!suballocations2nd.empty());
10668  break;
10669  case SECOND_VECTOR_DOUBLE_STACK:
10670  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10671  break;
10672  default:
10673  VMA_ASSERT(0);
10674  }
10675 
10676  suballocations2nd.push_back(newSuballoc);
10677  }
10678  break;
10679  default:
10680  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10681  }
10682 
10683  m_SumFreeSize -= newSuballoc.size;
10684 }
10685 
10686 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10687 {
10688  FreeAtOffset(allocation->GetOffset());
10689 }
10690 
10691 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10692 {
10693  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10694  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10695 
10696  if(!suballocations1st.empty())
10697  {
10698  // Freeing the first allocation: mark it as the next empty item at the beginning.
10699  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10700  if(firstSuballoc.offset == offset)
10701  {
10702  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10703  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10704  m_SumFreeSize += firstSuballoc.size;
10705  ++m_1stNullItemsBeginCount;
10706  CleanupAfterFree();
10707  return;
10708  }
10709  }
10710 
10711  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10712  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10713  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10714  {
10715  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10716  if(lastSuballoc.offset == offset)
10717  {
10718  m_SumFreeSize += lastSuballoc.size;
10719  suballocations2nd.pop_back();
10720  CleanupAfterFree();
10721  return;
10722  }
10723  }
10724  // Last allocation in 1st vector.
10725  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10726  {
10727  VmaSuballocation& lastSuballoc = suballocations1st.back();
10728  if(lastSuballoc.offset == offset)
10729  {
10730  m_SumFreeSize += lastSuballoc.size;
10731  suballocations1st.pop_back();
10732  CleanupAfterFree();
10733  return;
10734  }
10735  }
10736 
10737  // Item from the middle of 1st vector.
10738  {
10739  VmaSuballocation refSuballoc;
10740  refSuballoc.offset = offset;
10741  // The rest of the members intentionally stay uninitialized for better performance.
10742  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
10743  suballocations1st.begin() + m_1stNullItemsBeginCount,
10744  suballocations1st.end(),
10745  refSuballoc,
10746  VmaSuballocationOffsetLess());
10747  if(it != suballocations1st.end())
10748  {
10749  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10750  it->hAllocation = VK_NULL_HANDLE;
10751  ++m_1stNullItemsMiddleCount;
10752  m_SumFreeSize += it->size;
10753  CleanupAfterFree();
10754  return;
10755  }
10756  }
10757 
10758  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10759  {
10760  // Item from the middle of 2nd vector.
10761  VmaSuballocation refSuballoc;
10762  refSuballoc.offset = offset;
10763  // The rest of the members intentionally stay uninitialized for better performance.
10764  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10765  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
10766  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
10767  if(it != suballocations2nd.end())
10768  {
10769  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10770  it->hAllocation = VK_NULL_HANDLE;
10771  ++m_2ndNullItemsCount;
10772  m_SumFreeSize += it->size;
10773  CleanupAfterFree();
10774  return;
10775  }
10776  }
10777 
10778  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10779 }
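
// Illustrative sketch (not part of the library): locating a middle item in a
// vector kept sorted by offset is an ordinary binary search. An equivalent
// standard-library formulation with hypothetical types (disabled, never
// compiled):
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>
struct SubDemo { uint64_t offset; };
static SubDemo* FindByOffsetDemo(std::vector<SubDemo>& v, uint64_t offset)
{
    auto it = std::lower_bound(v.begin(), v.end(), offset,
        [](const SubDemo& s, uint64_t off) { return s.offset < off; });
    return (it != v.end() && it->offset == offset) ? &*it : nullptr;
}
#endif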
10780 
10781 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10782 {
10783  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10784  const size_t suballocCount = AccessSuballocations1st().size();
10785  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10786 }
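
// Worked example for the heuristic above: with suballocCount = s and
// nullItemCount = n, the live count is a = s - n, and compaction triggers
// once 2*n >= 3*a, i.e. once null items make up at least 60% of the vector
// (n >= 1.5*a) - and only when s > 32, so small vectors are never compacted.
// E.g. s = 100, n = 60, a = 40: 2*60 = 120 >= 3*40 = 120, so compact.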
10787 
10788 void VmaBlockMetadata_Linear::CleanupAfterFree()
10789 {
10790  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10791  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10792 
10793  if(IsEmpty())
10794  {
10795  suballocations1st.clear();
10796  suballocations2nd.clear();
10797  m_1stNullItemsBeginCount = 0;
10798  m_1stNullItemsMiddleCount = 0;
10799  m_2ndNullItemsCount = 0;
10800  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10801  }
10802  else
10803  {
10804  const size_t suballoc1stCount = suballocations1st.size();
10805  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10806  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10807 
10808  // Find more null items at the beginning of 1st vector.
10809  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10810  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10811  {
10812  ++m_1stNullItemsBeginCount;
10813  --m_1stNullItemsMiddleCount;
10814  }
10815 
10816  // Find more null items at the end of 1st vector.
10817  while(m_1stNullItemsMiddleCount > 0 &&
10818  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10819  {
10820  --m_1stNullItemsMiddleCount;
10821  suballocations1st.pop_back();
10822  }
10823 
10824  // Find more null items at the end of 2nd vector.
10825  while(m_2ndNullItemsCount > 0 &&
10826  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10827  {
10828  --m_2ndNullItemsCount;
10829  suballocations2nd.pop_back();
10830  }
10831 
10832  // Find more null items at the beginning of 2nd vector.
10833  while(m_2ndNullItemsCount > 0 &&
10834  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10835  {
10836  --m_2ndNullItemsCount;
10837  VmaVectorRemove(suballocations2nd, 0);
10838  }
10839 
10840  if(ShouldCompact1st())
10841  {
10842  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10843  size_t srcIndex = m_1stNullItemsBeginCount;
10844  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10845  {
10846  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10847  {
10848  ++srcIndex;
10849  }
10850  if(dstIndex != srcIndex)
10851  {
10852  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10853  }
10854  ++srcIndex;
10855  }
10856  suballocations1st.resize(nonNullItemCount);
10857  m_1stNullItemsBeginCount = 0;
10858  m_1stNullItemsMiddleCount = 0;
10859  }
10860 
10861  // 2nd vector became empty.
10862  if(suballocations2nd.empty())
10863  {
10864  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10865  }
10866 
10867  // 1st vector became empty.
10868  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10869  {
10870  suballocations1st.clear();
10871  m_1stNullItemsBeginCount = 0;
10872 
10873  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10874  {
10875  // Swap 1st with 2nd. Now 2nd is empty.
10876  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10877  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10878  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10879  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10880  {
10881  ++m_1stNullItemsBeginCount;
10882  --m_1stNullItemsMiddleCount;
10883  }
10884  m_2ndNullItemsCount = 0;
10885  m_1stVectorIndex ^= 1;
10886  }
10887  }
10888  }
10889 
10890  VMA_HEAVY_ASSERT(Validate());
10891 }
10892 
10893 
10894 ////////////////////////////////////////////////////////////////////////////////
10895 // class VmaBlockMetadata_Buddy
10896 
10897 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10898  VmaBlockMetadata(hAllocator),
10899  m_Root(VMA_NULL),
10900  m_AllocationCount(0),
10901  m_FreeCount(1),
10902  m_SumFreeSize(0)
10903 {
10904  memset(m_FreeList, 0, sizeof(m_FreeList));
10905 }
10906 
10907 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10908 {
10909  DeleteNode(m_Root);
10910 }
10911 
10912 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10913 {
10914  VmaBlockMetadata::Init(size);
10915 
10916  m_UsableSize = VmaPrevPow2(size);
10917  m_SumFreeSize = m_UsableSize;
10918 
10919  // Calculate m_LevelCount.
10920  m_LevelCount = 1;
10921  while(m_LevelCount < MAX_LEVELS &&
10922  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10923  {
10924  ++m_LevelCount;
10925  }
10926 
10927  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10928  rootNode->offset = 0;
10929  rootNode->type = Node::TYPE_FREE;
10930  rootNode->parent = VMA_NULL;
10931  rootNode->buddy = VMA_NULL;
10932 
10933  m_Root = rootNode;
10934  AddToFreeListFront(0, rootNode);
10935 }
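
// Worked example for Init() above, with hypothetical numbers: for
// size = 1000, m_UsableSize = VmaPrevPow2(1000) = 512, so the trailing 488
// bytes of the block are unusable by the buddy algorithm (reported via
// GetUnusableSize()). Node sizes then halve per level: level 0 = 512,
// level 1 = 256, level 2 = 128, and so on, until another level would drop
// below MIN_NODE_SIZE or MAX_LEVELS is reached.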
10936 
10937 bool VmaBlockMetadata_Buddy::Validate() const
10938 {
10939  // Validate tree.
10940  ValidationContext ctx;
10941  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10942  {
10943  VMA_VALIDATE(false && "ValidateNode failed.");
10944  }
10945  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10946  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10947 
10948  // Validate free node lists.
10949  for(uint32_t level = 0; level < m_LevelCount; ++level)
10950  {
10951  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10952  m_FreeList[level].front->free.prev == VMA_NULL);
10953 
10954  for(Node* node = m_FreeList[level].front;
10955  node != VMA_NULL;
10956  node = node->free.next)
10957  {
10958  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10959 
10960  if(node->free.next == VMA_NULL)
10961  {
10962  VMA_VALIDATE(m_FreeList[level].back == node);
10963  }
10964  else
10965  {
10966  VMA_VALIDATE(node->free.next->free.prev == node);
10967  }
10968  }
10969  }
10970 
10971  // Validate that free lists at higher levels are empty.
10972  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10973  {
10974  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10975  }
10976 
10977  return true;
10978 }
10979 
10980 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10981 {
10982  for(uint32_t level = 0; level < m_LevelCount; ++level)
10983  {
10984  if(m_FreeList[level].front != VMA_NULL)
10985  {
10986  return LevelToNodeSize(level);
10987  }
10988  }
10989  return 0;
10990 }
10991 
10992 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10993 {
10994  const VkDeviceSize unusableSize = GetUnusableSize();
10995 
10996  outInfo.blockCount = 1;
10997 
10998  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10999  outInfo.usedBytes = outInfo.unusedBytes = 0;
11000 
11001  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11002  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11003  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11004 
11005  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11006 
11007  if(unusableSize > 0)
11008  {
11009  ++outInfo.unusedRangeCount;
11010  outInfo.unusedBytes += unusableSize;
11011  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11012  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11013  }
11014 }
11015 
11016 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11017 {
11018  const VkDeviceSize unusableSize = GetUnusableSize();
11019 
11020  inoutStats.size += GetSize();
11021  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11022  inoutStats.allocationCount += m_AllocationCount;
11023  inoutStats.unusedRangeCount += m_FreeCount;
11024  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11025 
11026  if(unusableSize > 0)
11027  {
11028  ++inoutStats.unusedRangeCount;
11029  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11030  }
11031 }
11032 
11033 #if VMA_STATS_STRING_ENABLED
11034 
11035 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11036 {
11037  // TODO optimize
11038  VmaStatInfo stat;
11039  CalcAllocationStatInfo(stat);
11040 
11041  PrintDetailedMap_Begin(
11042  json,
11043  stat.unusedBytes,
11044  stat.allocationCount,
11045  stat.unusedRangeCount);
11046 
11047  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11048 
11049  const VkDeviceSize unusableSize = GetUnusableSize();
11050  if(unusableSize > 0)
11051  {
11052  PrintDetailedMap_UnusedRange(json,
11053  m_UsableSize, // offset
11054  unusableSize); // size
11055  }
11056 
11057  PrintDetailedMap_End(json);
11058 }
11059 
11060 #endif // #if VMA_STATS_STRING_ENABLED
11061 
11062 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11063  uint32_t currentFrameIndex,
11064  uint32_t frameInUseCount,
11065  VkDeviceSize bufferImageGranularity,
11066  VkDeviceSize allocSize,
11067  VkDeviceSize allocAlignment,
11068  bool upperAddress,
11069  VmaSuballocationType allocType,
11070  bool canMakeOtherLost,
11071  uint32_t strategy,
11072  VmaAllocationRequest* pAllocationRequest)
11073 {
11074  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11075 
11076  // Simple way to respect bufferImageGranularity; may be optimized some day.
11077  // Whenever the allocation might be an OPTIMAL image, inflate its alignment and size up to bufferImageGranularity.
11078  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11079  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11080  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11081  {
11082  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11083  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11084  }
11085 
11086  if(allocSize > m_UsableSize)
11087  {
11088  return false;
11089  }
11090 
11091  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11092  for(uint32_t level = targetLevel + 1; level--; )
11093  {
11094  for(Node* freeNode = m_FreeList[level].front;
11095  freeNode != VMA_NULL;
11096  freeNode = freeNode->free.next)
11097  {
11098  if(freeNode->offset % allocAlignment == 0)
11099  {
11100  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11101  pAllocationRequest->offset = freeNode->offset;
11102  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11103  pAllocationRequest->sumItemSize = 0;
11104  pAllocationRequest->itemsToMakeLostCount = 0;
11105  pAllocationRequest->customData = (void*)(uintptr_t)level;
11106  return true;
11107  }
11108  }
11109  }
11110 
11111  return false;
11112 }
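
// Note on the search above: `for(uint32_t level = targetLevel + 1; level--; )`
// visits levels targetLevel, targetLevel - 1, ..., 0, i.e. it first tries the
// smallest node size that fits the request (least internal waste) and then
// falls back to progressively larger free nodes, which Alloc() later splits
// back down to targetLevel.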
11113 
11114 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11115  uint32_t currentFrameIndex,
11116  uint32_t frameInUseCount,
11117  VmaAllocationRequest* pAllocationRequest)
11118 {
11119  /*
11120  Lost allocations are not supported in buddy allocator at the moment.
11121  Support might be added in the future.
11122  */
11123  return pAllocationRequest->itemsToMakeLostCount == 0;
11124 }
11125 
11126 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11127 {
11128  /*
11129  Lost allocations are not supported in buddy allocator at the moment.
11130  Support might be added in the future.
11131  */
11132  return 0;
11133 }
11134 
11135 void VmaBlockMetadata_Buddy::Alloc(
11136  const VmaAllocationRequest& request,
11137  VmaSuballocationType type,
11138  VkDeviceSize allocSize,
11139  VmaAllocation hAllocation)
11140 {
11141  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11142 
11143  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11144  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11145 
11146  Node* currNode = m_FreeList[currLevel].front;
11147  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11148  while(currNode->offset != request.offset)
11149  {
11150  currNode = currNode->free.next;
11151  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11152  }
11153 
11154  // Go down, splitting free nodes.
11155  while(currLevel < targetLevel)
11156  {
11157  // currNode is already the first free node at currLevel.
11158  // Remove it from the list of free nodes at this level.
11159  RemoveFromFreeList(currLevel, currNode);
11160 
11161  const uint32_t childrenLevel = currLevel + 1;
11162 
11163  // Create two free sub-nodes.
11164  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11165  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11166 
11167  leftChild->offset = currNode->offset;
11168  leftChild->type = Node::TYPE_FREE;
11169  leftChild->parent = currNode;
11170  leftChild->buddy = rightChild;
11171 
11172  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11173  rightChild->type = Node::TYPE_FREE;
11174  rightChild->parent = currNode;
11175  rightChild->buddy = leftChild;
11176 
11177  // Convert currNode to split type.
11178  currNode->type = Node::TYPE_SPLIT;
11179  currNode->split.leftChild = leftChild;
11180 
11181  // Add child nodes to free list. Order is important!
11182  AddToFreeListFront(childrenLevel, rightChild);
11183  AddToFreeListFront(childrenLevel, leftChild);
11184 
11185  ++m_FreeCount;
11186  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11187  ++currLevel;
11188  currNode = m_FreeList[currLevel].front;
11189 
11190  /*
11191  We can be sure that currNode, as left child of node previously split,
11192  also fulfills the alignment requirement.
11193  */
11194  }
11195 
11196  // Remove from free list.
11197  VMA_ASSERT(currLevel == targetLevel &&
11198  currNode != VMA_NULL &&
11199  currNode->type == Node::TYPE_FREE);
11200  RemoveFromFreeList(currLevel, currNode);
11201 
11202  // Convert to allocation node.
11203  currNode->type = Node::TYPE_ALLOCATION;
11204  currNode->allocation.alloc = hAllocation;
11205 
11206  ++m_AllocationCount;
11207  --m_FreeCount;
11208  m_SumFreeSize -= allocSize;
11209 }
11210 
11211 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11212 {
11213  if(node->type == Node::TYPE_SPLIT)
11214  {
11215  DeleteNode(node->split.leftChild->buddy);
11216  DeleteNode(node->split.leftChild);
11217  }
11218 
11219  vma_delete(GetAllocationCallbacks(), node);
11220 }
11221 
11222 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11223 {
11224  VMA_VALIDATE(level < m_LevelCount);
11225  VMA_VALIDATE(curr->parent == parent);
11226  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11227  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11228  switch(curr->type)
11229  {
11230  case Node::TYPE_FREE:
11231  // curr->free.prev, next are validated separately.
11232  ctx.calculatedSumFreeSize += levelNodeSize;
11233  ++ctx.calculatedFreeCount;
11234  break;
11235  case Node::TYPE_ALLOCATION:
11236  ++ctx.calculatedAllocationCount;
11237  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11238  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11239  break;
11240  case Node::TYPE_SPLIT:
11241  {
11242  const uint32_t childrenLevel = level + 1;
11243  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11244  const Node* const leftChild = curr->split.leftChild;
11245  VMA_VALIDATE(leftChild != VMA_NULL);
11246  VMA_VALIDATE(leftChild->offset == curr->offset);
11247  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11248  {
11249  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11250  }
11251  const Node* const rightChild = leftChild->buddy;
11252  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11253  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11254  {
11255  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11256  }
11257  }
11258  break;
11259  default:
11260  return false;
11261  }
11262 
11263  return true;
11264 }
11265 
11266 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11267 {
11268  // This could be optimized e.g. by using std::bit_width (formerly std::log2p1) from C++20.
11269  uint32_t level = 0;
11270  VkDeviceSize currLevelNodeSize = m_UsableSize;
11271  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11272  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11273  {
11274  ++level;
11275  currLevelNodeSize = nextLevelNodeSize;
11276  nextLevelNodeSize = currLevelNodeSize >> 1;
11277  }
11278  return level;
11279 }
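
// Worked example for AllocSizeToLevel() above: with m_UsableSize = 1024 and
// allocSize = 100, the loop walks node sizes 1024 -> 512 -> 256 -> 128 and
// stops there because the next size, 64, is smaller than 100. It returns
// level 3: the smallest node that still fits the request.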
11280 
11281 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11282 {
11283  // Find node and level.
11284  Node* node = m_Root;
11285  VkDeviceSize nodeOffset = 0;
11286  uint32_t level = 0;
11287  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11288  while(node->type == Node::TYPE_SPLIT)
11289  {
11290  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11291  if(offset < nodeOffset + nextLevelSize)
11292  {
11293  node = node->split.leftChild;
11294  }
11295  else
11296  {
11297  node = node->split.leftChild->buddy;
11298  nodeOffset += nextLevelSize;
11299  }
11300  ++level;
11301  levelNodeSize = nextLevelSize;
11302  }
11303 
11304  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11305  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11306 
11307  ++m_FreeCount;
11308  --m_AllocationCount;
11309  m_SumFreeSize += alloc->GetSize();
11310 
11311  node->type = Node::TYPE_FREE;
11312 
11313  // Join free nodes if possible.
11314  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11315  {
11316  RemoveFromFreeList(level, node->buddy);
11317  Node* const parent = node->parent;
11318 
11319  vma_delete(GetAllocationCallbacks(), node->buddy);
11320  vma_delete(GetAllocationCallbacks(), node);
11321  parent->type = Node::TYPE_FREE;
11322 
11323  node = parent;
11324  --level;
11325  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11326  --m_FreeCount;
11327  }
11328 
11329  AddToFreeListFront(level, node);
11330 }
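
// Illustration of the merge loop above: a freed node coalesces with its
// buddy only while the buddy is also entirely free, repeating toward the
// root. With four leaf nodes A..D:
//
//   level 2:  [A][B][C][D]    free(B): merges with A only if A is free,
//   level 1:  [ AB ][ CD ]    producing AB; AB then merges with CD only if
//   level 0:  [   ABCD   ]    CD is free as well, and so on up the tree.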
11331 
11332 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11333 {
11334  switch(node->type)
11335  {
11336  case Node::TYPE_FREE:
11337  ++outInfo.unusedRangeCount;
11338  outInfo.unusedBytes += levelNodeSize;
11339  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11340  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11341  break;
11342  case Node::TYPE_ALLOCATION:
11343  {
11344  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11345  ++outInfo.allocationCount;
11346  outInfo.usedBytes += allocSize;
11347  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11348  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11349 
11350  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11351  if(unusedRangeSize > 0)
11352  {
11353  ++outInfo.unusedRangeCount;
11354  outInfo.unusedBytes += unusedRangeSize;
11355  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11356  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11357  }
11358  }
11359  break;
11360  case Node::TYPE_SPLIT:
11361  {
11362  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11363  const Node* const leftChild = node->split.leftChild;
11364  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11365  const Node* const rightChild = leftChild->buddy;
11366  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11367  }
11368  break;
11369  default:
11370  VMA_ASSERT(0);
11371  }
11372 }
11373 
11374 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11375 {
11376  VMA_ASSERT(node->type == Node::TYPE_FREE);
11377 
11378  // List is empty.
11379  Node* const frontNode = m_FreeList[level].front;
11380  if(frontNode == VMA_NULL)
11381  {
11382  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11383  node->free.prev = node->free.next = VMA_NULL;
11384  m_FreeList[level].front = m_FreeList[level].back = node;
11385  }
11386  else
11387  {
11388  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11389  node->free.prev = VMA_NULL;
11390  node->free.next = frontNode;
11391  frontNode->free.prev = node;
11392  m_FreeList[level].front = node;
11393  }
11394 }
11395 
11396 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11397 {
11398  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11399 
11400  // It is at the front.
11401  if(node->free.prev == VMA_NULL)
11402  {
11403  VMA_ASSERT(m_FreeList[level].front == node);
11404  m_FreeList[level].front = node->free.next;
11405  }
11406  else
11407  {
11408  Node* const prevFreeNode = node->free.prev;
11409  VMA_ASSERT(prevFreeNode->free.next == node);
11410  prevFreeNode->free.next = node->free.next;
11411  }
11412 
11413  // It is at the back.
11414  if(node->free.next == VMA_NULL)
11415  {
11416  VMA_ASSERT(m_FreeList[level].back == node);
11417  m_FreeList[level].back = node->free.prev;
11418  }
11419  else
11420  {
11421  Node* const nextFreeNode = node->free.next;
11422  VMA_ASSERT(nextFreeNode->free.prev == node);
11423  nextFreeNode->free.prev = node->free.prev;
11424  }
11425 }
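
// Note on the two branches above: this is the standard doubly-linked-list
// unlink. Removing B from A <-> B <-> C repoints A's next to C and C's prev
// to A; when B is at the front or back of the list, m_FreeList[level].front
// or .back is updated instead.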
11426 
11427 #if VMA_STATS_STRING_ENABLED
11428 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11429 {
11430  switch(node->type)
11431  {
11432  case Node::TYPE_FREE:
11433  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11434  break;
11435  case Node::TYPE_ALLOCATION:
11436  {
11437  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11438  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11439  if(allocSize < levelNodeSize)
11440  {
11441  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11442  }
11443  }
11444  break;
11445  case Node::TYPE_SPLIT:
11446  {
11447  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11448  const Node* const leftChild = node->split.leftChild;
11449  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11450  const Node* const rightChild = leftChild->buddy;
11451  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11452  }
11453  break;
11454  default:
11455  VMA_ASSERT(0);
11456  }
11457 }
11458 #endif // #if VMA_STATS_STRING_ENABLED
11459 
11460 
11461 ////////////////////////////////////////////////////////////////////////////////
11462 // class VmaDeviceMemoryBlock
11463 
11464 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11465  m_pMetadata(VMA_NULL),
11466  m_MemoryTypeIndex(UINT32_MAX),
11467  m_Id(0),
11468  m_hMemory(VK_NULL_HANDLE),
11469  m_MapCount(0),
11470  m_pMappedData(VMA_NULL)
11471 {
11472 }
11473 
11474 void VmaDeviceMemoryBlock::Init(
11475  VmaAllocator hAllocator,
11476  VmaPool hParentPool,
11477  uint32_t newMemoryTypeIndex,
11478  VkDeviceMemory newMemory,
11479  VkDeviceSize newSize,
11480  uint32_t id,
11481  uint32_t algorithm)
11482 {
11483  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11484 
11485  m_hParentPool = hParentPool;
11486  m_MemoryTypeIndex = newMemoryTypeIndex;
11487  m_Id = id;
11488  m_hMemory = newMemory;
11489 
11490  switch(algorithm)
11491  {
11492  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11493  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11494  break;
11495  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11496  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11497  break;
11498  default:
11499  VMA_ASSERT(0);
11500  // Fall-through.
11501  case 0:
11502  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11503  }
11504  m_pMetadata->Init(newSize);
11505 }
11506 
11507 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11508 {
11509  // This is the most important assert in the entire library.
11510  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11511  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11512 
11513  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11514  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11515  m_hMemory = VK_NULL_HANDLE;
11516 
11517  vma_delete(allocator, m_pMetadata);
11518  m_pMetadata = VMA_NULL;
11519 }
11520 
11521 bool VmaDeviceMemoryBlock::Validate() const
11522 {
11523  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11524  (m_pMetadata->GetSize() != 0));
11525 
11526  return m_pMetadata->Validate();
11527 }
11528 
11529 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11530 {
11531  void* pData = nullptr;
11532  VkResult res = Map(hAllocator, 1, &pData);
11533  if(res != VK_SUCCESS)
11534  {
11535  return res;
11536  }
11537 
11538  res = m_pMetadata->CheckCorruption(pData);
11539 
11540  Unmap(hAllocator, 1);
11541 
11542  return res;
11543 }
11544 
11545 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11546 {
11547  if(count == 0)
11548  {
11549  return VK_SUCCESS;
11550  }
11551 
11552  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11553  if(m_MapCount != 0)
11554  {
11555  m_MapCount += count;
11556  VMA_ASSERT(m_pMappedData != VMA_NULL);
11557  if(ppData != VMA_NULL)
11558  {
11559  *ppData = m_pMappedData;
11560  }
11561  return VK_SUCCESS;
11562  }
11563  else
11564  {
11565  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11566  hAllocator->m_hDevice,
11567  m_hMemory,
11568  0, // offset
11569  VK_WHOLE_SIZE,
11570  0, // flags
11571  &m_pMappedData);
11572  if(result == VK_SUCCESS)
11573  {
11574  if(ppData != VMA_NULL)
11575  {
11576  *ppData = m_pMappedData;
11577  }
11578  m_MapCount = count;
11579  }
11580  return result;
11581  }
11582 }
11583 
11584 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11585 {
11586  if(count == 0)
11587  {
11588  return;
11589  }
11590 
11591  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11592  if(m_MapCount >= count)
11593  {
11594  m_MapCount -= count;
11595  if(m_MapCount == 0)
11596  {
11597  m_pMappedData = VMA_NULL;
11598  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11599  }
11600  }
11601  else
11602  {
11603  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11604  }
11605 }
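
// Illustrative usage sketch (not part of the library): Map()/Unmap() are
// reference-counted per block, so vkMapMemory runs only on the 0 -> 1
// transition and vkUnmapMemory only on the 1 -> 0 transition. A hypothetical
// call sequence on some block:
//
//   block->Map(hAllocator, 1, &pData);  // count 0 -> 1: calls vkMapMemory
//   block->Map(hAllocator, 1, &pData);  // count 1 -> 2: reuses m_pMappedData
//   block->Unmap(hAllocator, 1);        // count 2 -> 1: no Vulkan call
//   block->Unmap(hAllocator, 1);        // count 1 -> 0: calls vkUnmapMemory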
11606 
11607 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11608 {
11609  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11610  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11611 
11612  void* pData;
11613  VkResult res = Map(hAllocator, 1, &pData);
11614  if(res != VK_SUCCESS)
11615  {
11616  return res;
11617  }
11618 
11619  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11620  VmaWriteMagicValue(pData, allocOffset + allocSize);
11621 
11622  Unmap(hAllocator, 1);
11623 
11624  return VK_SUCCESS;
11625 }
11626 
11627 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11628 {
11629  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11630  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11631 
11632  void* pData;
11633  VkResult res = Map(hAllocator, 1, &pData);
11634  if(res != VK_SUCCESS)
11635  {
11636  return res;
11637  }
11638 
11639  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11640  {
11641  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11642  }
11643  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11644  {
11645  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11646  }
11647 
11648  Unmap(hAllocator, 1);
11649 
11650  return VK_SUCCESS;
11651 }
11652 
11653 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11654  const VmaAllocator hAllocator,
11655  const VmaAllocation hAllocation,
11656  VkDeviceSize allocationLocalOffset,
11657  VkBuffer hBuffer,
11658  const void* pNext)
11659 {
11660  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11661  hAllocation->GetBlock() == this);
11662  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11663  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11664  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11665  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11666  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11667  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11668 }
11669 
11670 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11671  const VmaAllocator hAllocator,
11672  const VmaAllocation hAllocation,
11673  VkDeviceSize allocationLocalOffset,
11674  VkImage hImage,
11675  const void* pNext)
11676 {
11677  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11678  hAllocation->GetBlock() == this);
11679  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11680  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11681  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11682  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11683  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11684  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11685 }
11686 
11687 static void InitStatInfo(VmaStatInfo& outInfo)
11688 {
11689  memset(&outInfo, 0, sizeof(outInfo));
11690  outInfo.allocationSizeMin = UINT64_MAX;
11691  outInfo.unusedRangeSizeMin = UINT64_MAX;
11692 }
11693 
11694 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11695 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11696 {
11697  inoutInfo.blockCount += srcInfo.blockCount;
11698  inoutInfo.allocationCount += srcInfo.allocationCount;
11699  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11700  inoutInfo.usedBytes += srcInfo.usedBytes;
11701  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11702  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11703  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11704  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11705  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11706 }
11707 
11708 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11709 {
11710  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11711  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11712  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11713  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11714 }
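
// Worked example for the averages above, assuming VmaRoundDiv(x, y) computes
// (x + y / 2) / y (integer division, rounded to nearest): usedBytes = 1000
// over allocationCount = 3 gives allocationSizeAvg = 1001 / 3 = 333, while
// usedBytes = 1001 gives 1002 / 3 = 334. A zero count leaves the average at
// 0 rather than dividing by zero.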
11715 
11716 VmaPool_T::VmaPool_T(
11717  VmaAllocator hAllocator,
11718  const VmaPoolCreateInfo& createInfo,
11719  VkDeviceSize preferredBlockSize) :
11720  m_BlockVector(
11721  hAllocator,
11722  this, // hParentPool
11723  createInfo.memoryTypeIndex,
11724  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11725  createInfo.minBlockCount,
11726  createInfo.maxBlockCount,
11727  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11728  createInfo.frameInUseCount,
11729  createInfo.blockSize != 0, // explicitBlockSize
11730  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11731  m_Id(0),
11732  m_Name(VMA_NULL)
11733 {
11734 }
11735 
11736 VmaPool_T::~VmaPool_T()
11737 {
11738 }
11739 
11740 void VmaPool_T::SetName(const char* pName)
11741 {
11742  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
11743  VmaFreeString(allocs, m_Name);
11744 
11745  if(pName != VMA_NULL)
11746  {
11747  m_Name = VmaCreateStringCopy(allocs, pName);
11748  }
11749  else
11750  {
11751  m_Name = VMA_NULL;
11752  }
11753 }
11754 
11755 #if VMA_STATS_STRING_ENABLED
11756 
11757 #endif // #if VMA_STATS_STRING_ENABLED
11758 
11759 VmaBlockVector::VmaBlockVector(
11760  VmaAllocator hAllocator,
11761  VmaPool hParentPool,
11762  uint32_t memoryTypeIndex,
11763  VkDeviceSize preferredBlockSize,
11764  size_t minBlockCount,
11765  size_t maxBlockCount,
11766  VkDeviceSize bufferImageGranularity,
11767  uint32_t frameInUseCount,
11768  bool explicitBlockSize,
11769  uint32_t algorithm) :
11770  m_hAllocator(hAllocator),
11771  m_hParentPool(hParentPool),
11772  m_MemoryTypeIndex(memoryTypeIndex),
11773  m_PreferredBlockSize(preferredBlockSize),
11774  m_MinBlockCount(minBlockCount),
11775  m_MaxBlockCount(maxBlockCount),
11776  m_BufferImageGranularity(bufferImageGranularity),
11777  m_FrameInUseCount(frameInUseCount),
11778  m_ExplicitBlockSize(explicitBlockSize),
11779  m_Algorithm(algorithm),
11780  m_HasEmptyBlock(false),
11781  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11782  m_NextBlockId(0)
11783 {
11784 }
11785 
11786 VmaBlockVector::~VmaBlockVector()
11787 {
11788  for(size_t i = m_Blocks.size(); i--; )
11789  {
11790  m_Blocks[i]->Destroy(m_hAllocator);
11791  vma_delete(m_hAllocator, m_Blocks[i]);
11792  }
11793 }
11794 
11795 VkResult VmaBlockVector::CreateMinBlocks()
11796 {
11797  for(size_t i = 0; i < m_MinBlockCount; ++i)
11798  {
11799  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11800  if(res != VK_SUCCESS)
11801  {
11802  return res;
11803  }
11804  }
11805  return VK_SUCCESS;
11806 }
11807 
11808 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11809 {
11810  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11811 
11812  const size_t blockCount = m_Blocks.size();
11813 
11814  pStats->size = 0;
11815  pStats->unusedSize = 0;
11816  pStats->allocationCount = 0;
11817  pStats->unusedRangeCount = 0;
11818  pStats->unusedRangeSizeMax = 0;
11819  pStats->blockCount = blockCount;
11820 
11821  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11822  {
11823  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11824  VMA_ASSERT(pBlock);
11825  VMA_HEAVY_ASSERT(pBlock->Validate());
11826  pBlock->m_pMetadata->AddPoolStats(*pStats);
11827  }
11828 }
11829 
11830 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11831 {
11832  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11833  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11834  (VMA_DEBUG_MARGIN > 0) &&
11835  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11836  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11837 }
11838 
11839 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11840 
11841 VkResult VmaBlockVector::Allocate(
11842  uint32_t currentFrameIndex,
11843  VkDeviceSize size,
11844  VkDeviceSize alignment,
11845  const VmaAllocationCreateInfo& createInfo,
11846  VmaSuballocationType suballocType,
11847  size_t allocationCount,
11848  VmaAllocation* pAllocations)
11849 {
11850  size_t allocIndex;
11851  VkResult res = VK_SUCCESS;
11852 
11853  if(IsCorruptionDetectionEnabled())
11854  {
11855  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11856  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11857  }
11858 
11859  {
11860  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11861  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11862  {
11863  res = AllocatePage(
11864  currentFrameIndex,
11865  size,
11866  alignment,
11867  createInfo,
11868  suballocType,
11869  pAllocations + allocIndex);
11870  if(res != VK_SUCCESS)
11871  {
11872  break;
11873  }
11874  }
11875  }
11876 
11877  if(res != VK_SUCCESS)
11878  {
11879  // Free all allocations created so far.
11880  while(allocIndex--)
11881  {
11882  Free(pAllocations[allocIndex]);
11883  }
11884  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11885  }
11886 
11887  return res;
11888 }
11889 
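/*
Example (a hedged sketch, not part of the library): the all-or-nothing
semantics of Allocate() above - either every page succeeds or everything
already created is freed again - back the public function
vmaAllocateMemoryPages(). Assumes a valid `allocator` and a filled
VkMemoryRequirements `memReq`.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocs[8] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo, 8, allocs, nullptr);
    // On failure, none of allocs[] remain allocated - they are all null.
*/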
11890 VkResult VmaBlockVector::AllocatePage(
11891  uint32_t currentFrameIndex,
11892  VkDeviceSize size,
11893  VkDeviceSize alignment,
11894  const VmaAllocationCreateInfo& createInfo,
11895  VmaSuballocationType suballocType,
11896  VmaAllocation* pAllocation)
11897 {
11898  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11899  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11900  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11901  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11902 
11903  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
11904  VkDeviceSize freeMemory;
11905  {
11906  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
11907  VmaBudget heapBudget = {};
11908  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
11909  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
11910  }
11911 
11912  const bool canFallbackToDedicated = !IsCustomPool();
11913  const bool canCreateNewBlock =
11914  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11915  (m_Blocks.size() < m_MaxBlockCount) &&
11916  (freeMemory >= size || !canFallbackToDedicated);
11917  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11918 
11919  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
11920  // which in turn is available only when maxBlockCount = 1.
11921  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11922  {
11923  canMakeOtherLost = false;
11924  }
11925 
11926  // Upper address can only be used with linear allocator and within single memory block.
11927  if(isUpperAddress &&
11928  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11929  {
11930  return VK_ERROR_FEATURE_NOT_PRESENT;
11931  }
11932 
11933  // Validate strategy.
11934  switch(strategy)
11935  {
11936  case 0:
11937  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11938  break;
11939  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11940  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11941  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11942  break;
11943  default:
11944  return VK_ERROR_FEATURE_NOT_PRESENT;
11945  }
11946 
11947  // Early reject: requested allocation size is larger than maximum block size for this block vector.
11948  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11949  {
11950  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11951  }
11952 
11953  /*
11954  Under certain conditions, this whole section can be skipped as an optimization, so
11955  we move on directly to trying to allocate with canMakeOtherLost. That is the case
11956  e.g. for custom pools with linear algorithm.
11957  */
11958  if(!canMakeOtherLost || canCreateNewBlock)
11959  {
11960  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11961  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11962  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11963 
11964  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11965  {
11966  // Use only last block.
11967  if(!m_Blocks.empty())
11968  {
11969  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11970  VMA_ASSERT(pCurrBlock);
11971  VkResult res = AllocateFromBlock(
11972  pCurrBlock,
11973  currentFrameIndex,
11974  size,
11975  alignment,
11976  allocFlagsCopy,
11977  createInfo.pUserData,
11978  suballocType,
11979  strategy,
11980  pAllocation);
11981  if(res == VK_SUCCESS)
11982  {
11983  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
11984  return VK_SUCCESS;
11985  }
11986  }
11987  }
11988  else
11989  {
11990  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11991  {
11992  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11993  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11994  {
11995  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11996  VMA_ASSERT(pCurrBlock);
11997  VkResult res = AllocateFromBlock(
11998  pCurrBlock,
11999  currentFrameIndex,
12000  size,
12001  alignment,
12002  allocFlagsCopy,
12003  createInfo.pUserData,
12004  suballocType,
12005  strategy,
12006  pAllocation);
12007  if(res == VK_SUCCESS)
12008  {
12009  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12010  return VK_SUCCESS;
12011  }
12012  }
12013  }
12014  else // WORST_FIT, FIRST_FIT
12015  {
12016  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12017  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12018  {
12019  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12020  VMA_ASSERT(pCurrBlock);
12021  VkResult res = AllocateFromBlock(
12022  pCurrBlock,
12023  currentFrameIndex,
12024  size,
12025  alignment,
12026  allocFlagsCopy,
12027  createInfo.pUserData,
12028  suballocType,
12029  strategy,
12030  pAllocation);
12031  if(res == VK_SUCCESS)
12032  {
12033  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12034  return VK_SUCCESS;
12035  }
12036  }
12037  }
12038  }
12039 
12040  // 2. Try to create new block.
12041  if(canCreateNewBlock)
12042  {
12043  // Calculate optimal size for new block.
12044  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12045  uint32_t newBlockSizeShift = 0;
12046  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12047 
12048  if(!m_ExplicitBlockSize)
12049  {
12050  // Allocate 1/8, 1/4, 1/2 as first blocks.
12051  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12052  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12053  {
12054  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12055  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12056  {
12057  newBlockSize = smallerNewBlockSize;
12058  ++newBlockSizeShift;
12059  }
12060  else
12061  {
12062  break;
12063  }
12064  }
12065  }
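 // Worked example (illustrative): with m_PreferredBlockSize = 256 MiB, no
 // existing blocks (maxExistingBlockSize = 0) and size = 10 MiB, the loop
 // above halves newBlockSize three times: 128 -> 64 -> 32 MiB, because each
 // candidate still satisfies smallerNewBlockSize >= size * 2. The first block
 // is then created with 32 MiB and newBlockSizeShift = 3.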
12066 
12067  size_t newBlockIndex = 0;
12068  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12069  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12070  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12071  if(!m_ExplicitBlockSize)
12072  {
12073  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12074  {
12075  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12076  if(smallerNewBlockSize >= size)
12077  {
12078  newBlockSize = smallerNewBlockSize;
12079  ++newBlockSizeShift;
12080  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12081  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12082  }
12083  else
12084  {
12085  break;
12086  }
12087  }
12088  }
12089 
12090  if(res == VK_SUCCESS)
12091  {
12092  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12093  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12094 
12095  res = AllocateFromBlock(
12096  pBlock,
12097  currentFrameIndex,
12098  size,
12099  alignment,
12100  allocFlagsCopy,
12101  createInfo.pUserData,
12102  suballocType,
12103  strategy,
12104  pAllocation);
12105  if(res == VK_SUCCESS)
12106  {
12107  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12108  return VK_SUCCESS;
12109  }
12110  else
12111  {
12112  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12113  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12114  }
12115  }
12116  }
12117  }
12118 
12119  // 3. Try to allocate from existing blocks while making other allocations lost.
12120  if(canMakeOtherLost)
12121  {
12122  uint32_t tryIndex = 0;
12123  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12124  {
12125  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12126  VmaAllocationRequest bestRequest = {};
12127  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12128 
12129  // 1. Search existing allocations.
12130  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12131  {
12132  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12133  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12134  {
12135  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12136  VMA_ASSERT(pCurrBlock);
12137  VmaAllocationRequest currRequest = {};
12138  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12139  currentFrameIndex,
12140  m_FrameInUseCount,
12141  m_BufferImageGranularity,
12142  size,
12143  alignment,
12144  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12145  suballocType,
12146  canMakeOtherLost,
12147  strategy,
12148  &currRequest))
12149  {
12150  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12151  if(pBestRequestBlock == VMA_NULL ||
12152  currRequestCost < bestRequestCost)
12153  {
12154  pBestRequestBlock = pCurrBlock;
12155  bestRequest = currRequest;
12156  bestRequestCost = currRequestCost;
12157 
12158  if(bestRequestCost == 0)
12159  {
12160  break;
12161  }
12162  }
12163  }
12164  }
12165  }
12166  else // WORST_FIT, FIRST_FIT
12167  {
12168  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12169  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12170  {
12171  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12172  VMA_ASSERT(pCurrBlock);
12173  VmaAllocationRequest currRequest = {};
12174  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12175  currentFrameIndex,
12176  m_FrameInUseCount,
12177  m_BufferImageGranularity,
12178  size,
12179  alignment,
12180  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12181  suballocType,
12182  canMakeOtherLost,
12183  strategy,
12184  &currRequest))
12185  {
12186  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12187  if(pBestRequestBlock == VMA_NULL ||
12188  currRequestCost < bestRequestCost ||
12189  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12190  {
12191  pBestRequestBlock = pCurrBlock;
12192  bestRequest = currRequest;
12193  bestRequestCost = currRequestCost;
12194 
12195  if(bestRequestCost == 0 ||
12196  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12197  {
12198  break;
12199  }
12200  }
12201  }
12202  }
12203  }
12204 
12205  if(pBestRequestBlock != VMA_NULL)
12206  {
12207  if(mapped)
12208  {
12209  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12210  if(res != VK_SUCCESS)
12211  {
12212  return res;
12213  }
12214  }
12215 
12216  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12217  currentFrameIndex,
12218  m_FrameInUseCount,
12219  &bestRequest))
12220  {
12221  // Allocate from this pBlock.
12222  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12223  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12224  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12225  UpdateHasEmptyBlock();
12226  (*pAllocation)->InitBlockAllocation(
12227  pBestRequestBlock,
12228  bestRequest.offset,
12229  alignment,
12230  size,
12231  m_MemoryTypeIndex,
12232  suballocType,
12233  mapped,
12234  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12235  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12236  VMA_DEBUG_LOG(" Returned from existing block");
12237  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12238  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12239  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12240  {
12241  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12242  }
12243  if(IsCorruptionDetectionEnabled())
12244  {
12245  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12246  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12247  }
12248  return VK_SUCCESS;
12249  }
12250  // else: Some allocations must have been touched while we are here. Next try.
12251  }
12252  else
12253  {
12254  // Could not find place in any of the blocks - break outer loop.
12255  break;
12256  }
12257  }
12258  /* Maximum number of tries exceeded - a very unlikely event when many other
12259  threads are simultaneously touching allocations, making it impossible to make
12260  them lost at the same time as we try to allocate. */
12261  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12262  {
12263  return VK_ERROR_TOO_MANY_OBJECTS;
12264  }
12265  }
12266 
12267  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12268 }
12269 
12270 void VmaBlockVector::Free(
12271  const VmaAllocation hAllocation)
12272 {
12273  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12274 
12275  bool budgetExceeded = false;
12276  {
12277  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12278  VmaBudget heapBudget = {};
12279  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12280  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12281  }
12282 
12283  // Scope for lock.
12284  {
12285  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12286 
12287  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12288 
12289  if(IsCorruptionDetectionEnabled())
12290  {
12291  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12292  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12293  }
12294 
12295  if(hAllocation->IsPersistentMap())
12296  {
12297  pBlock->Unmap(m_hAllocator, 1);
12298  }
12299 
12300  pBlock->m_pMetadata->Free(hAllocation);
12301  VMA_HEAVY_ASSERT(pBlock->Validate());
12302 
12303  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12304 
12305  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12306  // pBlock became empty after this deallocation.
12307  if(pBlock->m_pMetadata->IsEmpty())
12308  {
12309  // An empty block already exists or the budget is exceeded. We don't want two empty blocks, so delete this one.
12310  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12311  {
12312  pBlockToDelete = pBlock;
12313  Remove(pBlock);
12314  }
12315  // else: We now have an empty block - leave it.
12316  }
12317  // pBlock didn't become empty, but we have another empty block - find and free that one.
12318  // (This is optional - a heuristic.)
12319  else if(m_HasEmptyBlock && canDeleteBlock)
12320  {
12321  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12322  if(pLastBlock->m_pMetadata->IsEmpty())
12323  {
12324  pBlockToDelete = pLastBlock;
12325  m_Blocks.pop_back();
12326  }
12327  }
12328 
12329  UpdateHasEmptyBlock();
12330  IncrementallySortBlocks();
12331  }
12332 
12333  // Destruction of a free block. Deferred until this point, outside of mutex
12334  // lock, for performance reasons.
12335  if(pBlockToDelete != VMA_NULL)
12336  {
12337  VMA_DEBUG_LOG(" Deleted empty block");
12338  pBlockToDelete->Destroy(m_hAllocator);
12339  vma_delete(m_hAllocator, pBlockToDelete);
12340  }
12341 }
12342 
12343 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12344 {
12345  VkDeviceSize result = 0;
12346  for(size_t i = m_Blocks.size(); i--; )
12347  {
12348  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12349  if(result >= m_PreferredBlockSize)
12350  {
12351  break;
12352  }
12353  }
12354  return result;
12355 }
12356 
12357 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12358 {
12359  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12360  {
12361  if(m_Blocks[blockIndex] == pBlock)
12362  {
12363  VmaVectorRemove(m_Blocks, blockIndex);
12364  return;
12365  }
12366  }
12367  VMA_ASSERT(0);
12368 }
12369 
12370 void VmaBlockVector::IncrementallySortBlocks()
12371 {
12372  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12373  {
12374  // Bubble sort only until first swap.
12375  for(size_t i = 1; i < m_Blocks.size(); ++i)
12376  {
12377  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12378  {
12379  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12380  return;
12381  }
12382  }
12383  }
12384 }
12385 
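 // Note: a single swap per call keeps m_Blocks only approximately sorted by
 // GetSumFreeSize(), but amortized over many Free() calls the vector converges
 // toward fully sorted order without paying for a complete sort on every
 // deallocation.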
12386 VkResult VmaBlockVector::AllocateFromBlock(
12387  VmaDeviceMemoryBlock* pBlock,
12388  uint32_t currentFrameIndex,
12389  VkDeviceSize size,
12390  VkDeviceSize alignment,
12391  VmaAllocationCreateFlags allocFlags,
12392  void* pUserData,
12393  VmaSuballocationType suballocType,
12394  uint32_t strategy,
12395  VmaAllocation* pAllocation)
12396 {
12397  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12398  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12399  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12400  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12401 
12402  VmaAllocationRequest currRequest = {};
12403  if(pBlock->m_pMetadata->CreateAllocationRequest(
12404  currentFrameIndex,
12405  m_FrameInUseCount,
12406  m_BufferImageGranularity,
12407  size,
12408  alignment,
12409  isUpperAddress,
12410  suballocType,
12411  false, // canMakeOtherLost
12412  strategy,
12413  &currRequest))
12414  {
12415  // Allocate from pBlock.
12416  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12417 
12418  if(mapped)
12419  {
12420  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12421  if(res != VK_SUCCESS)
12422  {
12423  return res;
12424  }
12425  }
12426 
12427  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12428  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12429  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12430  UpdateHasEmptyBlock();
12431  (*pAllocation)->InitBlockAllocation(
12432  pBlock,
12433  currRequest.offset,
12434  alignment,
12435  size,
12436  m_MemoryTypeIndex,
12437  suballocType,
12438  mapped,
12439  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12440  VMA_HEAVY_ASSERT(pBlock->Validate());
12441  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12442  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12443  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12444  {
12445  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12446  }
12447  if(IsCorruptionDetectionEnabled())
12448  {
12449  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12450  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12451  }
12452  return VK_SUCCESS;
12453  }
12454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12455 }
12456 
12457 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12458 {
12459  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12460  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12461  allocInfo.allocationSize = blockSize;
12462  VkDeviceMemory mem = VK_NULL_HANDLE;
12463  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12464  if(res < 0)
12465  {
12466  return res;
12467  }
12468 
12469  // New VkDeviceMemory successfully created.
12470 
12471  // Create a new block object for it.
12472  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12473  pBlock->Init(
12474  m_hAllocator,
12475  m_hParentPool,
12476  m_MemoryTypeIndex,
12477  mem,
12478  allocInfo.allocationSize,
12479  m_NextBlockId++,
12480  m_Algorithm);
12481 
12482  m_Blocks.push_back(pBlock);
12483  if(pNewBlockIndex != VMA_NULL)
12484  {
12485  *pNewBlockIndex = m_Blocks.size() - 1;
12486  }
12487 
12488  return VK_SUCCESS;
12489 }
12490 
12491 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12492  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12493  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12494 {
12495  const size_t blockCount = m_Blocks.size();
12496  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12497 
12498  enum BLOCK_FLAG
12499  {
12500  BLOCK_FLAG_USED = 0x00000001,
12501  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12502  };
12503 
12504  struct BlockInfo
12505  {
12506  uint32_t flags;
12507  void* pMappedData;
12508  };
12509  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12510  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12511  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12512 
12513  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12514  const size_t moveCount = moves.size();
12515  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12516  {
12517  const VmaDefragmentationMove& move = moves[moveIndex];
12518  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12519  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12520  }
12521 
12522  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12523 
12524  // Go over all blocks. Get mapped pointer or map if necessary.
12525  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12526  {
12527  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12528  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12529  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12530  {
12531  currBlockInfo.pMappedData = pBlock->GetMappedData();
12532  // It is not originally mapped - map it.
12533  if(currBlockInfo.pMappedData == VMA_NULL)
12534  {
12535  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12536  if(pDefragCtx->res == VK_SUCCESS)
12537  {
12538  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12539  }
12540  }
12541  }
12542  }
12543 
12544  // Go over all moves. Do actual data transfer.
12545  if(pDefragCtx->res == VK_SUCCESS)
12546  {
12547  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12548  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12549 
12550  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12551  {
12552  const VmaDefragmentationMove& move = moves[moveIndex];
12553 
12554  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12555  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12556 
12557  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12558 
12559  // Invalidate source.
12560  if(isNonCoherent)
12561  {
12562  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12563  memRange.memory = pSrcBlock->GetDeviceMemory();
12564  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12565  memRange.size = VMA_MIN(
12566  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12567  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12568  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12569  }
12570 
12571  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12572  memmove(
12573  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12574  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12575  static_cast<size_t>(move.size));
12576 
12577  if(IsCorruptionDetectionEnabled())
12578  {
12579  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12580  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12581  }
12582 
12583  // Flush destination.
12584  if(isNonCoherent)
12585  {
12586  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12587  memRange.memory = pDstBlock->GetDeviceMemory();
12588  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12589  memRange.size = VMA_MIN(
12590  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12591  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12592  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12593  }
12594  }
12595  }
12596 
12597  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12598  // This is done regardless of whether pCtx->res == VK_SUCCESS.
12599  for(size_t blockIndex = blockCount; blockIndex--; )
12600  {
12601  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12602  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12603  {
12604  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12605  pBlock->Unmap(m_hAllocator, 1);
12606  }
12607  }
12608 }
12609 
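/*
Example (a hedged arithmetic sketch): the invalidate/flush ranges above must be
aligned to VkPhysicalDeviceLimits::nonCoherentAtomSize. With
nonCoherentAtomSize = 64, an allocation at offset 100 with size 200 maps to:

    offset = VmaAlignDown(100, 64) = 64
    size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256

so bytes [64, 320) are flushed - a superset of the actual [100, 300) range,
clamped to the block size if the block ends earlier.
*/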
12610 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12611  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12612  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12613  VkCommandBuffer commandBuffer)
12614 {
12615  const size_t blockCount = m_Blocks.size();
12616 
12617  pDefragCtx->blockContexts.resize(blockCount);
12618  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12619 
12620  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12621  const size_t moveCount = moves.size();
12622  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12623  {
12624  const VmaDefragmentationMove& move = moves[moveIndex];
12625  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12626  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12627  }
12628 
12629  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12630 
12631  // Go over all blocks. Create and bind buffer for whole block if necessary.
12632  {
12633  VkBufferCreateInfo bufCreateInfo;
12634  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12635 
12636  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12637  {
12638  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12639  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12640  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12641  {
12642  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12643  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12644  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12645  if(pDefragCtx->res == VK_SUCCESS)
12646  {
12647  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12648  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12649  }
12650  }
12651  }
12652  }
12653 
12654  // Go over all moves. Post data transfer commands to command buffer.
12655  if(pDefragCtx->res == VK_SUCCESS)
12656  {
12657  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12658  {
12659  const VmaDefragmentationMove& move = moves[moveIndex];
12660 
12661  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12662  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12663 
12664  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12665 
12666  VkBufferCopy region = {
12667  move.srcOffset,
12668  move.dstOffset,
12669  move.size };
12670  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12671  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12672  }
12673  }
12674 
12675  // Buffers were saved into defrag context earlier. Mark context as not ready, so they get destroyed later, after the copy commands execute.
12676  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12677  {
12678  pDefragCtx->res = VK_NOT_READY;
12679  }
12680 }
12681 
12682 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12683 {
12684  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12685  {
12686  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12687  if(pBlock->m_pMetadata->IsEmpty())
12688  {
12689  if(m_Blocks.size() > m_MinBlockCount)
12690  {
12691  if(pDefragmentationStats != VMA_NULL)
12692  {
12693  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12694  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12695  }
12696 
12697  VmaVectorRemove(m_Blocks, blockIndex);
12698  pBlock->Destroy(m_hAllocator);
12699  vma_delete(m_hAllocator, pBlock);
12700  }
12701  else
12702  {
12703  break;
12704  }
12705  }
12706  }
12707  UpdateHasEmptyBlock();
12708 }
12709 
12710 void VmaBlockVector::UpdateHasEmptyBlock()
12711 {
12712  m_HasEmptyBlock = false;
12713  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12714  {
12715  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12716  if(pBlock->m_pMetadata->IsEmpty())
12717  {
12718  m_HasEmptyBlock = true;
12719  break;
12720  }
12721  }
12722 }
12723 
12724 #if VMA_STATS_STRING_ENABLED
12725 
12726 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12727 {
12728  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12729 
12730  json.BeginObject();
12731 
12732  if(IsCustomPool())
12733  {
12734  const char* poolName = m_hParentPool->GetName();
12735  if(poolName != VMA_NULL && poolName[0] != '\0')
12736  {
12737  json.WriteString("Name");
12738  json.WriteString(poolName);
12739  }
12740 
12741  json.WriteString("MemoryTypeIndex");
12742  json.WriteNumber(m_MemoryTypeIndex);
12743 
12744  json.WriteString("BlockSize");
12745  json.WriteNumber(m_PreferredBlockSize);
12746 
12747  json.WriteString("BlockCount");
12748  json.BeginObject(true);
12749  if(m_MinBlockCount > 0)
12750  {
12751  json.WriteString("Min");
12752  json.WriteNumber((uint64_t)m_MinBlockCount);
12753  }
12754  if(m_MaxBlockCount < SIZE_MAX)
12755  {
12756  json.WriteString("Max");
12757  json.WriteNumber((uint64_t)m_MaxBlockCount);
12758  }
12759  json.WriteString("Cur");
12760  json.WriteNumber((uint64_t)m_Blocks.size());
12761  json.EndObject();
12762 
12763  if(m_FrameInUseCount > 0)
12764  {
12765  json.WriteString("FrameInUseCount");
12766  json.WriteNumber(m_FrameInUseCount);
12767  }
12768 
12769  if(m_Algorithm != 0)
12770  {
12771  json.WriteString("Algorithm");
12772  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12773  }
12774  }
12775  else
12776  {
12777  json.WriteString("PreferredBlockSize");
12778  json.WriteNumber(m_PreferredBlockSize);
12779  }
12780 
12781  json.WriteString("Blocks");
12782  json.BeginObject();
12783  for(size_t i = 0; i < m_Blocks.size(); ++i)
12784  {
12785  json.BeginString();
12786  json.ContinueString(m_Blocks[i]->GetId());
12787  json.EndString();
12788 
12789  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12790  }
12791  json.EndObject();
12792 
12793  json.EndObject();
12794 }
12795 
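/*
Example (illustrative, not part of the library): PrintDetailedMap() above feeds
the JSON produced by vmaBuildStatsString() when detailedMap is VK_TRUE. Assumes
a valid `allocator`.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // Write statsString to a file or log; it contains the per-block maps.
    vmaFreeStatsString(allocator, statsString);
*/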
12796 #endif // #if VMA_STATS_STRING_ENABLED
12797 
12798 void VmaBlockVector::Defragment(
12799  class VmaBlockVectorDefragmentationContext* pCtx,
12800  VmaDefragmentationStats* pStats,
12801  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12802  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12803  VkCommandBuffer commandBuffer)
12804 {
12805  pCtx->res = VK_SUCCESS;
12806 
12807  const VkMemoryPropertyFlags memPropFlags =
12808  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12809  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12810 
12811  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12812  isHostVisible;
12813  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12814  !IsCorruptionDetectionEnabled() &&
12815  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12816 
12817  // There are options to defragment this memory type.
12818  if(canDefragmentOnCpu || canDefragmentOnGpu)
12819  {
12820  bool defragmentOnGpu;
12821  // There is only one option to defragment this memory type.
12822  if(canDefragmentOnGpu != canDefragmentOnCpu)
12823  {
12824  defragmentOnGpu = canDefragmentOnGpu;
12825  }
12826  // Both options are available: Heuristics to choose the best one.
12827  else
12828  {
12829  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12830  m_hAllocator->IsIntegratedGpu();
12831  }
12832 
12833  bool overlappingMoveSupported = !defragmentOnGpu;
12834 
12835  if(m_hAllocator->m_UseMutex)
12836  {
12837  m_Mutex.LockWrite();
12838  pCtx->mutexLocked = true;
12839  }
12840 
12841  pCtx->Begin(overlappingMoveSupported);
12842 
12843  // Defragment.
12844 
12845  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12846  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12847  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12848  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12849  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12850 
12851  // Accumulate statistics.
12852  if(pStats != VMA_NULL)
12853  {
12854  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12855  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12856  pStats->bytesMoved += bytesMoved;
12857  pStats->allocationsMoved += allocationsMoved;
12858  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12859  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12860  if(defragmentOnGpu)
12861  {
12862  maxGpuBytesToMove -= bytesMoved;
12863  maxGpuAllocationsToMove -= allocationsMoved;
12864  }
12865  else
12866  {
12867  maxCpuBytesToMove -= bytesMoved;
12868  maxCpuAllocationsToMove -= allocationsMoved;
12869  }
12870  }
12871 
12872  if(pCtx->res >= VK_SUCCESS)
12873  {
12874  if(defragmentOnGpu)
12875  {
12876  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12877  }
12878  else
12879  {
12880  ApplyDefragmentationMovesCpu(pCtx, moves);
12881  }
12882  }
12883  }
12884 }
12885 
12886 void VmaBlockVector::DefragmentationEnd(
12887  class VmaBlockVectorDefragmentationContext* pCtx,
12888  VmaDefragmentationStats* pStats)
12889 {
12890  // Destroy buffers.
12891  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12892  {
12893  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12894  if(blockCtx.hBuffer)
12895  {
12896  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12897  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12898  }
12899  }
12900 
12901  if(pCtx->res >= VK_SUCCESS)
12902  {
12903  FreeEmptyBlocks(pStats);
12904  }
12905 
12906  if(pCtx->mutexLocked)
12907  {
12908  VMA_ASSERT(m_hAllocator->m_UseMutex);
12909  m_Mutex.UnlockWrite();
12910  }
12911 }
12912 
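/*
Example (a hedged sketch, not part of the library): Defragment() and
DefragmentationEnd() above are driven by the public begin/end pair. Assumes a
valid `allocator` and an array `allocs` of `allocCount` allocations; passing
VK_NULL_HANDLE as commandBuffer restricts defragmentation to the CPU path.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-side moves only.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers and images bound to moved allocations must be re-created and
    // re-bound by the application afterwards.
*/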
12913 size_t VmaBlockVector::CalcAllocationCount() const
12914 {
12915  size_t result = 0;
12916  for(size_t i = 0; i < m_Blocks.size(); ++i)
12917  {
12918  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12919  }
12920  return result;
12921 }
12922 
12923 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12924 {
12925  if(m_BufferImageGranularity == 1)
12926  {
12927  return false;
12928  }
12929  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12930  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12931  {
12932  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12933  VMA_ASSERT(m_Algorithm == 0);
12934  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12935  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12936  {
12937  return true;
12938  }
12939  }
12940  return false;
12941 }
12942 
12943 void VmaBlockVector::MakePoolAllocationsLost(
12944  uint32_t currentFrameIndex,
12945  size_t* pLostAllocationCount)
12946 {
12947  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12948  size_t lostAllocationCount = 0;
12949  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12950  {
12951  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12952  VMA_ASSERT(pBlock);
12953  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12954  }
12955  if(pLostAllocationCount != VMA_NULL)
12956  {
12957  *pLostAllocationCount = lostAllocationCount;
12958  }
12959 }
12960 
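/*
Example (illustrative, not part of the library): lost allocations rely on the
application advancing the frame index. An allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT may be reclaimed once it has not been
touched for more than frameInUseCount frames. Assumes a valid `allocator` and
an `alloc` created with that flag.

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation was made lost - its memory is gone; re-create the resource.
    }
*/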
12961 VkResult VmaBlockVector::CheckCorruption()
12962 {
12963  if(!IsCorruptionDetectionEnabled())
12964  {
12965  return VK_ERROR_FEATURE_NOT_PRESENT;
12966  }
12967 
12968  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12969  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12970  {
12971  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12972  VMA_ASSERT(pBlock);
12973  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12974  if(res != VK_SUCCESS)
12975  {
12976  return res;
12977  }
12978  }
12979  return VK_SUCCESS;
12980 }
12981 
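/*
Example (illustrative): CheckCorruption() above is reachable through the public
functions below; both return VK_ERROR_FEATURE_NOT_PRESENT when corruption
detection is not enabled for the given memory (see
IsCorruptionDetectionEnabled() above). Assumes valid handles.

    VkResult res = vmaCheckPoolCorruption(allocator, pool); // One custom pool.
    res = vmaCheckCorruption(allocator, UINT32_MAX);        // All memory types.
*/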
12982 void VmaBlockVector::AddStats(VmaStats* pStats)
12983 {
12984  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12985  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12986 
12987  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12988 
12989  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12990  {
12991  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12992  VMA_ASSERT(pBlock);
12993  VMA_HEAVY_ASSERT(pBlock->Validate());
12994  VmaStatInfo allocationStatInfo;
12995  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12996  VmaAddStatInfo(pStats->total, allocationStatInfo);
12997  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12998  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12999  }
13000 }
13001 
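/*
Example (a hedged sketch): AddStats() above contributes to the totals returned
by vmaCalculateStats(). Assumes a valid `allocator`.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Total used bytes: %llu, unused bytes: %llu\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
*/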
13002 ////////////////////////////////////////////////////////////////////////////////
13003 // VmaDefragmentationAlgorithm_Generic members definition
13004 
13005 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13006  VmaAllocator hAllocator,
13007  VmaBlockVector* pBlockVector,
13008  uint32_t currentFrameIndex,
13009  bool overlappingMoveSupported) :
13010  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13011  m_AllocationCount(0),
13012  m_AllAllocations(false),
13013  m_BytesMoved(0),
13014  m_AllocationsMoved(0),
13015  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13016 {
13017  // Create block info for each block.
13018  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13019  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13020  {
13021  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13022  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13023  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13024  m_Blocks.push_back(pBlockInfo);
13025  }
13026 
13027  // Sort them by m_pBlock pointer value.
13028  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13029 }
13030 
13031 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13032 {
13033  for(size_t i = m_Blocks.size(); i--; )
13034  {
13035  vma_delete(m_hAllocator, m_Blocks[i]);
13036  }
13037 }
13038 
13039 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13040 {
13041  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
13042  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13043  {
13044  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13045  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13046  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13047  {
13048  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13049  (*it)->m_Allocations.push_back(allocInfo);
13050  }
13051  else
13052  {
13053  VMA_ASSERT(0);
13054  }
13055 
13056  ++m_AllocationCount;
13057  }
13058 }
13059 
13060 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13061  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13062  VkDeviceSize maxBytesToMove,
13063  uint32_t maxAllocationsToMove)
13064 {
13065  if(m_Blocks.empty())
13066  {
13067  return VK_SUCCESS;
13068  }
13069 
13070  // This is a choice based on research.
13071  // Option 1:
13072  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13073  // Option 2:
13074  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13075  // Option 3:
13076  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13077 
13078  size_t srcBlockMinIndex = 0;
13079  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
13080  /*
13081  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13082  {
13083  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13084  if(blocksWithNonMovableCount > 0)
13085  {
13086  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13087  }
13088  }
13089  */
13090 
13091  size_t srcBlockIndex = m_Blocks.size() - 1;
13092  size_t srcAllocIndex = SIZE_MAX;
13093  for(;;)
13094  {
13095  // 1. Find next allocation to move.
13096  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13097  // 1.2. Then start from last to first m_Allocations.
13098  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13099  {
13100  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13101  {
13102  // Finished: no more allocations to process.
13103  if(srcBlockIndex == srcBlockMinIndex)
13104  {
13105  return VK_SUCCESS;
13106  }
13107  else
13108  {
13109  --srcBlockIndex;
13110  srcAllocIndex = SIZE_MAX;
13111  }
13112  }
13113  else
13114  {
13115  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13116  }
13117  }
13118 
13119  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13120  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13121 
13122  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13123  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13124  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13125  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13126 
13127  // 2. Try to find new place for this allocation in preceding or current block.
13128  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13129  {
13130  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13131  VmaAllocationRequest dstAllocRequest;
13132  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13133  m_CurrentFrameIndex,
13134  m_pBlockVector->GetFrameInUseCount(),
13135  m_pBlockVector->GetBufferImageGranularity(),
13136  size,
13137  alignment,
13138  false, // upperAddress
13139  suballocType,
13140  false, // canMakeOtherLost
13141  strategy,
13142  &dstAllocRequest) &&
13143  MoveMakesSense(
13144  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13145  {
13146  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13147 
13148  // Reached limit on number of allocations or bytes to move.
13149  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13150  (m_BytesMoved + size > maxBytesToMove))
13151  {
13152  return VK_SUCCESS;
13153  }
13154 
13155  VmaDefragmentationMove move;
13156  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13157  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13158  move.srcOffset = srcOffset;
13159  move.dstOffset = dstAllocRequest.offset;
13160  move.size = size;
13161  moves.push_back(move);
13162 
13163  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13164  dstAllocRequest,
13165  suballocType,
13166  size,
13167  allocInfo.m_hAllocation);
13168  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13169 
13170  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13171 
13172  if(allocInfo.m_pChanged != VMA_NULL)
13173  {
13174  *allocInfo.m_pChanged = VK_TRUE;
13175  }
13176 
13177  ++m_AllocationsMoved;
13178  m_BytesMoved += size;
13179 
13180  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13181 
13182  break;
13183  }
13184  }
13185 
13186  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13187 
13188  if(srcAllocIndex > 0)
13189  {
13190  --srcAllocIndex;
13191  }
13192  else
13193  {
13194  if(srcBlockIndex > 0)
13195  {
13196  --srcBlockIndex;
13197  srcAllocIndex = SIZE_MAX;
13198  }
13199  else
13200  {
13201  return VK_SUCCESS;
13202  }
13203  }
13204  }
13205 }
13206 
13207 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13208 {
13209  size_t result = 0;
13210  for(size_t i = 0; i < m_Blocks.size(); ++i)
13211  {
13212  if(m_Blocks[i]->m_HasNonMovableAllocations)
13213  {
13214  ++result;
13215  }
13216  }
13217  return result;
13218 }
13219 
13220 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13221  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13222  VkDeviceSize maxBytesToMove,
13223  uint32_t maxAllocationsToMove)
13224 {
13225  if(!m_AllAllocations && m_AllocationCount == 0)
13226  {
13227  return VK_SUCCESS;
13228  }
13229 
13230  const size_t blockCount = m_Blocks.size();
13231  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13232  {
13233  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13234 
13235  if(m_AllAllocations)
13236  {
13237  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13238  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13239  it != pMetadata->m_Suballocations.end();
13240  ++it)
13241  {
13242  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13243  {
13244  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13245  pBlockInfo->m_Allocations.push_back(allocInfo);
13246  }
13247  }
13248  }
13249 
13250  pBlockInfo->CalcHasNonMovableAllocations();
13251 
13252  // This is a choice based on research.
13253  // Option 1:
13254  pBlockInfo->SortAllocationsByOffsetDescending();
13255  // Option 2:
13256  //pBlockInfo->SortAllocationsBySizeDescending();
13257  }
13258 
13259  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
13260  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13261 
13262  // This is a choice based on research.
13263  const uint32_t roundCount = 2;
13264 
13265  // Execute defragmentation rounds (the main part).
13266  VkResult result = VK_SUCCESS;
13267  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13268  {
13269  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
13270  }
13271 
13272  return result;
13273 }
13274 
13275 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13276  size_t dstBlockIndex, VkDeviceSize dstOffset,
13277  size_t srcBlockIndex, VkDeviceSize srcOffset)
13278 {
13279  if(dstBlockIndex < srcBlockIndex)
13280  {
13281  return true;
13282  }
13283  if(dstBlockIndex > srcBlockIndex)
13284  {
13285  return false;
13286  }
13287  if(dstOffset < srcOffset)
13288  {
13289  return true;
13290  }
13291  return false;
13292 }
13293 
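 // Note: the three comparisons above implement a lexicographic
 // "(dstBlockIndex, dstOffset) < (srcBlockIndex, srcOffset)" test, so an
 // allocation only ever moves toward the front of the block list, or toward
 // offset 0 within its own block - the invariant that prevents allocations
 // from oscillating between defragmentation rounds.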
13294 ////////////////////////////////////////////////////////////////////////////////
13295 // VmaDefragmentationAlgorithm_Fast
13296 
13297 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13298  VmaAllocator hAllocator,
13299  VmaBlockVector* pBlockVector,
13300  uint32_t currentFrameIndex,
13301  bool overlappingMoveSupported) :
13302  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13303  m_OverlappingMoveSupported(overlappingMoveSupported),
13304  m_AllocationCount(0),
13305  m_AllAllocations(false),
13306  m_BytesMoved(0),
13307  m_AllocationsMoved(0),
13308  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13309 {
13310  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13311 
13312 }
13313 
13314 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13315 {
13316 }
13317 
13318 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13319  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13320  VkDeviceSize maxBytesToMove,
13321  uint32_t maxAllocationsToMove)
13322 {
13323  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13324 
13325  const size_t blockCount = m_pBlockVector->GetBlockCount();
13326  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13327  {
13328  return VK_SUCCESS;
13329  }
13330 
13331  PreprocessMetadata();
13332 
13333  // Sort blocks in order from most "destination" to most "source".
13334 
13335  m_BlockInfos.resize(blockCount);
13336  for(size_t i = 0; i < blockCount; ++i)
13337  {
13338  m_BlockInfos[i].origBlockIndex = i;
13339  }
13340 
13341  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13342  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13343  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13344  });
13345 
13346  // THE MAIN ALGORITHM
13347 
13348  FreeSpaceDatabase freeSpaceDb;
13349 
13350  size_t dstBlockInfoIndex = 0;
13351  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13352  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13353  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13354  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13355  VkDeviceSize dstOffset = 0;
13356 
13357  bool end = false;
13358  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13359  {
13360  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13361  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13362  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13363  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13364  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13365  {
13366  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13367  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13368  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13369  if(m_AllocationsMoved == maxAllocationsToMove ||
13370  m_BytesMoved + srcAllocSize > maxBytesToMove)
13371  {
13372  end = true;
13373  break;
13374  }
13375  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13376 
13377  // Try to place it in one of free spaces from the database.
13378  size_t freeSpaceInfoIndex;
13379  VkDeviceSize dstAllocOffset;
13380  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13381  freeSpaceInfoIndex, dstAllocOffset))
13382  {
13383  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13384  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13385  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13386 
13387  // Same block
13388  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13389  {
13390  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13391 
13392  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13393 
13394  VmaSuballocation suballoc = *srcSuballocIt;
13395  suballoc.offset = dstAllocOffset;
13396  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13397  m_BytesMoved += srcAllocSize;
13398  ++m_AllocationsMoved;
13399 
13400  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13401  ++nextSuballocIt;
13402  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13403  srcSuballocIt = nextSuballocIt;
13404 
13405  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13406 
13407  VmaDefragmentationMove move = {
13408  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13409  srcAllocOffset, dstAllocOffset,
13410  srcAllocSize };
13411  moves.push_back(move);
13412  }
13413  // Different block
13414  else
13415  {
13416  // MOVE OPTION 2: Move the allocation to a different block.
13417 
13418  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13419 
13420  VmaSuballocation suballoc = *srcSuballocIt;
13421  suballoc.offset = dstAllocOffset;
13422  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13423  m_BytesMoved += srcAllocSize;
13424  ++m_AllocationsMoved;
13425 
13426  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13427  ++nextSuballocIt;
13428  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13429  srcSuballocIt = nextSuballocIt;
13430 
13431  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13432 
13433  VmaDefragmentationMove move = {
13434  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13435  srcAllocOffset, dstAllocOffset,
13436  srcAllocSize };
13437  moves.push_back(move);
13438  }
13439  }
13440  else
13441  {
13442  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13443 
13444  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13445  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13446  dstAllocOffset + srcAllocSize > dstBlockSize)
13447  {
13448  // But before that, register remaining free space at the end of dst block.
13449  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13450 
13451  ++dstBlockInfoIndex;
13452  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13453  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13454  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13455  dstBlockSize = pDstMetadata->GetSize();
13456  dstOffset = 0;
13457  dstAllocOffset = 0;
13458  }
13459 
13460  // Same block
13461  if(dstBlockInfoIndex == srcBlockInfoIndex)
13462  {
13463  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13464 
13465  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13466 
13467  bool skipOver = overlap;
13468  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13469  {
13470  // If destination and source places overlap, skip the move if it would
13471  // shift the allocation by only < 1/64 of its size.
13472  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13473  }
13474 
13475  if(skipOver)
13476  {
13477  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13478 
13479  dstOffset = srcAllocOffset + srcAllocSize;
13480  ++srcSuballocIt;
13481  }
13482  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13483  else
13484  {
13485  srcSuballocIt->offset = dstAllocOffset;
13486  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13487  dstOffset = dstAllocOffset + srcAllocSize;
13488  m_BytesMoved += srcAllocSize;
13489  ++m_AllocationsMoved;
13490  ++srcSuballocIt;
13491  VmaDefragmentationMove move = {
13492  srcOrigBlockIndex, dstOrigBlockIndex,
13493  srcAllocOffset, dstAllocOffset,
13494  srcAllocSize };
13495  moves.push_back(move);
13496  }
13497  }
13498  // Different block
13499  else
13500  {
13501  // MOVE OPTION 2: Move the allocation to a different block.
13502 
13503  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13504  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13505 
13506  VmaSuballocation suballoc = *srcSuballocIt;
13507  suballoc.offset = dstAllocOffset;
13508  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13509  dstOffset = dstAllocOffset + srcAllocSize;
13510  m_BytesMoved += srcAllocSize;
13511  ++m_AllocationsMoved;
13512 
13513  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13514  ++nextSuballocIt;
13515  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13516  srcSuballocIt = nextSuballocIt;
13517 
13518  pDstMetadata->m_Suballocations.push_back(suballoc);
13519 
13520  VmaDefragmentationMove move = {
13521  srcOrigBlockIndex, dstOrigBlockIndex,
13522  srcAllocOffset, dstAllocOffset,
13523  srcAllocSize };
13524  moves.push_back(move);
13525  }
13526  }
13527  }
13528  }
13529 
13530  m_BlockInfos.clear();
13531 
13532  PostprocessMetadata();
13533 
13534  return VK_SUCCESS;
13535 }
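// The `moves` vector filled by this function is consumed by the caller, which
// issues one copy per entry (source block/offset -> destination block/offset,
// `size` bytes). A minimal sketch, assuming hypothetical helpers BlockMemory()
// and CopyBytes() that are not part of this library:
//
//   for(size_t i = 0; i < moves.size(); ++i)
//   {
//       const VmaDefragmentationMove& m = moves[i];
//       CopyBytes(BlockMemory(m.srcBlockIndex), m.srcOffset,
//                 BlockMemory(m.dstBlockIndex), m.dstOffset,
//                 m.size);
//   }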
13536 
13537 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13538 {
13539  const size_t blockCount = m_pBlockVector->GetBlockCount();
13540  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13541  {
13542  VmaBlockMetadata_Generic* const pMetadata =
13543  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13544  pMetadata->m_FreeCount = 0;
13545  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13546  pMetadata->m_FreeSuballocationsBySize.clear();
13547  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13548  it != pMetadata->m_Suballocations.end(); )
13549  {
13550  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13551  {
13552  VmaSuballocationList::iterator nextIt = it;
13553  ++nextIt;
13554  pMetadata->m_Suballocations.erase(it);
13555  it = nextIt;
13556  }
13557  else
13558  {
13559  ++it;
13560  }
13561  }
13562  }
13563 }
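// Note: PreprocessMetadata() strips every FREE suballocation and resets the
// free-list bookkeeping, leaving only real allocations in each block. The
// erase-while-iterating idiom above saves the successor before calling
// erase(), because erasing invalidates the erased iterator.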
13564 
13565 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13566 {
13567  const size_t blockCount = m_pBlockVector->GetBlockCount();
13568  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13569  {
13570  VmaBlockMetadata_Generic* const pMetadata =
13571  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13572  const VkDeviceSize blockSize = pMetadata->GetSize();
13573 
13574  // No allocations in this block - entire area is free.
13575  if(pMetadata->m_Suballocations.empty())
13576  {
13577  pMetadata->m_FreeCount = 1;
13578  //pMetadata->m_SumFreeSize is already set to blockSize.
13579  VmaSuballocation suballoc = {
13580  0, // offset
13581  blockSize, // size
13582  VMA_NULL, // hAllocation
13583  VMA_SUBALLOCATION_TYPE_FREE };
13584  pMetadata->m_Suballocations.push_back(suballoc);
13585  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13586  }
13587  // There are some allocations in this block.
13588  else
13589  {
13590  VkDeviceSize offset = 0;
13591  VmaSuballocationList::iterator it;
13592  for(it = pMetadata->m_Suballocations.begin();
13593  it != pMetadata->m_Suballocations.end();
13594  ++it)
13595  {
13596  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13597  VMA_ASSERT(it->offset >= offset);
13598 
13599  // Need to insert preceding free space.
13600  if(it->offset > offset)
13601  {
13602  ++pMetadata->m_FreeCount;
13603  const VkDeviceSize freeSize = it->offset - offset;
13604  VmaSuballocation suballoc = {
13605  offset, // offset
13606  freeSize, // size
13607  VMA_NULL, // hAllocation
13608  VMA_SUBALLOCATION_TYPE_FREE };
13609  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13610  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13611  {
13612  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13613  }
13614  }
13615 
13616  pMetadata->m_SumFreeSize -= it->size;
13617  offset = it->offset + it->size;
13618  }
13619 
13620  // Need to insert trailing free space.
13621  if(offset < blockSize)
13622  {
13623  ++pMetadata->m_FreeCount;
13624  const VkDeviceSize freeSize = blockSize - offset;
13625  VmaSuballocation suballoc = {
13626  offset, // offset
13627  freeSize, // size
13628  VMA_NULL, // hAllocation
13629  VMA_SUBALLOCATION_TYPE_FREE };
13630  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13631  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13632  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13633  {
13634  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13635  }
13636  }
13637 
13638  VMA_SORT(
13639  pMetadata->m_FreeSuballocationsBySize.begin(),
13640  pMetadata->m_FreeSuballocationsBySize.end(),
13641  VmaSuballocationItemSizeLess());
13642  }
13643 
13644  VMA_HEAVY_ASSERT(pMetadata->Validate());
13645  }
13646 }
13647 
13648 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13649 {
13650  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13651  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13652  while(it != pMetadata->m_Suballocations.end() &&
13653  it->offset < suballoc.offset)
13654  {
13655  ++it;
13656  }
13659  pMetadata->m_Suballocations.insert(it, suballoc);
13660 }
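// Equivalent formulation of the linear search above using <algorithm>
// (illustrative only; behavior is identical):
//
//   VmaSuballocationList::iterator it = std::find_if(
//       pMetadata->m_Suballocations.begin(),
//       pMetadata->m_Suballocations.end(),
//       [&suballoc](const VmaSuballocation& s) { return s.offset >= suballoc.offset; });
//   pMetadata->m_Suballocations.insert(it, suballoc);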
13661 
13662 ////////////////////////////////////////////////////////////////////////////////
13663 // VmaBlockVectorDefragmentationContext
13664 
13665 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13666  VmaAllocator hAllocator,
13667  VmaPool hCustomPool,
13668  VmaBlockVector* pBlockVector,
13669  uint32_t currFrameIndex) :
13670  res(VK_SUCCESS),
13671  mutexLocked(false),
13672  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13673  m_hAllocator(hAllocator),
13674  m_hCustomPool(hCustomPool),
13675  m_pBlockVector(pBlockVector),
13676  m_CurrFrameIndex(currFrameIndex),
13677  m_pAlgorithm(VMA_NULL),
13678  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13679  m_AllAllocations(false)
13680 {
13681 }
13682 
13683 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13684 {
13685  vma_delete(m_hAllocator, m_pAlgorithm);
13686 }
13687 
13688 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13689 {
13690  AllocInfo info = { hAlloc, pChanged };
13691  m_Allocations.push_back(info);
13692 }
13693 
13694 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13695 {
13696  const bool allAllocations = m_AllAllocations ||
13697  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13698 
13699  /********************************
13700  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13701  ********************************/
13702 
13703  /*
13704  Fast algorithm is supported only when certain criteria are met:
13705  - VMA_DEBUG_MARGIN is 0.
13706  - All allocations in this block vector are moveable.
13707  - There is no possibility of image/buffer granularity conflict.
13708  */
13709  if(VMA_DEBUG_MARGIN == 0 &&
13710  allAllocations &&
13711  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13712  {
13713  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13714  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13715  }
13716  else
13717  {
13718  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13719  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13720  }
13721 
13722  if(allAllocations)
13723  {
13724  m_pAlgorithm->AddAll();
13725  }
13726  else
13727  {
13728  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13729  {
13730  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13731  }
13732  }
13733 }
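// How a client typically reaches Begin(): through the public defragmentation
// API. A sketch with placeholder values (allocator, allocations, allocCount
// are assumed to exist in the calling code):
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.allocationCount = allocCount;
//   defragInfo.pAllocations = allocations;
//   VmaDefragmentationContext defragCtx = VMA_NULL;
//   vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//   // ... if defragInfo.commandBuffer was set, submit it and wait here ...
//   vmaDefragmentationEnd(allocator, defragCtx);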
13734 
13735 ////////////////////////////////////////////////////////////////////////////////
13736 // VmaDefragmentationContext
13737 
13738 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13739  VmaAllocator hAllocator,
13740  uint32_t currFrameIndex,
13741  uint32_t flags,
13742  VmaDefragmentationStats* pStats) :
13743  m_hAllocator(hAllocator),
13744  m_CurrFrameIndex(currFrameIndex),
13745  m_Flags(flags),
13746  m_pStats(pStats),
13747  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13748 {
13749  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13750 }
13751 
13752 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13753 {
13754  for(size_t i = m_CustomPoolContexts.size(); i--; )
13755  {
13756  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13757  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13758  vma_delete(m_hAllocator, pBlockVectorCtx);
13759  }
13760  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13761  {
13762  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13763  if(pBlockVectorCtx)
13764  {
13765  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13766  vma_delete(m_hAllocator, pBlockVectorCtx);
13767  }
13768  }
13769 }
13770 
13771 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13772 {
13773  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13774  {
13775  VmaPool pool = pPools[poolIndex];
13776  VMA_ASSERT(pool);
13777  // Pools with algorithm other than default are not defragmented.
13778  if(pool->m_BlockVector.GetAlgorithm() == 0)
13779  {
13780  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13781 
13782  for(size_t i = m_CustomPoolContexts.size(); i--; )
13783  {
13784  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13785  {
13786  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13787  break;
13788  }
13789  }
13790 
13791  if(!pBlockVectorDefragCtx)
13792  {
13793  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13794  m_hAllocator,
13795  pool,
13796  &pool->m_BlockVector,
13797  m_CurrFrameIndex);
13798  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13799  }
13800 
13801  pBlockVectorDefragCtx->AddAll();
13802  }
13803  }
13804 }
13805 
13806 void VmaDefragmentationContext_T::AddAllocations(
13807  uint32_t allocationCount,
13808  VmaAllocation* pAllocations,
13809  VkBool32* pAllocationsChanged)
13810 {
13811  // Dispatch pAllocations among defragmentators. Create them when necessary.
13812  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13813  {
13814  const VmaAllocation hAlloc = pAllocations[allocIndex];
13815  VMA_ASSERT(hAlloc);
13816  // DedicatedAlloc cannot be defragmented.
13817  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13818  // Lost allocation cannot be defragmented.
13819  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13820  {
13821  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13822 
13823  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13824  // This allocation belongs to custom pool.
13825  if(hAllocPool != VK_NULL_HANDLE)
13826  {
13827  // Pools with algorithm other than default are not defragmented.
13828  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13829  {
13830  for(size_t i = m_CustomPoolContexts.size(); i--; )
13831  {
13832  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13833  {
13834  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13835  break;
13836  }
13837  }
13838  if(!pBlockVectorDefragCtx)
13839  {
13840  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13841  m_hAllocator,
13842  hAllocPool,
13843  &hAllocPool->m_BlockVector,
13844  m_CurrFrameIndex);
13845  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13846  }
13847  }
13848  }
13849  // This allocation belongs to default pool.
13850  else
13851  {
13852  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13853  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13854  if(!pBlockVectorDefragCtx)
13855  {
13856  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13857  m_hAllocator,
13858  VMA_NULL, // hCustomPool
13859  m_hAllocator->m_pBlockVectors[memTypeIndex],
13860  m_CurrFrameIndex);
13861  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13862  }
13863  }
13864 
13865  if(pBlockVectorDefragCtx)
13866  {
13867  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13868  &pAllocationsChanged[allocIndex] : VMA_NULL;
13869  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13870  }
13871  }
13872  }
13873 }
13874 
13875 VkResult VmaDefragmentationContext_T::Defragment(
13876  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13877  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13878  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13879 {
13880  if(pStats)
13881  {
13882  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13883  }
13884 
13885  if(commandBuffer == VK_NULL_HANDLE)
13886  {
13887  maxGpuBytesToMove = 0;
13888  maxGpuAllocationsToMove = 0;
13889  }
13890 
13891  VkResult res = VK_SUCCESS;
13892 
13893  // Process default pools.
13894  for(uint32_t memTypeIndex = 0;
13895  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13896  ++memTypeIndex)
13897  {
13898  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13899  if(pBlockVectorCtx)
13900  {
13901  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13902  pBlockVectorCtx->GetBlockVector()->Defragment(
13903  pBlockVectorCtx,
13904  pStats,
13905  maxCpuBytesToMove, maxCpuAllocationsToMove,
13906  maxGpuBytesToMove, maxGpuAllocationsToMove,
13907  commandBuffer);
13908  if(pBlockVectorCtx->res != VK_SUCCESS)
13909  {
13910  res = pBlockVectorCtx->res;
13911  }
13912  }
13913  }
13914 
13915  // Process custom pools.
13916  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13917  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13918  ++customCtxIndex)
13919  {
13920  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13921  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13922  pBlockVectorCtx->GetBlockVector()->Defragment(
13923  pBlockVectorCtx,
13924  pStats,
13925  maxCpuBytesToMove, maxCpuAllocationsToMove,
13926  maxGpuBytesToMove, maxGpuAllocationsToMove,
13927  commandBuffer);
13928  if(pBlockVectorCtx->res != VK_SUCCESS)
13929  {
13930  res = pBlockVectorCtx->res;
13931  }
13932  }
13933 
13934  return res;
13935 }
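// Semantics of the limits above, by example: calling with
// commandBuffer == VK_NULL_HANDLE zeroes the GPU limits, so only host-side
// moves bounded by maxCpuBytesToMove / maxCpuAllocationsToMove happen;
// passing a command buffer additionally permits device-side copies up to
// maxGpuBytesToMove / maxGpuAllocationsToMove.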
13936 
13937 ////////////////////////////////////////////////////////////////////////////////
13938 // VmaRecorder
13939 
13940 #if VMA_RECORDING_ENABLED
13941 
13942 VmaRecorder::VmaRecorder() :
13943  m_UseMutex(true),
13944  m_Flags(0),
13945  m_File(VMA_NULL),
13946  m_Freq(INT64_MAX),
13947  m_StartCounter(INT64_MAX)
13948 {
13949 }
13950 
13951 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13952 {
13953  m_UseMutex = useMutex;
13954  m_Flags = settings.flags;
13955 
13956  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13957  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13958 
13959  // Open file for writing.
13960  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13961  if(err != 0)
13962  {
13963  return VK_ERROR_INITIALIZATION_FAILED;
13964  }
13965 
13966  // Write header.
13967  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13968  fprintf(m_File, "%s\n", "1,8");
13969 
13970  return VK_SUCCESS;
13971 }
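// The resulting file is CSV: the two header lines written above, then one
// line per recorded call of the form
// threadId,time,frameIndex,functionName,params...
// For example (values illustrative):
//
//   Vulkan Memory Allocator,Calls recording
//   1,8
//   12552,0.002,0,vmaCreateAllocator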
13972 
13973 VmaRecorder::~VmaRecorder()
13974 {
13975  if(m_File != VMA_NULL)
13976  {
13977  fclose(m_File);
13978  }
13979 }
13980 
13981 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13982 {
13983  CallParams callParams;
13984  GetBasicParams(callParams);
13985 
13986  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13987  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13988  Flush();
13989 }
13990 
13991 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13992 {
13993  CallParams callParams;
13994  GetBasicParams(callParams);
13995 
13996  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13997  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13998  Flush();
13999 }
14000 
14001 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14002 {
14003  CallParams callParams;
14004  GetBasicParams(callParams);
14005 
14006  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14007  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14008  createInfo.memoryTypeIndex,
14009  createInfo.flags,
14010  createInfo.blockSize,
14011  (uint64_t)createInfo.minBlockCount,
14012  (uint64_t)createInfo.maxBlockCount,
14013  createInfo.frameInUseCount,
14014  pool);
14015  Flush();
14016 }
14017 
14018 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14019 {
14020  CallParams callParams;
14021  GetBasicParams(callParams);
14022 
14023  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14024  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14025  pool);
14026  Flush();
14027 }
14028 
14029 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14030  const VkMemoryRequirements& vkMemReq,
14031  const VmaAllocationCreateInfo& createInfo,
14032  VmaAllocation allocation)
14033 {
14034  CallParams callParams;
14035  GetBasicParams(callParams);
14036 
14037  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14038  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14039  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14040  vkMemReq.size,
14041  vkMemReq.alignment,
14042  vkMemReq.memoryTypeBits,
14043  createInfo.flags,
14044  createInfo.usage,
14045  createInfo.requiredFlags,
14046  createInfo.preferredFlags,
14047  createInfo.memoryTypeBits,
14048  createInfo.pool,
14049  allocation,
14050  userDataStr.GetString());
14051  Flush();
14052 }
14053 
14054 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14055  const VkMemoryRequirements& vkMemReq,
14056  const VmaAllocationCreateInfo& createInfo,
14057  uint64_t allocationCount,
14058  const VmaAllocation* pAllocations)
14059 {
14060  CallParams callParams;
14061  GetBasicParams(callParams);
14062 
14063  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14064  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14065  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14066  vkMemReq.size,
14067  vkMemReq.alignment,
14068  vkMemReq.memoryTypeBits,
14069  createInfo.flags,
14070  createInfo.usage,
14071  createInfo.requiredFlags,
14072  createInfo.preferredFlags,
14073  createInfo.memoryTypeBits,
14074  createInfo.pool);
14075  PrintPointerList(allocationCount, pAllocations);
14076  fprintf(m_File, ",%s\n", userDataStr.GetString());
14077  Flush();
14078 }
14079 
14080 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14081  const VkMemoryRequirements& vkMemReq,
14082  bool requiresDedicatedAllocation,
14083  bool prefersDedicatedAllocation,
14084  const VmaAllocationCreateInfo& createInfo,
14085  VmaAllocation allocation)
14086 {
14087  CallParams callParams;
14088  GetBasicParams(callParams);
14089 
14090  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14091  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14092  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14093  vkMemReq.size,
14094  vkMemReq.alignment,
14095  vkMemReq.memoryTypeBits,
14096  requiresDedicatedAllocation ? 1 : 0,
14097  prefersDedicatedAllocation ? 1 : 0,
14098  createInfo.flags,
14099  createInfo.usage,
14100  createInfo.requiredFlags,
14101  createInfo.preferredFlags,
14102  createInfo.memoryTypeBits,
14103  createInfo.pool,
14104  allocation,
14105  userDataStr.GetString());
14106  Flush();
14107 }
14108 
14109 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14110  const VkMemoryRequirements& vkMemReq,
14111  bool requiresDedicatedAllocation,
14112  bool prefersDedicatedAllocation,
14113  const VmaAllocationCreateInfo& createInfo,
14114  VmaAllocation allocation)
14115 {
14116  CallParams callParams;
14117  GetBasicParams(callParams);
14118 
14119  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14120  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14121  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14122  vkMemReq.size,
14123  vkMemReq.alignment,
14124  vkMemReq.memoryTypeBits,
14125  requiresDedicatedAllocation ? 1 : 0,
14126  prefersDedicatedAllocation ? 1 : 0,
14127  createInfo.flags,
14128  createInfo.usage,
14129  createInfo.requiredFlags,
14130  createInfo.preferredFlags,
14131  createInfo.memoryTypeBits,
14132  createInfo.pool,
14133  allocation,
14134  userDataStr.GetString());
14135  Flush();
14136 }
14137 
14138 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14139  VmaAllocation allocation)
14140 {
14141  CallParams callParams;
14142  GetBasicParams(callParams);
14143 
14144  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14145  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14146  allocation);
14147  Flush();
14148 }
14149 
14150 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14151  uint64_t allocationCount,
14152  const VmaAllocation* pAllocations)
14153 {
14154  CallParams callParams;
14155  GetBasicParams(callParams);
14156 
14157  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14158  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14159  PrintPointerList(allocationCount, pAllocations);
14160  fprintf(m_File, "\n");
14161  Flush();
14162 }
14163 
14164 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14165  VmaAllocation allocation,
14166  const void* pUserData)
14167 {
14168  CallParams callParams;
14169  GetBasicParams(callParams);
14170 
14171  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14172  UserDataString userDataStr(
14173  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14174  pUserData);
14175  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14176  allocation,
14177  userDataStr.GetString());
14178  Flush();
14179 }
14180 
14181 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14182  VmaAllocation allocation)
14183 {
14184  CallParams callParams;
14185  GetBasicParams(callParams);
14186 
14187  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14188  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14189  allocation);
14190  Flush();
14191 }
14192 
14193 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14194  VmaAllocation allocation)
14195 {
14196  CallParams callParams;
14197  GetBasicParams(callParams);
14198 
14199  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14200  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14201  allocation);
14202  Flush();
14203 }
14204 
14205 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14206  VmaAllocation allocation)
14207 {
14208  CallParams callParams;
14209  GetBasicParams(callParams);
14210 
14211  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14212  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14213  allocation);
14214  Flush();
14215 }
14216 
14217 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14218  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14219 {
14220  CallParams callParams;
14221  GetBasicParams(callParams);
14222 
14223  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14224  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14225  allocation,
14226  offset,
14227  size);
14228  Flush();
14229 }
14230 
14231 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14232  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14233 {
14234  CallParams callParams;
14235  GetBasicParams(callParams);
14236 
14237  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14238  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14239  allocation,
14240  offset,
14241  size);
14242  Flush();
14243 }
14244 
14245 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14246  const VkBufferCreateInfo& bufCreateInfo,
14247  const VmaAllocationCreateInfo& allocCreateInfo,
14248  VmaAllocation allocation)
14249 {
14250  CallParams callParams;
14251  GetBasicParams(callParams);
14252 
14253  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14254  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14255  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14256  bufCreateInfo.flags,
14257  bufCreateInfo.size,
14258  bufCreateInfo.usage,
14259  bufCreateInfo.sharingMode,
14260  allocCreateInfo.flags,
14261  allocCreateInfo.usage,
14262  allocCreateInfo.requiredFlags,
14263  allocCreateInfo.preferredFlags,
14264  allocCreateInfo.memoryTypeBits,
14265  allocCreateInfo.pool,
14266  allocation,
14267  userDataStr.GetString());
14268  Flush();
14269 }
14270 
14271 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14272  const VkImageCreateInfo& imageCreateInfo,
14273  const VmaAllocationCreateInfo& allocCreateInfo,
14274  VmaAllocation allocation)
14275 {
14276  CallParams callParams;
14277  GetBasicParams(callParams);
14278 
14279  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14280  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14281  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14282  imageCreateInfo.flags,
14283  imageCreateInfo.imageType,
14284  imageCreateInfo.format,
14285  imageCreateInfo.extent.width,
14286  imageCreateInfo.extent.height,
14287  imageCreateInfo.extent.depth,
14288  imageCreateInfo.mipLevels,
14289  imageCreateInfo.arrayLayers,
14290  imageCreateInfo.samples,
14291  imageCreateInfo.tiling,
14292  imageCreateInfo.usage,
14293  imageCreateInfo.sharingMode,
14294  imageCreateInfo.initialLayout,
14295  allocCreateInfo.flags,
14296  allocCreateInfo.usage,
14297  allocCreateInfo.requiredFlags,
14298  allocCreateInfo.preferredFlags,
14299  allocCreateInfo.memoryTypeBits,
14300  allocCreateInfo.pool,
14301  allocation,
14302  userDataStr.GetString());
14303  Flush();
14304 }
14305 
14306 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14307  VmaAllocation allocation)
14308 {
14309  CallParams callParams;
14310  GetBasicParams(callParams);
14311 
14312  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14313  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14314  allocation);
14315  Flush();
14316 }
14317 
14318 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14319  VmaAllocation allocation)
14320 {
14321  CallParams callParams;
14322  GetBasicParams(callParams);
14323 
14324  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14325  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14326  allocation);
14327  Flush();
14328 }
14329 
14330 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14331  VmaAllocation allocation)
14332 {
14333  CallParams callParams;
14334  GetBasicParams(callParams);
14335 
14336  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14337  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14338  allocation);
14339  Flush();
14340 }
14341 
14342 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14343  VmaAllocation allocation)
14344 {
14345  CallParams callParams;
14346  GetBasicParams(callParams);
14347 
14348  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14349  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14350  allocation);
14351  Flush();
14352 }
14353 
14354 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14355  VmaPool pool)
14356 {
14357  CallParams callParams;
14358  GetBasicParams(callParams);
14359 
14360  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14361  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14362  pool);
14363  Flush();
14364 }
14365 
14366 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14367  const VmaDefragmentationInfo2& info,
14368  VmaDefragmentationContext ctx)
14369 {
14370  CallParams callParams;
14371  GetBasicParams(callParams);
14372 
14373  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14374  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14375  info.flags);
14376  PrintPointerList(info.allocationCount, info.pAllocations);
14377  fprintf(m_File, ",");
14378  PrintPointerList(info.poolCount, info.pPools);
14379  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14380  info.maxCpuBytesToMove,
14381  info.maxCpuAllocationsToMove,
14382  info.maxGpuBytesToMove,
14383  info.maxGpuAllocationsToMove,
14384  info.commandBuffer,
14385  ctx);
14386  Flush();
14387 }
14388 
14389 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14390  VmaDefragmentationContext ctx)
14391 {
14392  CallParams callParams;
14393  GetBasicParams(callParams);
14394 
14395  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14396  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14397  ctx);
14398  Flush();
14399 }
14400 
14401 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14402  VmaPool pool,
14403  const char* name)
14404 {
14405  CallParams callParams;
14406  GetBasicParams(callParams);
14407 
14408  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14409  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14410  pool, name != VMA_NULL ? name : "");
14411  Flush();
14412 }
14413 
14414 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14415 {
14416  if(pUserData != VMA_NULL)
14417  {
14418  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14419  {
14420  m_Str = (const char*)pUserData;
14421  }
14422  else
14423  {
14424  sprintf_s(m_PtrStr, "%p", pUserData);
14425  m_Str = m_PtrStr;
14426  }
14427  }
14428  else
14429  {
14430  m_Str = "";
14431  }
14432 }
14433 
14434 void VmaRecorder::WriteConfiguration(
14435  const VkPhysicalDeviceProperties& devProps,
14436  const VkPhysicalDeviceMemoryProperties& memProps,
14437  uint32_t vulkanApiVersion,
14438  bool dedicatedAllocationExtensionEnabled,
14439  bool bindMemory2ExtensionEnabled,
14440  bool memoryBudgetExtensionEnabled)
14441 {
14442  fprintf(m_File, "Config,Begin\n");
14443 
14444  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
14445 
14446  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14447  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14448  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14449  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14450  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14451  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14452 
14453  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14454  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14455  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14456 
14457  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14458  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14459  {
14460  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14461  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14462  }
14463  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14464  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14465  {
14466  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14467  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14468  }
14469 
14470  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14471  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14472  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
14473 
14474  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14475  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14476  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14477  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14478  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14479  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14480  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14481  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14482  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14483 
14484  fprintf(m_File, "Config,End\n");
14485 }
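// Sample of the "Config" section emitted above (illustrative values only):
//
//   Config,Begin
//   VulkanApiVersion,1,1
//   PhysicalDevice,deviceType,2
//   PhysicalDeviceMemory,HeapCount,2
//   Extension,VK_EXT_memory_budget,1
//   Macro,VMA_DEBUG_MARGIN,0
//   Config,End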
14486 
14487 void VmaRecorder::GetBasicParams(CallParams& outParams)
14488 {
14489  outParams.threadId = GetCurrentThreadId();
14490 
14491  LARGE_INTEGER counter;
14492  QueryPerformanceCounter(&counter);
14493  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14494 }
14495 
14496 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14497 {
14498  if(count)
14499  {
14500  fprintf(m_File, "%p", pItems[0]);
14501  for(uint64_t i = 1; i < count; ++i)
14502  {
14503  fprintf(m_File, " %p", pItems[i]);
14504  }
14505  }
14506 }
14507 
14508 void VmaRecorder::Flush()
14509 {
14510  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14511  {
14512  fflush(m_File);
14513  }
14514 }
14515 
14516 #endif // #if VMA_RECORDING_ENABLED
14517 
14518 ////////////////////////////////////////////////////////////////////////////////
14519 // VmaAllocationObjectAllocator
14520 
14521 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14522  m_Allocator(pAllocationCallbacks, 1024)
14523 {
14524 }
14525 
14526 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14527 {
14528  VmaMutexLock mutexLock(m_Mutex);
14529  return m_Allocator.Alloc();
14530 }
14531 
14532 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14533 {
14534  VmaMutexLock mutexLock(m_Mutex);
14535  m_Allocator.Free(hAlloc);
14536 }
14537 
14538 ////////////////////////////////////////////////////////////////////////////////
14539 // VmaAllocator_T
14540 
14541 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14542  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14543  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
14544  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14545  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14546  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14547  m_hDevice(pCreateInfo->device),
14548  m_hInstance(pCreateInfo->instance),
14549  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14550  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14551  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14552  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14553  m_HeapSizeLimitMask(0),
14554  m_PreferredLargeHeapBlockSize(0),
14555  m_PhysicalDevice(pCreateInfo->physicalDevice),
14556  m_CurrentFrameIndex(0),
14557  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14558  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14559  m_NextPoolId(0)
14560 #if VMA_RECORDING_ENABLED
14561  ,m_pRecorder(VMA_NULL)
14562 #endif
14563 {
14564  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14565  {
14566  m_UseKhrDedicatedAllocation = false;
14567  m_UseKhrBindMemory2 = false;
14568  }
14569 
14570  if(VMA_DEBUG_DETECT_CORRUPTION)
14571  {
14572  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14573  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14574  }
14575 
14576  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14577 
14578  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14579  {
14580 #if !(VMA_DEDICATED_ALLOCATION)
14581  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14582  {
14583  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14584  }
14585 #endif
14586 #if !(VMA_BIND_MEMORY2)
14587  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14588  {
14589  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14590  }
14591 #endif
14592  }
14593 #if !(VMA_MEMORY_BUDGET)
14594  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14595  {
14596  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14597  }
14598 #endif
14599 #if VMA_VULKAN_VERSION < 1001000
14600  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14601  {
14602  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
14603  }
14604 #endif
14605 
14606  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14607  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14608  memset(&m_MemProps, 0, sizeof(m_MemProps));
14609 
14610  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14611  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14612  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14613 
14614  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14615  {
14616  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14617  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14618  }
14619 
14620  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14621 
14622  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14623  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14624 
14625  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14626  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14627  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14628  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14629 
14630  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14631  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14632 
14633  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14634  {
14635  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14636  {
14637  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14638  if(limit != VK_WHOLE_SIZE)
14639  {
14640  m_HeapSizeLimitMask |= 1u << heapIndex;
14641  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14642  {
14643  m_MemProps.memoryHeaps[heapIndex].size = limit;
14644  }
14645  }
14646  }
14647  }
14648 
14649  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14650  {
14651  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14652 
14653  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14654  this,
14655  VK_NULL_HANDLE, // hParentPool
14656  memTypeIndex,
14657  preferredBlockSize,
14658  0,
14659  SIZE_MAX,
14660  GetBufferImageGranularity(),
14661  pCreateInfo->frameInUseCount,
14662  false, // explicitBlockSize
14663  false); // linearAlgorithm
14664  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14665  // because minBlockCount is 0.
14666  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14667 
14668  }
14669 }
14670 
14671 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14672 {
14673  VkResult res = VK_SUCCESS;
14674 
14675  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14676  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14677  {
14678 #if VMA_RECORDING_ENABLED
14679  m_pRecorder = vma_new(this, VmaRecorder)();
14680  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14681  if(res != VK_SUCCESS)
14682  {
14683  return res;
14684  }
14685  m_pRecorder->WriteConfiguration(
14686  m_PhysicalDeviceProperties,
14687  m_MemProps,
14688  m_VulkanApiVersion,
14689  m_UseKhrDedicatedAllocation,
14690  m_UseKhrBindMemory2,
14691  m_UseExtMemoryBudget);
14692  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14693 #else
14694  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14695  return VK_ERROR_FEATURE_NOT_PRESENT;
14696 #endif
14697  }
14698 
14699 #if VMA_MEMORY_BUDGET
14700  if(m_UseExtMemoryBudget)
14701  {
14702  UpdateVulkanBudget();
14703  }
14704 #endif // #if VMA_MEMORY_BUDGET
14705 
14706  return res;
14707 }
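// Enabling the recording path handled in Init() (sketch; requires building
// with VMA_RECORDING_ENABLED defined to 1 - note the Win32 file/timer APIs
// used by VmaRecorder above; "vma_calls.csv" is a placeholder path):
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
//   recordSettings.pFilePath = "vma_calls.csv";
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   // ... fill physicalDevice, device, instance, etc. ...
//   allocatorInfo.pRecordSettings = &recordSettings;
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);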
14708 
14709 VmaAllocator_T::~VmaAllocator_T()
14710 {
14711 #if VMA_RECORDING_ENABLED
14712  if(m_pRecorder != VMA_NULL)
14713  {
14714  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14715  vma_delete(this, m_pRecorder);
14716  }
14717 #endif
14718 
14719  VMA_ASSERT(m_Pools.empty());
14720 
14721  for(size_t i = GetMemoryTypeCount(); i--; )
14722  {
14723  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14724  {
14725  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14726  }
14727 
14728  vma_delete(this, m_pDedicatedAllocations[i]);
14729  vma_delete(this, m_pBlockVectors[i]);
14730  }
14731 }
14732 
14733 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14734 {
14735 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14736  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14737  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14738  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14739  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14740  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14741  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14742  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14743  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14744  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14745  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14746  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14747  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14748  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14749  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14750  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14751  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14752  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14753 #if VMA_VULKAN_VERSION >= 1001000
14754  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14755  {
14756  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
14757  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14758  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
14759  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14760  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
14761  m_VulkanFunctions.vkBindBufferMemory2KHR =
14762  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
14763  m_VulkanFunctions.vkBindImageMemory2KHR =
14764  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
14765  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
14766  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
14767  }
14768 #endif
14769 #if VMA_DEDICATED_ALLOCATION
14770  if(m_UseKhrDedicatedAllocation)
14771  {
14772  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14773  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14774  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14775  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14776  }
14777 #endif
14778 #if VMA_BIND_MEMORY2
14779  if(m_UseKhrBindMemory2)
14780  {
14781  m_VulkanFunctions.vkBindBufferMemory2KHR =
14782  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
14783  m_VulkanFunctions.vkBindImageMemory2KHR =
14784  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
14785  }
14786 #endif // #if VMA_BIND_MEMORY2
14787 #if VMA_MEMORY_BUDGET
14788  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14789  {
14790  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
14791  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
14792  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
14793  }
14794 #endif // #if VMA_MEMORY_BUDGET
14795 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14796 
14797 #define VMA_COPY_IF_NOT_NULL(funcName) \
14798  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14799 
14800  if(pVulkanFunctions != VMA_NULL)
14801  {
14802  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14803  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14804  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14805  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14806  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14807  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14808  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14809  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14810  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14811  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14812  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14813  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14814  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14815  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14816  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14817  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14818  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14819 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14820  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14821  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14822 #endif
14823 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
14824  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
14825  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
14826 #endif
14827 #if VMA_MEMORY_BUDGET
14828  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
14829 #endif
14830  }
14831 
14832 #undef VMA_COPY_IF_NOT_NULL
14833 
14834  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14835  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14836  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14837  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14838  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14839  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14840  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14841  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14842  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14843  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14844  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14845  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14846  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14847  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14848  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14849  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14850  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14851  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14852  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14853 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14854  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
14855  {
14856  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14857  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14858  }
14859 #endif
14860 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
14861  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
14862  {
14863  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
14864  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
14865  }
14866 #endif
14867 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14868  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14869  {
14870  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
14871  }
14872 #endif
14873 }
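// If VMA_STATIC_VULKAN_FUNCTIONS is not 1, the pointers asserted above must
// come from the client, e.g. (sketch - only a few of the members listed in
// VMA_COPY_IF_NOT_NULL are shown):
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//   vulkanFunctions.vkFreeMemory = vkFreeMemory;
//   // ... remaining members ...
//   allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;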
14874 
14875 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14876 {
14877  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14878  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14879  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14880  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14881 }
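// Worked example for CalcPreferredBlockSize(): assuming the default
// VMA_SMALL_HEAP_MAX_SIZE of 1 GiB, a 256 MiB heap counts as small and gets
// preferred blocks of 256 MiB / 8 = 32 MiB (already 32-byte aligned);
// larger heaps simply use m_PreferredLargeHeapBlockSize.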
14882 
14883 VkResult VmaAllocator_T::AllocateMemoryOfType(
14884  VkDeviceSize size,
14885  VkDeviceSize alignment,
14886  bool dedicatedAllocation,
14887  VkBuffer dedicatedBuffer,
14888  VkImage dedicatedImage,
14889  const VmaAllocationCreateInfo& createInfo,
14890  uint32_t memTypeIndex,
14891  VmaSuballocationType suballocType,
14892  size_t allocationCount,
14893  VmaAllocation* pAllocations)
14894 {
14895  VMA_ASSERT(pAllocations != VMA_NULL);
14896  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14897 
14898  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14899 
14900  // If memory type is not HOST_VISIBLE, disable MAPPED.
14901  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14902  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14903  {
14904  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14905  }
14906  // If memory is lazily allocated, it should always be dedicated.
14907  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
14908  {
14909  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14910  }
14911 
14912  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14913  VMA_ASSERT(blockVector);
14914 
14915  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14916  bool preferDedicatedMemory =
14917  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14918  dedicatedAllocation ||
14919  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14920  size > preferredBlockSize / 2;
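 // E.g. with a 256 MiB preferred block size, any single request larger
 // than 128 MiB prefers its own dedicated VkDeviceMemory allocation.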
14921 
14922  if(preferDedicatedMemory &&
14923  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14924  finalCreateInfo.pool == VK_NULL_HANDLE)
14925  {
14926  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14927  }
14928 
14929  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14930  {
14931  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14932  {
14933  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14934  }
14935  else
14936  {
14937  return AllocateDedicatedMemory(
14938  size,
14939  suballocType,
14940  memTypeIndex,
14941  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14942  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14943  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14944  finalCreateInfo.pUserData,
14945  dedicatedBuffer,
14946  dedicatedImage,
14947  allocationCount,
14948  pAllocations);
14949  }
14950  }
14951  else
14952  {
14953  VkResult res = blockVector->Allocate(
14954  m_CurrentFrameIndex.load(),
14955  size,
14956  alignment,
14957  finalCreateInfo,
14958  suballocType,
14959  allocationCount,
14960  pAllocations);
14961  if(res == VK_SUCCESS)
14962  {
14963  return res;
14964  }
14965 
14966  // Block allocation failed. Try dedicated memory.
14967  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14968  {
14969  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14970  }
14971  else
14972  {
14973  res = AllocateDedicatedMemory(
14974  size,
14975  suballocType,
14976  memTypeIndex,
14977  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14978  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14979  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14980  finalCreateInfo.pUserData,
14981  dedicatedBuffer,
14982  dedicatedImage,
14983  allocationCount,
14984  pAllocations);
14985  if(res == VK_SUCCESS)
14986  {
14987  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14988  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14989  return VK_SUCCESS;
14990  }
14991  else
14992  {
14993  // Everything failed: Return error code.
14994  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14995  return res;
14996  }
14997  }
14998  }
14999 }
15000 
15001 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15002  VkDeviceSize size,
15003  VmaSuballocationType suballocType,
15004  uint32_t memTypeIndex,
15005  bool withinBudget,
15006  bool map,
15007  bool isUserDataString,
15008  void* pUserData,
15009  VkBuffer dedicatedBuffer,
15010  VkImage dedicatedImage,
15011  size_t allocationCount,
15012  VmaAllocation* pAllocations)
15013 {
15014  VMA_ASSERT(allocationCount > 0 && pAllocations);
15015 
15016  if(withinBudget)
15017  {
15018  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15019  VmaBudget heapBudget = {};
15020  GetBudget(&heapBudget, heapIndex, 1);
15021  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
15022  {
15023  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15024  }
15025  }
15026 
15027  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
15028  allocInfo.memoryTypeIndex = memTypeIndex;
15029  allocInfo.allocationSize = size;
15030 
15031 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15032  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
15033  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15034  {
15035  if(dedicatedBuffer != VK_NULL_HANDLE)
15036  {
15037  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
15038  dedicatedAllocInfo.buffer = dedicatedBuffer;
15039  allocInfo.pNext = &dedicatedAllocInfo;
15040  }
15041  else if(dedicatedImage != VK_NULL_HANDLE)
15042  {
15043  dedicatedAllocInfo.image = dedicatedImage;
15044  allocInfo.pNext = &dedicatedAllocInfo;
15045  }
15046  }
15047 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15048 
15049  size_t allocIndex;
15050  VkResult res = VK_SUCCESS;
15051  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15052  {
15053  res = AllocateDedicatedMemoryPage(
15054  size,
15055  suballocType,
15056  memTypeIndex,
15057  allocInfo,
15058  map,
15059  isUserDataString,
15060  pUserData,
15061  pAllocations + allocIndex);
15062  if(res != VK_SUCCESS)
15063  {
15064  break;
15065  }
15066  }
15067 
15068  if(res == VK_SUCCESS)
15069  {
15070  // Register them in m_pDedicatedAllocations.
15071  {
15072  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15073  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15074  VMA_ASSERT(pDedicatedAllocations);
15075  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15076  {
15077  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
15078  }
15079  }
15080 
15081  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
15082  }
15083  else
15084  {
15085  // Free all already created allocations.
15086  while(allocIndex--)
15087  {
15088  VmaAllocation currAlloc = pAllocations[allocIndex];
15089  VkDeviceMemory hMemory = currAlloc->GetMemory();
15090 
15091  /*
15092  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15093  before vkFreeMemory.
15094 
15095  if(currAlloc->GetMappedData() != VMA_NULL)
15096  {
15097  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15098  }
15099  */
15100 
15101  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
15102  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
15103  currAlloc->SetUserData(this, VMA_NULL);
15104  currAlloc->Dtor();
15105  m_AllocationObjectAllocator.Free(currAlloc);
15106  }
15107 
15108  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15109  }
15110 
15111  return res;
15112 }
15113 
15114 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15115  VkDeviceSize size,
15116  VmaSuballocationType suballocType,
15117  uint32_t memTypeIndex,
15118  const VkMemoryAllocateInfo& allocInfo,
15119  bool map,
15120  bool isUserDataString,
15121  void* pUserData,
15122  VmaAllocation* pAllocation)
15123 {
15124  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15125  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15126  if(res < 0)
15127  {
15128  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15129  return res;
15130  }
15131 
15132  void* pMappedData = VMA_NULL;
15133  if(map)
15134  {
15135  res = (*m_VulkanFunctions.vkMapMemory)(
15136  m_hDevice,
15137  hMemory,
15138  0,
15139  VK_WHOLE_SIZE,
15140  0,
15141  &pMappedData);
15142  if(res < 0)
15143  {
15144  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15145  FreeVulkanMemory(memTypeIndex, size, hMemory);
15146  return res;
15147  }
15148  }
15149 
15150  *pAllocation = m_AllocationObjectAllocator.Allocate();
15151  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
15152  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15153  (*pAllocation)->SetUserData(this, pUserData);
15154  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15155  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15156  {
15157  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15158  }
15159 
15160  return VK_SUCCESS;
15161 }
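/*
Usage sketch (illustrative): the `map` parameter above is driven by
VMA_ALLOCATION_CREATE_MAPPED_BIT, which keeps the memory persistently mapped so
VmaAllocationInfo::pMappedData stays valid for the allocation's lifetime. Assumes
a valid `allocator`, a filled `bufCreateInfo`, and host-side `srcData`/`srcSize`;
error handling omitted.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    memcpy(allocInfo.pMappedData, srcData, srcSize); // No explicit vmaMapMemory needed.
*/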
15162 
15163 void VmaAllocator_T::GetBufferMemoryRequirements(
15164  VkBuffer hBuffer,
15165  VkMemoryRequirements& memReq,
15166  bool& requiresDedicatedAllocation,
15167  bool& prefersDedicatedAllocation) const
15168 {
15169 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15170  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15171  {
15172  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15173  memReqInfo.buffer = hBuffer;
15174 
15175  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15176 
15177  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15178  memReq2.pNext = &memDedicatedReq;
15179 
15180  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15181 
15182  memReq = memReq2.memoryRequirements;
15183  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15184  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15185  }
15186  else
15187 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15188  {
15189  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15190  requiresDedicatedAllocation = false;
15191  prefersDedicatedAllocation = false;
15192  }
15193 }
15194 
15195 void VmaAllocator_T::GetImageMemoryRequirements(
15196  VkImage hImage,
15197  VkMemoryRequirements& memReq,
15198  bool& requiresDedicatedAllocation,
15199  bool& prefersDedicatedAllocation) const
15200 {
15201 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15202  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15203  {
15204  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15205  memReqInfo.image = hImage;
15206 
15207  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15208 
15209  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15210  memReq2.pNext = &memDedicatedReq;
15211 
15212  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15213 
15214  memReq = memReq2.memoryRequirements;
15215  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15216  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15217  }
15218  else
15219 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15220  {
15221  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15222  requiresDedicatedAllocation = false;
15223  prefersDedicatedAllocation = false;
15224  }
15225 }
15226 
15227 VkResult VmaAllocator_T::AllocateMemory(
15228  const VkMemoryRequirements& vkMemReq,
15229  bool requiresDedicatedAllocation,
15230  bool prefersDedicatedAllocation,
15231  VkBuffer dedicatedBuffer,
15232  VkImage dedicatedImage,
15233  const VmaAllocationCreateInfo& createInfo,
15234  VmaSuballocationType suballocType,
15235  size_t allocationCount,
15236  VmaAllocation* pAllocations)
15237 {
15238  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15239 
15240  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15241 
15242  if(vkMemReq.size == 0)
15243  {
15244  return VK_ERROR_VALIDATION_FAILED_EXT;
15245  }
15246  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15247  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15248  {
15249  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15250  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15251  }
15252  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15253  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
15254  {
15255  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15256  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15257  }
15258  if(requiresDedicatedAllocation)
15259  {
15260  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15261  {
15262  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15263  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15264  }
15265  if(createInfo.pool != VK_NULL_HANDLE)
15266  {
15267  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15268  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15269  }
15270  }
15271  if((createInfo.pool != VK_NULL_HANDLE) &&
15272  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15273  {
15274  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15275  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15276  }
15277 
15278  if(createInfo.pool != VK_NULL_HANDLE)
15279  {
15280  const VkDeviceSize alignmentForPool = VMA_MAX(
15281  vkMemReq.alignment,
15282  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15283 
15284  VmaAllocationCreateInfo createInfoForPool = createInfo;
15285  // If memory type is not HOST_VISIBLE, disable MAPPED.
15286  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15287  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15288  {
15289  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15290  }
15291 
15292  return createInfo.pool->m_BlockVector.Allocate(
15293  m_CurrentFrameIndex.load(),
15294  vkMemReq.size,
15295  alignmentForPool,
15296  createInfoForPool,
15297  suballocType,
15298  allocationCount,
15299  pAllocations);
15300  }
15301  else
15302  {
15303  // Bit mask of Vulkan memory types acceptable for this allocation.
15304  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15305  uint32_t memTypeIndex = UINT32_MAX;
15306  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15307  if(res == VK_SUCCESS)
15308  {
15309  VkDeviceSize alignmentForMemType = VMA_MAX(
15310  vkMemReq.alignment,
15311  GetMemoryTypeMinAlignment(memTypeIndex));
15312 
15313  res = AllocateMemoryOfType(
15314  vkMemReq.size,
15315  alignmentForMemType,
15316  requiresDedicatedAllocation || prefersDedicatedAllocation,
15317  dedicatedBuffer,
15318  dedicatedImage,
15319  createInfo,
15320  memTypeIndex,
15321  suballocType,
15322  allocationCount,
15323  pAllocations);
15324  // Succeeded on first try.
15325  if(res == VK_SUCCESS)
15326  {
15327  return res;
15328  }
15329  // Allocation from this memory type failed. Try other compatible memory types.
15330  else
15331  {
15332  for(;;)
15333  {
15334  // Remove old memTypeIndex from list of possibilities.
15335  memoryTypeBits &= ~(1u << memTypeIndex);
15336  // Find alternative memTypeIndex.
15337  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15338  if(res == VK_SUCCESS)
15339  {
15340  alignmentForMemType = VMA_MAX(
15341  vkMemReq.alignment,
15342  GetMemoryTypeMinAlignment(memTypeIndex));
15343 
15344  res = AllocateMemoryOfType(
15345  vkMemReq.size,
15346  alignmentForMemType,
15347  requiresDedicatedAllocation || prefersDedicatedAllocation,
15348  dedicatedBuffer,
15349  dedicatedImage,
15350  createInfo,
15351  memTypeIndex,
15352  suballocType,
15353  allocationCount,
15354  pAllocations);
15355  // Allocation from this alternative memory type succeeded.
15356  if(res == VK_SUCCESS)
15357  {
15358  return res;
15359  }
15360  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15361  }
15362  // No other matching memory type index could be found.
15363  else
15364  {
15365  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15366  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15367  }
15368  }
15369  }
15370  }
15371  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15372  else
15373  return res;
15374  }
15375 }
15376 
15377 void VmaAllocator_T::FreeMemory(
15378  size_t allocationCount,
15379  const VmaAllocation* pAllocations)
15380 {
15381  VMA_ASSERT(pAllocations);
15382 
15383  for(size_t allocIndex = allocationCount; allocIndex--; )
15384  {
15385  VmaAllocation allocation = pAllocations[allocIndex];
15386 
15387  if(allocation != VK_NULL_HANDLE)
15388  {
15389  if(TouchAllocation(allocation))
15390  {
15391  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15392  {
15393  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15394  }
15395 
15396  switch(allocation->GetType())
15397  {
15398  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15399  {
15400  VmaBlockVector* pBlockVector = VMA_NULL;
15401  VmaPool hPool = allocation->GetBlock()->GetParentPool();
15402  if(hPool != VK_NULL_HANDLE)
15403  {
15404  pBlockVector = &hPool->m_BlockVector;
15405  }
15406  else
15407  {
15408  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15409  pBlockVector = m_pBlockVectors[memTypeIndex];
15410  }
15411  pBlockVector->Free(allocation);
15412  }
15413  break;
15414  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15415  FreeDedicatedMemory(allocation);
15416  break;
15417  default:
15418  VMA_ASSERT(0);
15419  }
15420  }
15421 
15422  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
15423  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
15424  allocation->SetUserData(this, VMA_NULL);
15425  allocation->Dtor();
15426  m_AllocationObjectAllocator.Free(allocation);
15427  }
15428  }
15429 }
15430 
15431 VkResult VmaAllocator_T::ResizeAllocation(
15432  const VmaAllocation alloc,
15433  VkDeviceSize newSize)
15434 {
15435  // This function is deprecated and performs no actual resizing. It is kept for backward compatibility.
15436  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
15437  {
15438  return VK_ERROR_VALIDATION_FAILED_EXT;
15439  }
15440  if(newSize == alloc->GetSize())
15441  {
15442  return VK_SUCCESS;
15443  }
15444  return VK_ERROR_OUT_OF_POOL_MEMORY;
15445 }
15446 
15447 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
15448 {
15449  // Initialize.
15450  InitStatInfo(pStats->total);
15451  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15452  InitStatInfo(pStats->memoryType[i]);
15453  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15454  InitStatInfo(pStats->memoryHeap[i]);
15455 
15456  // Process default pools.
15457  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15458  {
15459  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15460  VMA_ASSERT(pBlockVector);
15461  pBlockVector->AddStats(pStats);
15462  }
15463 
15464  // Process custom pools.
15465  {
15466  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15467  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15468  {
15469  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15470  }
15471  }
15472 
15473  // Process dedicated allocations.
15474  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15475  {
15476  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15477  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15478  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15479  VMA_ASSERT(pDedicatedAllocVector);
15480  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15481  {
15482  VmaStatInfo allocationStatInfo;
15483  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15484  VmaAddStatInfo(pStats->total, allocationStatInfo);
15485  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15486  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15487  }
15488  }
15489 
15490  // Postprocess.
15491  VmaPostprocessCalcStatInfo(pStats->total);
15492  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15493  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15494  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15495  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15496 }
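/*
Usage sketch (illustrative): these statistics are exposed through the public
vmaCalculateStats entry point. Assumes a valid `allocator`.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // Total bytes occupied by live allocations across all heaps:
    const VkDeviceSize used = stats.total.usedBytes;
*/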
15497 
15498 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
15499 {
15500 #if VMA_MEMORY_BUDGET
15501  if(m_UseExtMemoryBudget)
15502  {
15503  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
15504  {
15505  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
15506  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15507  {
15508  const uint32_t heapIndex = firstHeap + i;
15509 
15510  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15511  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15512 
15513  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
15514  {
15515  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
15516  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
15517  }
15518  else
15519  {
15520  outBudget->usage = 0;
15521  }
15522 
15523  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
15524  outBudget->budget = VMA_MIN(
15525  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
15526  }
15527  }
15528  else
15529  {
15530  UpdateVulkanBudget(); // Outside of mutex lock
15531  GetBudget(outBudget, firstHeap, heapCount); // Recursion
15532  }
15533  }
15534  else
15535 #endif
15536  {
15537  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15538  {
15539  const uint32_t heapIndex = firstHeap + i;
15540 
15541  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15542  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15543 
15544  outBudget->usage = outBudget->blockBytes;
15545  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
15546  }
15547  }
15548 }
15549 
15550 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15551 
15552 VkResult VmaAllocator_T::DefragmentationBegin(
15553  const VmaDefragmentationInfo2& info,
15554  VmaDefragmentationStats* pStats,
15555  VmaDefragmentationContext* pContext)
15556 {
15557  if(info.pAllocationsChanged != VMA_NULL)
15558  {
15559  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15560  }
15561 
15562  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15563  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15564 
15565  (*pContext)->AddPools(info.poolCount, info.pPools);
15566  (*pContext)->AddAllocations(
15567  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15568 
15569  VkResult res = (*pContext)->Defragment(
15570  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15571  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15572  info.commandBuffer, pStats);
15573 
15574  if(res != VK_NOT_READY)
15575  {
15576  vma_delete(this, *pContext);
15577  *pContext = VMA_NULL;
15578  }
15579 
15580  return res;
15581 }
15582 
15583 VkResult VmaAllocator_T::DefragmentationEnd(
15584  VmaDefragmentationContext context)
15585 {
15586  vma_delete(this, context);
15587  return VK_SUCCESS;
15588 }
15589 
15590 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15591 {
15592  if(hAllocation->CanBecomeLost())
15593  {
15594  /*
15595  Warning: This is a carefully designed algorithm.
15596  Do not modify unless you really know what you're doing :)
15597  */
15598  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15599  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15600  for(;;)
15601  {
15602  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15603  {
15604  pAllocationInfo->memoryType = UINT32_MAX;
15605  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15606  pAllocationInfo->offset = 0;
15607  pAllocationInfo->size = hAllocation->GetSize();
15608  pAllocationInfo->pMappedData = VMA_NULL;
15609  pAllocationInfo->pUserData = hAllocation->GetUserData();
15610  return;
15611  }
15612  else if(localLastUseFrameIndex == localCurrFrameIndex)
15613  {
15614  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15615  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15616  pAllocationInfo->offset = hAllocation->GetOffset();
15617  pAllocationInfo->size = hAllocation->GetSize();
15618  pAllocationInfo->pMappedData = VMA_NULL;
15619  pAllocationInfo->pUserData = hAllocation->GetUserData();
15620  return;
15621  }
15622  else // Last use time earlier than current time.
15623  {
15624  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15625  {
15626  localLastUseFrameIndex = localCurrFrameIndex;
15627  }
15628  }
15629  }
15630  }
15631  else
15632  {
15633 #if VMA_STATS_STRING_ENABLED
15634  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15635  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15636  for(;;)
15637  {
15638  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15639  if(localLastUseFrameIndex == localCurrFrameIndex)
15640  {
15641  break;
15642  }
15643  else // Last use time earlier than current time.
15644  {
15645  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15646  {
15647  localLastUseFrameIndex = localCurrFrameIndex;
15648  }
15649  }
15650  }
15651 #endif
15652 
15653  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15654  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15655  pAllocationInfo->offset = hAllocation->GetOffset();
15656  pAllocationInfo->size = hAllocation->GetSize();
15657  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15658  pAllocationInfo->pUserData = hAllocation->GetUserData();
15659  }
15660 }
15661 
15662 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15663 {
15664  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15665  if(hAllocation->CanBecomeLost())
15666  {
15667  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15668  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15669  for(;;)
15670  {
15671  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15672  {
15673  return false;
15674  }
15675  else if(localLastUseFrameIndex == localCurrFrameIndex)
15676  {
15677  return true;
15678  }
15679  else // Last use time earlier than current time.
15680  {
15681  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15682  {
15683  localLastUseFrameIndex = localCurrFrameIndex;
15684  }
15685  }
15686  }
15687  }
15688  else
15689  {
15690 #if VMA_STATS_STRING_ENABLED
15691  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15692  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15693  for(;;)
15694  {
15695  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15696  if(localLastUseFrameIndex == localCurrFrameIndex)
15697  {
15698  break;
15699  }
15700  else // Last use time earlier than current time.
15701  {
15702  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15703  {
15704  localLastUseFrameIndex = localCurrFrameIndex;
15705  }
15706  }
15707  }
15708 #endif
15709 
15710  return true;
15711  }
15712 }
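/*
Usage sketch (illustrative): the public wrapper vmaTouchAllocation uses this logic
to mark an allocation as used in the current frame and to report whether it became
lost. Assumes valid `allocator` and `alloc` handles and an allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.

    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation is lost - destroy the dependent resource and recreate it.
    }
*/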
15713 
15714 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15715 {
15716  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15717 
15718  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15719 
15720  if(newCreateInfo.maxBlockCount == 0)
15721  {
15722  newCreateInfo.maxBlockCount = SIZE_MAX;
15723  }
15724  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15725  {
15726  return VK_ERROR_INITIALIZATION_FAILED;
15727  }
15728 
15729  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15730 
15731  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15732 
15733  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15734  if(res != VK_SUCCESS)
15735  {
15736  vma_delete(this, *pPool);
15737  *pPool = VMA_NULL;
15738  return res;
15739  }
15740 
15741  // Add to m_Pools.
15742  {
15743  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15744  (*pPool)->SetId(m_NextPoolId++);
15745  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15746  }
15747 
15748  return VK_SUCCESS;
15749 }
15750 
15751 void VmaAllocator_T::DestroyPool(VmaPool pool)
15752 {
15753  // Remove from m_Pools.
15754  {
15755  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15756  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15757  VMA_ASSERT(success && "Pool not found in Allocator.");
15758  }
15759 
15760  vma_delete(this, pool);
15761 }
15762 
15763 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15764 {
15765  pool->m_BlockVector.GetPoolStats(pPoolStats);
15766 }
15767 
15768 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15769 {
15770  m_CurrentFrameIndex.store(frameIndex);
15771 
15772 #if VMA_MEMORY_BUDGET
15773  if(m_UseExtMemoryBudget)
15774  {
15775  UpdateVulkanBudget();
15776  }
15777 #endif // #if VMA_MEMORY_BUDGET
15778 }
15779 
15780 void VmaAllocator_T::MakePoolAllocationsLost(
15781  VmaPool hPool,
15782  size_t* pLostAllocationCount)
15783 {
15784  hPool->m_BlockVector.MakePoolAllocationsLost(
15785  m_CurrentFrameIndex.load(),
15786  pLostAllocationCount);
15787 }
15788 
15789 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15790 {
15791  return hPool->m_BlockVector.CheckCorruption();
15792 }
15793 
15794 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15795 {
15796  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15797 
15798  // Process default pools.
15799  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15800  {
15801  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15802  {
15803  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15804  VMA_ASSERT(pBlockVector);
15805  VkResult localRes = pBlockVector->CheckCorruption();
15806  switch(localRes)
15807  {
15808  case VK_ERROR_FEATURE_NOT_PRESENT:
15809  break;
15810  case VK_SUCCESS:
15811  finalRes = VK_SUCCESS;
15812  break;
15813  default:
15814  return localRes;
15815  }
15816  }
15817  }
15818 
15819  // Process custom pools.
15820  {
15821  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15822  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15823  {
15824  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15825  {
15826  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15827  switch(localRes)
15828  {
15829  case VK_ERROR_FEATURE_NOT_PRESENT:
15830  break;
15831  case VK_SUCCESS:
15832  finalRes = VK_SUCCESS;
15833  break;
15834  default:
15835  return localRes;
15836  }
15837  }
15838  }
15839  }
15840 
15841  return finalRes;
15842 }
15843 
15844 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15845 {
15846  *pAllocation = m_AllocationObjectAllocator.Allocate();
15847  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15848  (*pAllocation)->InitLost();
15849 }
15850 
15851 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15852 {
15853  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15854 
15855  // HeapSizeLimit is in effect for this heap.
15856  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15857  {
15858  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15859  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
15860  for(;;)
15861  {
15862  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15863  if(blockBytesAfterAllocation > heapSize)
15864  {
15865  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15866  }
15867  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) // CAS on this heap's counter, not element 0.
15868  {
15869  break;
15870  }
15871  }
15872  }
15873  else
15874  {
15875  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15876  }
15877 
15878  // VULKAN CALL vkAllocateMemory.
15879  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15880 
15881  if(res == VK_SUCCESS)
15882  {
15883 #if VMA_MEMORY_BUDGET
15884  ++m_Budget.m_OperationsSinceBudgetFetch;
15885 #endif
15886 
15887  // Informative callback.
15888  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15889  {
15890  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15891  }
15892  }
15893  else
15894  {
15895  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15896  }
15897 
15898  return res;
15899 }
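/*
Usage sketch (illustrative): the heap-size-limit path above is active only when the
allocator was created with VmaAllocatorCreateInfo::pHeapSizeLimit. A sketch assuming
heap 0 should be capped at 256 MiB and the remaining heaps left unlimited;
`physicalDevice` and `device` are assumed to be valid handles.

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit on that heap.
    heapLimits[0] = 256ull * 1024 * 1024;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/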
15900 
15901 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15902 {
15903  // Informative callback.
15904  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15905  {
15906  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15907  }
15908 
15909  // VULKAN CALL vkFreeMemory.
15910  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15911 
15912  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
15913 }
15914 
15915 VkResult VmaAllocator_T::BindVulkanBuffer(
15916  VkDeviceMemory memory,
15917  VkDeviceSize memoryOffset,
15918  VkBuffer buffer,
15919  const void* pNext)
15920 {
15921  if(pNext != VMA_NULL)
15922  {
15923 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15924  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15925  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15926  {
15927  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15928  bindBufferMemoryInfo.pNext = pNext;
15929  bindBufferMemoryInfo.buffer = buffer;
15930  bindBufferMemoryInfo.memory = memory;
15931  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15932  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15933  }
15934  else
15935 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15936  {
15937  return VK_ERROR_EXTENSION_NOT_PRESENT;
15938  }
15939  }
15940  else
15941  {
15942  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15943  }
15944 }
15945 
15946 VkResult VmaAllocator_T::BindVulkanImage(
15947  VkDeviceMemory memory,
15948  VkDeviceSize memoryOffset,
15949  VkImage image,
15950  const void* pNext)
15951 {
15952  if(pNext != VMA_NULL)
15953  {
15954 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15955  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15956  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15957  {
15958  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15959  bindImageMemoryInfo.pNext = pNext;
15960  bindImageMemoryInfo.image = image;
15961  bindImageMemoryInfo.memory = memory;
15962  bindImageMemoryInfo.memoryOffset = memoryOffset;
15963  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
15964  }
15965  else
15966 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15967  {
15968  return VK_ERROR_EXTENSION_NOT_PRESENT;
15969  }
15970  }
15971  else
15972  {
15973  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15974  }
15975 }
15976 
15977 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15978 {
15979  if(hAllocation->CanBecomeLost())
15980  {
15981  return VK_ERROR_MEMORY_MAP_FAILED;
15982  }
15983 
15984  switch(hAllocation->GetType())
15985  {
15986  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15987  {
15988  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15989  char *pBytes = VMA_NULL;
15990  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15991  if(res == VK_SUCCESS)
15992  {
15993  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15994  hAllocation->BlockAllocMap();
15995  }
15996  return res;
15997  }
15998  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15999  return hAllocation->DedicatedAllocMap(this, ppData);
16000  default:
16001  VMA_ASSERT(0);
16002  return VK_ERROR_MEMORY_MAP_FAILED;
16003  }
16004 }
16005 
16006 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16007 {
16008  switch(hAllocation->GetType())
16009  {
16010  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16011  {
16012  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16013  hAllocation->BlockAllocUnmap();
16014  pBlock->Unmap(this, 1);
16015  }
16016  break;
16017  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16018  hAllocation->DedicatedAllocUnmap(this);
16019  break;
16020  default:
16021  VMA_ASSERT(0);
16022  }
16023 }
16024 
16025 VkResult VmaAllocator_T::BindBufferMemory(
16026  VmaAllocation hAllocation,
16027  VkDeviceSize allocationLocalOffset,
16028  VkBuffer hBuffer,
16029  const void* pNext)
16030 {
16031  VkResult res = VK_SUCCESS;
16032  switch(hAllocation->GetType())
16033  {
16034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16035  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16036  break;
16037  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16038  {
16039  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16040  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16041  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16042  break;
16043  }
16044  default:
16045  VMA_ASSERT(0);
16046  }
16047  return res;
16048 }
16049 
16050 VkResult VmaAllocator_T::BindImageMemory(
16051  VmaAllocation hAllocation,
16052  VkDeviceSize allocationLocalOffset,
16053  VkImage hImage,
16054  const void* pNext)
16055 {
16056  VkResult res = VK_SUCCESS;
16057  switch(hAllocation->GetType())
16058  {
16059  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16060  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16061  break;
16062  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16063  {
16064  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16065  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16066  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16067  break;
16068  }
16069  default:
16070  VMA_ASSERT(0);
16071  }
16072  return res;
16073 }
16074 
16075 void VmaAllocator_T::FlushOrInvalidateAllocation(
16076  VmaAllocation hAllocation,
16077  VkDeviceSize offset, VkDeviceSize size,
16078  VMA_CACHE_OPERATION op)
16079 {
16080  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
16081  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
16082  {
16083  const VkDeviceSize allocationSize = hAllocation->GetSize();
16084  VMA_ASSERT(offset <= allocationSize);
16085 
16086  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
16087 
16088  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
16089  memRange.memory = hAllocation->GetMemory();
16090 
16091  switch(hAllocation->GetType())
16092  {
16093  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16094  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16095  if(size == VK_WHOLE_SIZE)
16096  {
16097  memRange.size = allocationSize - memRange.offset;
16098  }
16099  else
16100  {
16101  VMA_ASSERT(offset + size <= allocationSize);
16102  memRange.size = VMA_MIN(
16103  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
16104  allocationSize - memRange.offset);
16105  }
16106  break;
16107 
16108  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16109  {
16110  // 1. Still within this allocation.
16111  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16112  if(size == VK_WHOLE_SIZE)
16113  {
16114  size = allocationSize - offset;
16115  }
16116  else
16117  {
16118  VMA_ASSERT(offset + size <= allocationSize);
16119  }
16120  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
16121 
16122  // 2. Adjust to whole block.
16123  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
16124  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
16125  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
16126  memRange.offset += allocationOffset;
16127  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
16128 
16129  break;
16130  }
16131 
16132  default:
16133  VMA_ASSERT(0);
16134  }
16135 
16136  switch(op)
16137  {
16138  case VMA_CACHE_FLUSH:
16139  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
16140  break;
16141  case VMA_CACHE_INVALIDATE:
16142  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
16143  break;
16144  default:
16145  VMA_ASSERT(0);
16146  }
16147  }
16148  // else: Just ignore this call.
16149 }
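/*
Usage sketch (illustrative): after writing to a mapped allocation that may live in
non-coherent memory, flush it through the public wrapper; the alignment to
nonCoherentAtomSize seen above is handled internally. Assumes valid `allocator`
and `alloc` handles and host-side `srcData`/`srcSize`.

    void* pData;
    vmaMapMemory(allocator, alloc, &pData);
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, alloc);
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/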
16150 
16151 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16152 {
16153  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16154 
16155  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16156  {
16157  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16158  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16159  VMA_ASSERT(pDedicatedAllocations);
16160  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16161  VMA_ASSERT(success);
16162  }
16163 
16164  VkDeviceMemory hMemory = allocation->GetMemory();
16165 
16166  /*
16167  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16168  before vkFreeMemory.
16169 
16170  if(allocation->GetMappedData() != VMA_NULL)
16171  {
16172  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16173  }
16174  */
16175 
16176  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16177 
16178  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16179 }
16180 
16181 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16182 {
16183  VkBufferCreateInfo dummyBufCreateInfo;
16184  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16185 
16186  uint32_t memoryTypeBits = 0;
16187 
16188  // Create buffer.
16189  VkBuffer buf = VK_NULL_HANDLE;
16190  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16191  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16192  if(res == VK_SUCCESS)
16193  {
16194  // Query for supported memory types.
16195  VkMemoryRequirements memReq;
16196  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16197  memoryTypeBits = memReq.memoryTypeBits;
16198 
16199  // Destroy buffer.
16200  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16201  }
16202 
16203  return memoryTypeBits;
16204 }
16205 
16206 #if VMA_MEMORY_BUDGET
16207 
16208 void VmaAllocator_T::UpdateVulkanBudget()
16209 {
16210  VMA_ASSERT(m_UseExtMemoryBudget);
16211 
16212  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16213 
16214  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16215  memProps.pNext = &budgetProps;
16216 
16217  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16218 
16219  {
16220  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16221 
16222  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16223  {
16224  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16225  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16226  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16227  }
16228  m_Budget.m_OperationsSinceBudgetFetch = 0;
16229  }
16230 }
16231 
16232 #endif // #if VMA_MEMORY_BUDGET
16233 
16234 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16235 {
16236  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16237  !hAllocation->CanBecomeLost() &&
16238  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16239  {
16240  void* pData = VMA_NULL;
16241  VkResult res = Map(hAllocation, &pData);
16242  if(res == VK_SUCCESS)
16243  {
16244  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16245  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16246  Unmap(hAllocation);
16247  }
16248  else
16249  {
16250  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16251  }
16252  }
16253 }
16254 
16255 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16256 {
16257  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16258  if(memoryTypeBits == UINT32_MAX)
16259  {
16260  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16261  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16262  }
16263  return memoryTypeBits;
16264 }
16265 
16266 #if VMA_STATS_STRING_ENABLED
16267 
16268 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16269 {
16270  bool dedicatedAllocationsStarted = false;
16271  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16272  {
16273  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16274  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16275  VMA_ASSERT(pDedicatedAllocVector);
16276  if(pDedicatedAllocVector->empty() == false)
16277  {
16278  if(dedicatedAllocationsStarted == false)
16279  {
16280  dedicatedAllocationsStarted = true;
16281  json.WriteString("DedicatedAllocations");
16282  json.BeginObject();
16283  }
16284 
16285  json.BeginString("Type ");
16286  json.ContinueString(memTypeIndex);
16287  json.EndString();
16288 
16289  json.BeginArray();
16290 
16291  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
16292  {
16293  json.BeginObject(true);
16294  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
16295  hAlloc->PrintParameters(json);
16296  json.EndObject();
16297  }
16298 
16299  json.EndArray();
16300  }
16301  }
16302  if(dedicatedAllocationsStarted)
16303  {
16304  json.EndObject();
16305  }
16306 
16307  {
16308  bool allocationsStarted = false;
16309  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16310  {
16311  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
16312  {
16313  if(allocationsStarted == false)
16314  {
16315  allocationsStarted = true;
16316  json.WriteString("DefaultPools");
16317  json.BeginObject();
16318  }
16319 
16320  json.BeginString("Type ");
16321  json.ContinueString(memTypeIndex);
16322  json.EndString();
16323 
16324  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
16325  }
16326  }
16327  if(allocationsStarted)
16328  {
16329  json.EndObject();
16330  }
16331  }
16332 
16333  // Custom pools
16334  {
16335  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16336  const size_t poolCount = m_Pools.size();
16337  if(poolCount > 0)
16338  {
16339  json.WriteString("Pools");
16340  json.BeginObject();
16341  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
16342  {
16343  json.BeginString();
16344  json.ContinueString(m_Pools[poolIndex]->GetId());
16345  json.EndString();
16346 
16347  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
16348  }
16349  json.EndObject();
16350  }
16351  }
16352 }
16353 
16354 #endif // #if VMA_STATS_STRING_ENABLED
16355 
16356 //////////////////////////////////////////////////////////////////////////////////
16357 // Public interface
16358 
16359 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
16360  const VmaAllocatorCreateInfo* pCreateInfo,
16361  VmaAllocator* pAllocator)
16362 {
16363  VMA_ASSERT(pCreateInfo && pAllocator);
16364  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
16365  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
16366  VMA_DEBUG_LOG("vmaCreateAllocator");
16367  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
16368  return (*pAllocator)->Init(pCreateInfo);
16369 }
16370 
16371 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
16372  VmaAllocator allocator)
16373 {
16374  if(allocator != VK_NULL_HANDLE)
16375  {
16376  VMA_DEBUG_LOG("vmaDestroyAllocator");
16377  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
16378  vma_delete(&allocationCallbacks, allocator);
16379  }
16380 }
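/*
Usage sketch (illustrative): typical lifetime of an allocator around these two
entry points. `instance`, `physicalDevice`, and `device` are assumed to be valid
Vulkan handles created elsewhere; error handling is omitted.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);

    // ... create buffers and images ...

    vmaDestroyAllocator(allocator);
*/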
16381 
16382 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
16383  VmaAllocator allocator,
16384  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
16385 {
16386  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
16387  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
16388 }
16389 
16390 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
16391  VmaAllocator allocator,
16392  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
16393 {
16394  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
16395  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
16396 }
16397 
16398 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
16399  VmaAllocator allocator,
16400  uint32_t memoryTypeIndex,
16401  VkMemoryPropertyFlags* pFlags)
16402 {
16403  VMA_ASSERT(allocator && pFlags);
16404  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
16405  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
16406 }
16407 
16408 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
16409  VmaAllocator allocator,
16410  uint32_t frameIndex)
16411 {
16412  VMA_ASSERT(allocator);
16413  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
16414 
16415  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16416 
16417  allocator->SetCurrentFrameIndex(frameIndex);
16418 }
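/*
Usage sketch (illustrative): call this once per frame, before other VMA calls in
that frame, so the lost-allocation machinery and the budget refresh above operate
on a current frame index. `frameIndex` is an assumed per-application counter.

    // At the beginning of each frame:
    vmaSetCurrentFrameIndex(allocator, frameIndex++);
*/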
16419 
16420 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
16421  VmaAllocator allocator,
16422  VmaStats* pStats)
16423 {
16424  VMA_ASSERT(allocator && pStats);
16425  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16426  allocator->CalculateStats(pStats);
16427 }
16428 
16429 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
16430  VmaAllocator allocator,
16431  VmaBudget* pBudget)
16432 {
16433  VMA_ASSERT(allocator && pBudget);
16434  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16435  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
16436 }
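/*
Usage sketch (illustrative): polling the budget to stay within what the OS is
willing to give the process. Assumes a valid `allocator`, that heap 0 is the heap
of interest, and a hypothetical `newResourceSize` about to be allocated.

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    if(budgets[0].usage + newResourceSize > budgets[0].budget)
    {
        // Over budget - stream something out or allocate from another heap.
    }
*/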
16437 
16438 #if VMA_STATS_STRING_ENABLED
16439 
16440 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
16441  VmaAllocator allocator,
16442  char** ppStatsString,
16443  VkBool32 detailedMap)
16444 {
16445  VMA_ASSERT(allocator && ppStatsString);
16446  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16447 
16448  VmaStringBuilder sb(allocator);
16449  {
16450  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
16451  json.BeginObject();
16452 
16453  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
16454  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
16455 
16456  VmaStats stats;
16457  allocator->CalculateStats(&stats);
16458 
16459  json.WriteString("Total");
16460  VmaPrintStatInfo(json, stats.total);
16461 
16462  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
16463  {
16464  json.BeginString("Heap ");
16465  json.ContinueString(heapIndex);
16466  json.EndString();
16467  json.BeginObject();
16468 
16469  json.WriteString("Size");
16470  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
16471 
16472  json.WriteString("Flags");
16473  json.BeginArray(true);
16474  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
16475  {
16476  json.WriteString("DEVICE_LOCAL");
16477  }
16478  json.EndArray();
16479 
16480  json.WriteString("Budget");
16481  json.BeginObject();
16482  {
16483  json.WriteString("BlockBytes");
16484  json.WriteNumber(budget[heapIndex].blockBytes);
16485  json.WriteString("AllocationBytes");
16486  json.WriteNumber(budget[heapIndex].allocationBytes);
16487  json.WriteString("Usage");
16488  json.WriteNumber(budget[heapIndex].usage);
16489  json.WriteString("Budget");
16490  json.WriteNumber(budget[heapIndex].budget);
16491  }
16492  json.EndObject();
16493 
16494  if(stats.memoryHeap[heapIndex].blockCount > 0)
16495  {
16496  json.WriteString("Stats");
16497  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
16498  }
16499 
16500  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
16501  {
16502  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
16503  {
16504  json.BeginString("Type ");
16505  json.ContinueString(typeIndex);
16506  json.EndString();
16507 
16508  json.BeginObject();
16509 
16510  json.WriteString("Flags");
16511  json.BeginArray(true);
16512  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
16513  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
16514  {
16515  json.WriteString("DEVICE_LOCAL");
16516  }
16517  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16518  {
16519  json.WriteString("HOST_VISIBLE");
16520  }
16521  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
16522  {
16523  json.WriteString("HOST_COHERENT");
16524  }
16525  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
16526  {
16527  json.WriteString("HOST_CACHED");
16528  }
16529  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
16530  {
16531  json.WriteString("LAZILY_ALLOCATED");
16532  }
16533  json.EndArray();
16534 
16535  if(stats.memoryType[typeIndex].blockCount > 0)
16536  {
16537  json.WriteString("Stats");
16538  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
16539  }
16540 
16541  json.EndObject();
16542  }
16543  }
16544 
16545  json.EndObject();
16546  }
16547  if(detailedMap == VK_TRUE)
16548  {
16549  allocator->PrintDetailedMap(json);
16550  }
16551 
16552  json.EndObject();
16553  }
16554 
16555  const size_t len = sb.GetLength();
16556  char* const pChars = vma_new_array(allocator, char, len + 1);
16557  if(len > 0)
16558  {
16559  memcpy(pChars, sb.GetData(), len);
16560  }
16561  pChars[len] = '\0';
16562  *ppStatsString = pChars;
16563 }
16564 
16565 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
16566  VmaAllocator allocator,
16567  char* pStatsString)
16568 {
16569  if(pStatsString != VMA_NULL)
16570  {
16571  VMA_ASSERT(allocator);
16572  size_t len = strlen(pStatsString);
16573  vma_delete_array(allocator, pStatsString, len + 1);
16574  }
16575 }
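/*
Usage sketch (illustrative): building, printing, and releasing the JSON dump.
Assumes a valid `allocator`.

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/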
16576 
16577 #endif // #if VMA_STATS_STRING_ENABLED
16578 
16579 /*
16580 This function is not protected by any mutex because it just reads immutable data.
16581 */
16582 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
16583  VmaAllocator allocator,
16584  uint32_t memoryTypeBits,
16585  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16586  uint32_t* pMemoryTypeIndex)
16587 {
16588  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16589  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16590  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16591 
16592  if(pAllocationCreateInfo->memoryTypeBits != 0)
16593  {
16594  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
16595  }
16596 
16597  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
16598  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
16599  uint32_t notPreferredFlags = 0;
16600 
16601  // Convert usage to requiredFlags and preferredFlags.
16602  switch(pAllocationCreateInfo->usage)
16603  {
16604  case VMA_MEMORY_USAGE_UNKNOWN:
16605  break;
16606  case VMA_MEMORY_USAGE_GPU_ONLY:
16607  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16608  {
16609  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16610  }
16611  break;
16612  case VMA_MEMORY_USAGE_CPU_ONLY:
16613  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
16614  break;
16615  case VMA_MEMORY_USAGE_CPU_TO_GPU:
16616  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16617  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16618  {
16619  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16620  }
16621  break;
16622  case VMA_MEMORY_USAGE_GPU_TO_CPU:
16623  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16624  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16625  break;
16626  case VMA_MEMORY_USAGE_CPU_COPY:
16627  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16628  break;
16629  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
16630  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
16631  break;
16632  default:
16633  VMA_ASSERT(0);
16634  break;
16635  }
16636 
16637  *pMemoryTypeIndex = UINT32_MAX;
16638  uint32_t minCost = UINT32_MAX;
16639  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16640  memTypeIndex < allocator->GetMemoryTypeCount();
16641  ++memTypeIndex, memTypeBit <<= 1)
16642  {
16643  // This memory type is acceptable according to memoryTypeBits bitmask.
16644  if((memTypeBit & memoryTypeBits) != 0)
16645  {
16646  const VkMemoryPropertyFlags currFlags =
16647  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16648  // This memory type contains requiredFlags.
16649  if((requiredFlags & ~currFlags) == 0)
16650  {
16651  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16652  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
16653  VmaCountBitsSet(currFlags & notPreferredFlags);
16654  // Remember memory type with lowest cost.
16655  if(currCost < minCost)
16656  {
16657  *pMemoryTypeIndex = memTypeIndex;
16658  if(currCost == 0)
16659  {
16660  return VK_SUCCESS;
16661  }
16662  minCost = currCost;
16663  }
16664  }
16665  }
16666  }
16667  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16668 }
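/*
Usage sketch (illustrative): picking a host-visible memory type for a staging
buffer. `memoryTypeBits` would normally come from vkGetBufferMemoryRequirements;
UINT32_MAX here means "any memory type is acceptable".

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/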
16669 
16670 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
16671  VmaAllocator allocator,
16672  const VkBufferCreateInfo* pBufferCreateInfo,
16673  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16674  uint32_t* pMemoryTypeIndex)
16675 {
16676  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16677  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16678  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16679  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16680 
16681  const VkDevice hDev = allocator->m_hDevice;
16682  VkBuffer hBuffer = VK_NULL_HANDLE;
16683  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16684  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16685  if(res == VK_SUCCESS)
16686  {
16687  VkMemoryRequirements memReq = {};
16688  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16689  hDev, hBuffer, &memReq);
16690 
16691  res = vmaFindMemoryTypeIndex(
16692  allocator,
16693  memReq.memoryTypeBits,
16694  pAllocationCreateInfo,
16695  pMemoryTypeIndex);
16696 
16697  allocator->GetVulkanFunctions().vkDestroyBuffer(
16698  hDev, hBuffer, allocator->GetAllocationCallbacks());
16699  }
16700  return res;
16701 }
16702 
16703 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
16704  VmaAllocator allocator,
16705  const VkImageCreateInfo* pImageCreateInfo,
16706  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16707  uint32_t* pMemoryTypeIndex)
16708 {
16709  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16710  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16713 
16714  const VkDevice hDev = allocator->m_hDevice;
16715  VkImage hImage = VK_NULL_HANDLE;
16716  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16717  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16718  if(res == VK_SUCCESS)
16719  {
16720  VkMemoryRequirements memReq = {};
16721  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16722  hDev, hImage, &memReq);
16723 
16724  res = vmaFindMemoryTypeIndex(
16725  allocator,
16726  memReq.memoryTypeBits,
16727  pAllocationCreateInfo,
16728  pMemoryTypeIndex);
16729 
16730  allocator->GetVulkanFunctions().vkDestroyImage(
16731  hDev, hImage, allocator->GetAllocationCallbacks());
16732  }
16733  return res;
16734 }
16735 
16736 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
16737  VmaAllocator allocator,
16738  const VmaPoolCreateInfo* pCreateInfo,
16739  VmaPool* pPool)
16740 {
16741  VMA_ASSERT(allocator && pCreateInfo && pPool);
16742 
16743  VMA_DEBUG_LOG("vmaCreatePool");
16744 
16745  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16746 
16747  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16748 
16749 #if VMA_RECORDING_ENABLED
16750  if(allocator->GetRecorder() != VMA_NULL)
16751  {
16752  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16753  }
16754 #endif
16755 
16756  return res;
16757 }
16758 
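/*
A minimal sketch of creating a custom pool, assuming `memTypeIndex` was
obtained from one of the vmaFindMemoryTypeIndex*() helpers above:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block.
poolCreateInfo.maxBlockCount = 2;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
\endcode
*/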
16759 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
16760  VmaAllocator allocator,
16761  VmaPool pool)
16762 {
16763  VMA_ASSERT(allocator);
16764 
16765  if(pool == VK_NULL_HANDLE)
16766  {
16767  return;
16768  }
16769 
16770  VMA_DEBUG_LOG("vmaDestroyPool");
16771 
16772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16773 
16774 #if VMA_RECORDING_ENABLED
16775  if(allocator->GetRecorder() != VMA_NULL)
16776  {
16777  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16778  }
16779 #endif
16780 
16781  allocator->DestroyPool(pool);
16782 }
16783 
16784 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
16785  VmaAllocator allocator,
16786  VmaPool pool,
16787  VmaPoolStats* pPoolStats)
16788 {
16789  VMA_ASSERT(allocator && pool && pPoolStats);
16790 
16791  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16792 
16793  allocator->GetPoolStats(pool, pPoolStats);
16794 }
16795 
16796 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
16797  VmaAllocator allocator,
16798  VmaPool pool,
16799  size_t* pLostAllocationCount)
16800 {
16801  VMA_ASSERT(allocator && pool);
16802 
16803  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16804 
16805 #if VMA_RECORDING_ENABLED
16806  if(allocator->GetRecorder() != VMA_NULL)
16807  {
16808  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16809  }
16810 #endif
16811 
16812  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16813 }
16814 
16815 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16816 {
16817  VMA_ASSERT(allocator && pool);
16818 
16819  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16820 
16821  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16822 
16823  return allocator->CheckPoolCorruption(pool);
16824 }
16825 
16826 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
16827  VmaAllocator allocator,
16828  VmaPool pool,
16829  const char** ppName)
16830 {
16831  VMA_ASSERT(allocator && pool);
16832 
16833  VMA_DEBUG_LOG("vmaGetPoolName");
16834 
16835  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16836 
16837  *ppName = pool->GetName();
16838 }
16839 
16840 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
16841  VmaAllocator allocator,
16842  VmaPool pool,
16843  const char* pName)
16844 {
16845  VMA_ASSERT(allocator && pool);
16846 
16847  VMA_DEBUG_LOG("vmaSetPoolName");
16848 
16849  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16850 
16851  pool->SetName(pName);
16852 
16853 #if VMA_RECORDING_ENABLED
16854  if(allocator->GetRecorder() != VMA_NULL)
16855  {
16856  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
16857  }
16858 #endif
16859 }
16860 
16861 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
16862  VmaAllocator allocator,
16863  const VkMemoryRequirements* pVkMemoryRequirements,
16864  const VmaAllocationCreateInfo* pCreateInfo,
16865  VmaAllocation* pAllocation,
16866  VmaAllocationInfo* pAllocationInfo)
16867 {
16868  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16869 
16870  VMA_DEBUG_LOG("vmaAllocateMemory");
16871 
16872  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16873 
16874  VkResult result = allocator->AllocateMemory(
16875  *pVkMemoryRequirements,
16876  false, // requiresDedicatedAllocation
16877  false, // prefersDedicatedAllocation
16878  VK_NULL_HANDLE, // dedicatedBuffer
16879  VK_NULL_HANDLE, // dedicatedImage
16880  *pCreateInfo,
16881  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16882  1, // allocationCount
16883  pAllocation);
16884 
16885 #if VMA_RECORDING_ENABLED
16886  if(allocator->GetRecorder() != VMA_NULL)
16887  {
16888  allocator->GetRecorder()->RecordAllocateMemory(
16889  allocator->GetCurrentFrameIndex(),
16890  *pVkMemoryRequirements,
16891  *pCreateInfo,
16892  *pAllocation);
16893  }
16894 #endif
16895 
16896  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16897  {
16898  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16899  }
16900 
16901  return result;
16902 }
16903 
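/*
A sketch of the low-level path through vmaAllocateMemory(), assuming the
caller queried VkMemoryRequirements itself (here via
vkGetBufferMemoryRequirements for an existing `device` and `buffer`) and
binds the memory manually afterwards:

\code
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
\endcode
*/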
16904 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
16905  VmaAllocator allocator,
16906  const VkMemoryRequirements* pVkMemoryRequirements,
16907  const VmaAllocationCreateInfo* pCreateInfo,
16908  size_t allocationCount,
16909  VmaAllocation* pAllocations,
16910  VmaAllocationInfo* pAllocationInfo)
16911 {
16912  if(allocationCount == 0)
16913  {
16914  return VK_SUCCESS;
16915  }
16916 
16917  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16918 
16919  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16920 
16921  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16922 
16923  VkResult result = allocator->AllocateMemory(
16924  *pVkMemoryRequirements,
16925  false, // requiresDedicatedAllocation
16926  false, // prefersDedicatedAllocation
16927  VK_NULL_HANDLE, // dedicatedBuffer
16928  VK_NULL_HANDLE, // dedicatedImage
16929  *pCreateInfo,
16930  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16931  allocationCount,
16932  pAllocations);
16933 
16934 #if VMA_RECORDING_ENABLED
16935  if(allocator->GetRecorder() != VMA_NULL)
16936  {
16937  allocator->GetRecorder()->RecordAllocateMemoryPages(
16938  allocator->GetCurrentFrameIndex(),
16939  *pVkMemoryRequirements,
16940  *pCreateInfo,
16941  (uint64_t)allocationCount,
16942  pAllocations);
16943  }
16944 #endif
16945 
16946  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16947  {
16948  for(size_t i = 0; i < allocationCount; ++i)
16949  {
16950  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16951  }
16952  }
16953 
16954  return result;
16955 }
16956 
16957 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
16958  VmaAllocator allocator,
16959  VkBuffer buffer,
16960  const VmaAllocationCreateInfo* pCreateInfo,
16961  VmaAllocation* pAllocation,
16962  VmaAllocationInfo* pAllocationInfo)
16963 {
16964  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16965 
16966  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16967 
16968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16969 
16970  VkMemoryRequirements vkMemReq = {};
16971  bool requiresDedicatedAllocation = false;
16972  bool prefersDedicatedAllocation = false;
16973  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16974  requiresDedicatedAllocation,
16975  prefersDedicatedAllocation);
16976 
16977  VkResult result = allocator->AllocateMemory(
16978  vkMemReq,
16979  requiresDedicatedAllocation,
16980  prefersDedicatedAllocation,
16981  buffer, // dedicatedBuffer
16982  VK_NULL_HANDLE, // dedicatedImage
16983  *pCreateInfo,
16984  VMA_SUBALLOCATION_TYPE_BUFFER,
16985  1, // allocationCount
16986  pAllocation);
16987 
16988 #if VMA_RECORDING_ENABLED
16989  if(allocator->GetRecorder() != VMA_NULL)
16990  {
16991  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16992  allocator->GetCurrentFrameIndex(),
16993  vkMemReq,
16994  requiresDedicatedAllocation,
16995  prefersDedicatedAllocation,
16996  *pCreateInfo,
16997  *pAllocation);
16998  }
16999 #endif
17000 
17001  if(pAllocationInfo && result == VK_SUCCESS)
17002  {
17003  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17004  }
17005 
17006  return result;
17007 }
17008 
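/*
A sketch of pairing vmaAllocateMemoryForBuffer() with an explicit bind. Note
that, as implemented above, the function only allocates; it does not bind the
buffer to the returned allocation.

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, nullptr);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, allocation, buffer);
}
\endcode
*/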
17009 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17010  VmaAllocator allocator,
17011  VkImage image,
17012  const VmaAllocationCreateInfo* pCreateInfo,
17013  VmaAllocation* pAllocation,
17014  VmaAllocationInfo* pAllocationInfo)
17015 {
17016  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17017 
17018  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17019 
17020  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17021 
17022  VkMemoryRequirements vkMemReq = {};
17023  bool requiresDedicatedAllocation = false;
17024  bool prefersDedicatedAllocation = false;
17025  allocator->GetImageMemoryRequirements(image, vkMemReq,
17026  requiresDedicatedAllocation, prefersDedicatedAllocation);
17027 
17028  VkResult result = allocator->AllocateMemory(
17029  vkMemReq,
17030  requiresDedicatedAllocation,
17031  prefersDedicatedAllocation,
17032  VK_NULL_HANDLE, // dedicatedBuffer
17033  image, // dedicatedImage
17034  *pCreateInfo,
17035  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17036  1, // allocationCount
17037  pAllocation);
17038 
17039 #if VMA_RECORDING_ENABLED
17040  if(allocator->GetRecorder() != VMA_NULL)
17041  {
17042  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17043  allocator->GetCurrentFrameIndex(),
17044  vkMemReq,
17045  requiresDedicatedAllocation,
17046  prefersDedicatedAllocation,
17047  *pCreateInfo,
17048  *pAllocation);
17049  }
17050 #endif
17051 
17052  if(pAllocationInfo && result == VK_SUCCESS)
17053  {
17054  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17055  }
17056 
17057  return result;
17058 }
17059 
17060 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17061  VmaAllocator allocator,
17062  VmaAllocation allocation)
17063 {
17064  VMA_ASSERT(allocator);
17065 
17066  if(allocation == VK_NULL_HANDLE)
17067  {
17068  return;
17069  }
17070 
17071  VMA_DEBUG_LOG("vmaFreeMemory");
17072 
17073  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17074 
17075 #if VMA_RECORDING_ENABLED
17076  if(allocator->GetRecorder() != VMA_NULL)
17077  {
17078  allocator->GetRecorder()->RecordFreeMemory(
17079  allocator->GetCurrentFrameIndex(),
17080  allocation);
17081  }
17082 #endif
17083 
17084  allocator->FreeMemory(
17085  1, // allocationCount
17086  &allocation);
17087 }
17088 
17089 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17090  VmaAllocator allocator,
17091  size_t allocationCount,
17092  VmaAllocation* pAllocations)
17093 {
17094  if(allocationCount == 0)
17095  {
17096  return;
17097  }
17098 
17099  VMA_ASSERT(allocator);
17100 
17101  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17102 
17103  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17104 
17105 #if VMA_RECORDING_ENABLED
17106  if(allocator->GetRecorder() != VMA_NULL)
17107  {
17108  allocator->GetRecorder()->RecordFreeMemoryPages(
17109  allocator->GetCurrentFrameIndex(),
17110  (uint64_t)allocationCount,
17111  pAllocations);
17112  }
17113 #endif
17114 
17115  allocator->FreeMemory(allocationCount, pAllocations);
17116 }
17117 
17118 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17119  VmaAllocator allocator,
17120  VmaAllocation allocation,
17121  VkDeviceSize newSize)
17122 {
17123  VMA_ASSERT(allocator && allocation);
17124 
17125  VMA_DEBUG_LOG("vmaResizeAllocation");
17126 
17127  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17128 
17129  return allocator->ResizeAllocation(allocation, newSize);
17130 }
17131 
17132 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17133  VmaAllocator allocator,
17134  VmaAllocation allocation,
17135  VmaAllocationInfo* pAllocationInfo)
17136 {
17137  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17138 
17139  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17140 
17141 #if VMA_RECORDING_ENABLED
17142  if(allocator->GetRecorder() != VMA_NULL)
17143  {
17144  allocator->GetRecorder()->RecordGetAllocationInfo(
17145  allocator->GetCurrentFrameIndex(),
17146  allocation);
17147  }
17148 #endif
17149 
17150  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17151 }
17152 
17153 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17154  VmaAllocator allocator,
17155  VmaAllocation allocation)
17156 {
17157  VMA_ASSERT(allocator && allocation);
17158 
17159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17160 
17161 #if VMA_RECORDING_ENABLED
17162  if(allocator->GetRecorder() != VMA_NULL)
17163  {
17164  allocator->GetRecorder()->RecordTouchAllocation(
17165  allocator->GetCurrentFrameIndex(),
17166  allocation);
17167  }
17168 #endif
17169 
17170  return allocator->TouchAllocation(allocation);
17171 }
17172 
17173 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17174  VmaAllocator allocator,
17175  VmaAllocation allocation,
17176  void* pUserData)
17177 {
17178  VMA_ASSERT(allocator && allocation);
17179 
17180  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17181 
17182  allocation->SetUserData(allocator, pUserData);
17183 
17184 #if VMA_RECORDING_ENABLED
17185  if(allocator->GetRecorder() != VMA_NULL)
17186  {
17187  allocator->GetRecorder()->RecordSetAllocationUserData(
17188  allocator->GetCurrentFrameIndex(),
17189  allocation,
17190  pUserData);
17191  }
17192 #endif
17193 }
17194 
17195 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17196  VmaAllocator allocator,
17197  VmaAllocation* pAllocation)
17198 {
17199  VMA_ASSERT(allocator && pAllocation);
17200 
17201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17202 
17203  allocator->CreateLostAllocation(pAllocation);
17204 
17205 #if VMA_RECORDING_ENABLED
17206  if(allocator->GetRecorder() != VMA_NULL)
17207  {
17208  allocator->GetRecorder()->RecordCreateLostAllocation(
17209  allocator->GetCurrentFrameIndex(),
17210  *pAllocation);
17211  }
17212 #endif
17213 }
17214 
17215 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17216  VmaAllocator allocator,
17217  VmaAllocation allocation,
17218  void** ppData)
17219 {
17220  VMA_ASSERT(allocator && allocation && ppData);
17221 
17222  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17223 
17224  VkResult res = allocator->Map(allocation, ppData);
17225 
17226 #if VMA_RECORDING_ENABLED
17227  if(allocator->GetRecorder() != VMA_NULL)
17228  {
17229  allocator->GetRecorder()->RecordMapMemory(
17230  allocator->GetCurrentFrameIndex(),
17231  allocation);
17232  }
17233 #endif
17234 
17235  return res;
17236 }
17237 
17238 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17239  VmaAllocator allocator,
17240  VmaAllocation allocation)
17241 {
17242  VMA_ASSERT(allocator && allocation);
17243 
17244  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17245 
17246 #if VMA_RECORDING_ENABLED
17247  if(allocator->GetRecorder() != VMA_NULL)
17248  {
17249  allocator->GetRecorder()->RecordUnmapMemory(
17250  allocator->GetCurrentFrameIndex(),
17251  allocation);
17252  }
17253 #endif
17254 
17255  allocator->Unmap(allocation);
17256 }
17257 
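/*
A minimal map/write/unmap sketch, assuming `allocation` was created in a
HOST_VISIBLE memory type and `srcData`/`srcDataSize` describe the data to
upload:

\code
void* mappedData;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize);
    vmaUnmapMemory(allocator, allocation);
}
\endcode
*/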
17258 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17259 {
17260  VMA_ASSERT(allocator && allocation);
17261 
17262  VMA_DEBUG_LOG("vmaFlushAllocation");
17263 
17264  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17265 
17266  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17267 
17268 #if VMA_RECORDING_ENABLED
17269  if(allocator->GetRecorder() != VMA_NULL)
17270  {
17271  allocator->GetRecorder()->RecordFlushAllocation(
17272  allocator->GetCurrentFrameIndex(),
17273  allocation, offset, size);
17274  }
17275 #endif
17276 }
17277 
17278 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17279 {
17280  VMA_ASSERT(allocator && allocation);
17281 
17282  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17283 
17284  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17285 
17286  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17287 
17288 #if VMA_RECORDING_ENABLED
17289  if(allocator->GetRecorder() != VMA_NULL)
17290  {
17291  allocator->GetRecorder()->RecordInvalidateAllocation(
17292  allocator->GetCurrentFrameIndex(),
17293  allocation, offset, size);
17294  }
17295 #endif
17296 }
17297 
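/*
A sketch of when the two cache-control calls above are needed: only for
memory types that are HOST_VISIBLE but not HOST_COHERENT.

\code
// After CPU writes to a mapped allocation, before the GPU reads it:
vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);

// Before CPU reads data the GPU has written to the allocation:
vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
\endcode
*/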
17298 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17299 {
17300  VMA_ASSERT(allocator);
17301 
17302  VMA_DEBUG_LOG("vmaCheckCorruption");
17303 
17304  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17305 
17306  return allocator->CheckCorruption(memoryTypeBits);
17307 }
17308 
17309 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17310  VmaAllocator allocator,
17311  VmaAllocation* pAllocations,
17312  size_t allocationCount,
17313  VkBool32* pAllocationsChanged,
17314  const VmaDefragmentationInfo *pDefragmentationInfo,
17315  VmaDefragmentationStats* pDefragmentationStats)
17316 {
17317  // Deprecated interface, reimplemented on top of the new one.
17318 
17319  VmaDefragmentationInfo2 info2 = {};
17320  info2.allocationCount = (uint32_t)allocationCount;
17321  info2.pAllocations = pAllocations;
17322  info2.pAllocationsChanged = pAllocationsChanged;
17323  if(pDefragmentationInfo != VMA_NULL)
17324  {
17325  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17326  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17327  }
17328  else
17329  {
17330  info2.maxCpuAllocationsToMove = UINT32_MAX;
17331  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
17332  }
17333  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
17334 
17335  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
17336  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
17337  if(res == VK_NOT_READY)
17338  {
17339  res = vmaDefragmentationEnd(allocator, ctx);
17340  }
17341  return res;
17342 }
17343 
17344 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
17345  VmaAllocator allocator,
17346  const VmaDefragmentationInfo2* pInfo,
17347  VmaDefragmentationStats* pStats,
17348  VmaDefragmentationContext *pContext)
17349 {
17350  VMA_ASSERT(allocator && pInfo && pContext);
17351 
17352  // Degenerate case: Nothing to defragment.
17353  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
17354  {
17355  return VK_SUCCESS;
17356  }
17357 
17358  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
17359  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
17360  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
17361  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17362 
17363  VMA_DEBUG_LOG("vmaDefragmentationBegin");
17364 
17365  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17366 
17367  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
17368 
17369 #if VMA_RECORDING_ENABLED
17370  if(allocator->GetRecorder() != VMA_NULL)
17371  {
17372  allocator->GetRecorder()->RecordDefragmentationBegin(
17373  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
17374  }
17375 #endif
17376 
17377  return res;
17378 }
17379 
17380 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
17381  VmaAllocator allocator,
17382  VmaDefragmentationContext context)
17383 {
17384  VMA_ASSERT(allocator);
17385 
17386  VMA_DEBUG_LOG("vmaDefragmentationEnd");
17387 
17388  if(context != VK_NULL_HANDLE)
17389  {
17390  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17391 
17392 #if VMA_RECORDING_ENABLED
17393  if(allocator->GetRecorder() != VMA_NULL)
17394  {
17395  allocator->GetRecorder()->RecordDefragmentationEnd(
17396  allocator->GetCurrentFrameIndex(), context);
17397  }
17398 #endif
17399 
17400  return allocator->DefragmentationEnd(context);
17401  }
17402  else
17403  {
17404  return VK_SUCCESS;
17405  }
17406 }
17407 
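/*
A CPU-side defragmentation sketch using the begin/end pair above, assuming
`allocations` is an array of `allocCount` VmaAllocation handles not currently
in use by the GPU. No commandBuffer is given, so only CPU-side moves happen;
mirroring the deprecated wrapper above, the context is ended on VK_NOT_READY.

\code
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocations;

VmaDefragmentationContext defragCtx;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
\endcode
*/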
17408 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
17409  VmaAllocator allocator,
17410  VmaAllocation allocation,
17411  VkBuffer buffer)
17412 {
17413  VMA_ASSERT(allocator && allocation && buffer);
17414 
17415  VMA_DEBUG_LOG("vmaBindBufferMemory");
17416 
17417  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17418 
17419  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
17420 }
17421 
17422 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
17423  VmaAllocator allocator,
17424  VmaAllocation allocation,
17425  VkDeviceSize allocationLocalOffset,
17426  VkBuffer buffer,
17427  const void* pNext)
17428 {
17429  VMA_ASSERT(allocator && allocation && buffer);
17430 
17431  VMA_DEBUG_LOG("vmaBindBufferMemory2");
17432 
17433  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17434 
17435  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
17436 }
17437 
17438 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
17439  VmaAllocator allocator,
17440  VmaAllocation allocation,
17441  VkImage image)
17442 {
17443  VMA_ASSERT(allocator && allocation && image);
17444 
17445  VMA_DEBUG_LOG("vmaBindImageMemory");
17446 
17447  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17448 
17449  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
17450 }
17451 
17452 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
17453  VmaAllocator allocator,
17454  VmaAllocation allocation,
17455  VkDeviceSize allocationLocalOffset,
17456  VkImage image,
17457  const void* pNext)
17458 {
17459  VMA_ASSERT(allocator && allocation && image);
17460 
17461  VMA_DEBUG_LOG("vmaBindImageMemory2");
17462 
17463  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17464 
17465  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
17466 }
17467 
17468 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
17469  VmaAllocator allocator,
17470  const VkBufferCreateInfo* pBufferCreateInfo,
17471  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17472  VkBuffer* pBuffer,
17473  VmaAllocation* pAllocation,
17474  VmaAllocationInfo* pAllocationInfo)
17475 {
17476  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
17477 
17478  if(pBufferCreateInfo->size == 0)
17479  {
17480  return VK_ERROR_VALIDATION_FAILED_EXT;
17481  }
17482 
17483  VMA_DEBUG_LOG("vmaCreateBuffer");
17484 
17485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17486 
17487  *pBuffer = VK_NULL_HANDLE;
17488  *pAllocation = VK_NULL_HANDLE;
17489 
17490  // 1. Create VkBuffer.
17491  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17492  allocator->m_hDevice,
17493  pBufferCreateInfo,
17494  allocator->GetAllocationCallbacks(),
17495  pBuffer);
17496  if(res >= 0)
17497  {
17498  // 2. vkGetBufferMemoryRequirements.
17499  VkMemoryRequirements vkMemReq = {};
17500  bool requiresDedicatedAllocation = false;
17501  bool prefersDedicatedAllocation = false;
17502  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17503  requiresDedicatedAllocation, prefersDedicatedAllocation);
17504 
17505  // Make sure alignment requirements for specific buffer usages reported
17506  // in Physical Device Properties are included in alignment reported by memory requirements.
17507  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
17508  {
17509  VMA_ASSERT(vkMemReq.alignment %
17510  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
17511  }
17512  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
17513  {
17514  VMA_ASSERT(vkMemReq.alignment %
17515  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
17516  }
17517  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
17518  {
17519  VMA_ASSERT(vkMemReq.alignment %
17520  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
17521  }
17522 
17523  // 3. Allocate memory using allocator.
17524  res = allocator->AllocateMemory(
17525  vkMemReq,
17526  requiresDedicatedAllocation,
17527  prefersDedicatedAllocation,
17528  *pBuffer, // dedicatedBuffer
17529  VK_NULL_HANDLE, // dedicatedImage
17530  *pAllocationCreateInfo,
17531  VMA_SUBALLOCATION_TYPE_BUFFER,
17532  1, // allocationCount
17533  pAllocation);
17534 
17535 #if VMA_RECORDING_ENABLED
17536  if(allocator->GetRecorder() != VMA_NULL)
17537  {
17538  allocator->GetRecorder()->RecordCreateBuffer(
17539  allocator->GetCurrentFrameIndex(),
17540  *pBufferCreateInfo,
17541  *pAllocationCreateInfo,
17542  *pAllocation);
17543  }
17544 #endif
17545 
17546  if(res >= 0)
17547  {
17548  // 4. Bind buffer with memory.
17549  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17550  {
17551  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17552  }
17553  if(res >= 0)
17554  {
17555  // All steps succeeded.
17556  #if VMA_STATS_STRING_ENABLED
17557  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17558  #endif
17559  if(pAllocationInfo != VMA_NULL)
17560  {
17561  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17562  }
17563 
17564  return VK_SUCCESS;
17565  }
17566  allocator->FreeMemory(
17567  1, // allocationCount
17568  pAllocation);
17569  *pAllocation = VK_NULL_HANDLE;
17570  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17571  *pBuffer = VK_NULL_HANDLE;
17572  return res;
17573  }
17574  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17575  *pBuffer = VK_NULL_HANDLE;
17576  return res;
17577  }
17578  return res;
17579 }
17580 
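/*
A typical-use sketch of vmaCreateBuffer(), which performs all the steps
commented above (create, query requirements, allocate, bind) in one call:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
\endcode
*/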
17581 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17582  VmaAllocator allocator,
17583  VkBuffer buffer,
17584  VmaAllocation allocation)
17585 {
17586  VMA_ASSERT(allocator);
17587 
17588  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17589  {
17590  return;
17591  }
17592 
17593  VMA_DEBUG_LOG("vmaDestroyBuffer");
17594 
17595  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17596 
17597 #if VMA_RECORDING_ENABLED
17598  if(allocator->GetRecorder() != VMA_NULL)
17599  {
17600  allocator->GetRecorder()->RecordDestroyBuffer(
17601  allocator->GetCurrentFrameIndex(),
17602  allocation);
17603  }
17604 #endif
17605 
17606  if(buffer != VK_NULL_HANDLE)
17607  {
17608  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17609  }
17610 
17611  if(allocation != VK_NULL_HANDLE)
17612  {
17613  allocator->FreeMemory(
17614  1, // allocationCount
17615  &allocation);
17616  }
17617 }
17618 
17619 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
17620  VmaAllocator allocator,
17621  const VkImageCreateInfo* pImageCreateInfo,
17622  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17623  VkImage* pImage,
17624  VmaAllocation* pAllocation,
17625  VmaAllocationInfo* pAllocationInfo)
17626 {
17627  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17628 
17629  if(pImageCreateInfo->extent.width == 0 ||
17630  pImageCreateInfo->extent.height == 0 ||
17631  pImageCreateInfo->extent.depth == 0 ||
17632  pImageCreateInfo->mipLevels == 0 ||
17633  pImageCreateInfo->arrayLayers == 0)
17634  {
17635  return VK_ERROR_VALIDATION_FAILED_EXT;
17636  }
17637 
17638  VMA_DEBUG_LOG("vmaCreateImage");
17639 
17640  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17641 
17642  *pImage = VK_NULL_HANDLE;
17643  *pAllocation = VK_NULL_HANDLE;
17644 
17645  // 1. Create VkImage.
17646  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17647  allocator->m_hDevice,
17648  pImageCreateInfo,
17649  allocator->GetAllocationCallbacks(),
17650  pImage);
17651  if(res >= 0)
17652  {
17653  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17654  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17655  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17656 
17657  // 2. Allocate memory using allocator.
17658  VkMemoryRequirements vkMemReq = {};
17659  bool requiresDedicatedAllocation = false;
17660  bool prefersDedicatedAllocation = false;
17661  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17662  requiresDedicatedAllocation, prefersDedicatedAllocation);
17663 
17664  res = allocator->AllocateMemory(
17665  vkMemReq,
17666  requiresDedicatedAllocation,
17667  prefersDedicatedAllocation,
17668  VK_NULL_HANDLE, // dedicatedBuffer
17669  *pImage, // dedicatedImage
17670  *pAllocationCreateInfo,
17671  suballocType,
17672  1, // allocationCount
17673  pAllocation);
17674 
17675 #if VMA_RECORDING_ENABLED
17676  if(allocator->GetRecorder() != VMA_NULL)
17677  {
17678  allocator->GetRecorder()->RecordCreateImage(
17679  allocator->GetCurrentFrameIndex(),
17680  *pImageCreateInfo,
17681  *pAllocationCreateInfo,
17682  *pAllocation);
17683  }
17684 #endif
17685 
17686  if(res >= 0)
17687  {
17688  // 3. Bind image with memory.
17689  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17690  {
17691  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17692  }
17693  if(res >= 0)
17694  {
17695  // All steps succeeded.
17696  #if VMA_STATS_STRING_ENABLED
17697  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17698  #endif
17699  if(pAllocationInfo != VMA_NULL)
17700  {
17701  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17702  }
17703 
17704  return VK_SUCCESS;
17705  }
17706  allocator->FreeMemory(
17707  1, // allocationCount
17708  pAllocation);
17709  *pAllocation = VK_NULL_HANDLE;
17710  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17711  *pImage = VK_NULL_HANDLE;
17712  return res;
17713  }
17714  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17715  *pImage = VK_NULL_HANDLE;
17716  return res;
17717  }
17718  return res;
17719 }
17720 
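/*
A sketch of the analogous image path. Note how the tiling chosen in
VkImageCreateInfo selects the suballocation type in the implementation above.

\code
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, nullptr);
// ...
vmaDestroyImage(allocator, image, allocation);
\endcode
*/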
17721 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17722  VmaAllocator allocator,
17723  VkImage image,
17724  VmaAllocation allocation)
17725 {
17726  VMA_ASSERT(allocator);
17727 
17728  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17729  {
17730  return;
17731  }
17732 
17733  VMA_DEBUG_LOG("vmaDestroyImage");
17734 
17735  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17736 
17737 #if VMA_RECORDING_ENABLED
17738  if(allocator->GetRecorder() != VMA_NULL)
17739  {
17740  allocator->GetRecorder()->RecordDestroyImage(
17741  allocator->GetCurrentFrameIndex(),
17742  allocation);
17743  }
17744 #endif
17745 
17746  if(image != VK_NULL_HANDLE)
17747  {
17748  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17749  }
17750  if(allocation != VK_NULL_HANDLE)
17751  {
17752  allocator->FreeMemory(
17753  1, // allocationCount
17754  &allocation);
17755  }
17756 }
17757 
17758 #endif // #ifdef VMA_IMPLEMENTATION
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1979
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1937
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1863
VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1974
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1935
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1840
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2000
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2576
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3125
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2026
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:1924
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2177
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2322
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2648
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2405
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1872
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2654
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2385
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1966
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1859
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2372
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2006
VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1972
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1940
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2791
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2145
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2479
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2151
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1939
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1766
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2416
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2202
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1951
VmaAllocatorCreateInfo
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1994
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2346
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3111
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2667
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2409
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1897
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3205
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2400
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2260
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2141
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2604
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2616
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3143
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2657
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1928
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1926
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3110
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2796
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2328
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1936
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2597
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2472
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2159
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2335
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:1912
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3122
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1944
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3174
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3209
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2651
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1938
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1976
VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2292
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3159
VmaAllocation
Represents single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2314
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2070
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1943
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1945
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3188
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1846
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2155
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2621
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2085
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2138
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3213
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2282
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2149
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2147
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2009
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1867
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2629
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2601
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1861
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaPool
Represents custom memory pool.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2308
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2379
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2607
VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2324
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2143
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1942
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3119
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3200
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2181
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2595
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2453
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2426
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2150
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2012
VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2430
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2003
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2051
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2213
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1934
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2810
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1997
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3113
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1949
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2151
VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2299
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2423
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2420
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2448
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2150
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2801
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1982
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1941
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2815
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2559
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2158
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:2063
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2670
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2437
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2157
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2076
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2265
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3179
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1948
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2643
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1950
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3164
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2782
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:2359
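
A sketch of a persistently mapped buffer (bufferInfo, srcData, and dataSize are assumed to be defined as in the vmaCreateBuffer example above):

    VmaAllocationCreateInfo allocInfo = { 0 };
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buffer;
    VmaAllocation allocation;
    VmaAllocationInfo info;
    vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, &info);
    // info.pMappedData stays valid for the allocation's lifetime, so no
    // vmaMapMemory()/vmaUnmapMemory() pair is needed.
    memcpy(info.pMappedData, srcData, dataSize);
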
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
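
A quick sketch of reading the aggregated numbers:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total aggregates everything; stats.memoryHeap[i] and
    // stats.memoryType[i] break the same numbers down per heap and per type.
    VkDeviceSize used = stats.total.usedBytes;
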
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1947
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
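
A sketch of hooking these callbacks (MyAllocateCallback and MyFreeCallback are hypothetical names; the parameter lists follow PFN_vmaAllocateDeviceMemoryFunction and PFN_vmaFreeDeviceMemoryFunction as declared in this version of the header):

    static void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator,
        uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        // e.g. log or account for the newly allocated VkDeviceMemory block
    }

    static void VKAPI_PTR MyFreeCallback(VmaAllocator allocator,
        uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        // e.g. log the release of the block
    }

    VmaDeviceMemoryCallbacks callbacks = { MyAllocateCallback, MyFreeCallback };
    // Passed at allocator creation:
    // allocatorInfo.pDeviceMemoryCallbacks = &callbacks;
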
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2660
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2541
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3211
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2151
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2390
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
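
A usage sketch; corruption detection only works when the library is compiled with a nonzero VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled:

    VkResult res = vmaCheckPoolCorruption(allocator, pool);
    if(res == VK_ERROR_VALIDATION_FAILED_EXT)
    {
        // Margins around at least one allocation in the pool were overwritten.
    }
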
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2442
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1946
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
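
A reading sketch; pBudget is expected to point to an array with one entry per memory heap:

    VmaBudget budget[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budget);
    // For each heap, budget[i].usage can be compared against budget[i].budget,
    // the estimate derived from VK_EXT_memory_budget when it is available.
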
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2439
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2458
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
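
A CPU-side sketch (allocations is an assumed array of allocCount VmaAllocation handles whose memory is not currently in use by the GPU):

    VmaDefragmentationInfo2 defragInfo = { 0 };
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no limit
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no limit

    VmaDefragmentationContext ctx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &ctx);
    vmaDefragmentationEnd(allocator, ctx);
    // Buffers or images bound to moved allocations must be recreated
    // and rebound afterwards.
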
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
Definition: vk_mem_alloc.h:3140
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3215
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2466
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3217
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1990
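
A configuration sketch (the path "vma_replay.csv" is a placeholder; recording requires compiling with VMA_RECORDING_ENABLED set to 1):

    VmaRecordSettings recordSettings = { 0 };
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // optional: flush after every call
    recordSettings.pFilePath = "vma_replay.csv";

    // Passed at allocator creation:
    allocatorInfo.pRecordSettings = &recordSettings;
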
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2150
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a created VmaPool.
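
A pool-creation sketch (memTypeIndex is assumed to come from vmaFindMemoryTypeIndexForBufferInfo() or a similar query):

    VmaPoolCreateInfo poolInfo = { 0 };
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // optional: fixed 16 MiB blocks
    poolInfo.maxBlockCount = 8;               // optional: at most 8 such blocks

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // Allocations opt into the pool via VmaAllocationCreateInfo::pool;
    // destroy with vmaDestroyPool() once all its allocations are freed.
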
VmaAllocationInfo
Parameters of VmaAllocation objects, which can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2777
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2587
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum of sizes of all allocations created in a particular heap, in bytes.
Definition: vk_mem_alloc.h:2192
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2435
VmaDefragmentationContext
Opaque object that represents a started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3134
VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2591
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3169
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3195
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2396