Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1761 /*
1762 Define this macro to 0/1 to disable/enable support for recording functionality,
1763 available through VmaAllocatorCreateInfo::pRecordSettings.
1764 */
1765 #ifndef VMA_RECORDING_ENABLED
1766  #define VMA_RECORDING_ENABLED 0
1767 #endif
1768 
1769 #ifndef NOMINMAX
1770  #define NOMINMAX // For windows.h
1771 #endif
1772 
1773 #ifndef VULKAN_H_
1774  #include <vulkan/vulkan.h>
1775 #endif
1776 
1777 #if VMA_RECORDING_ENABLED
1778  #include <windows.h>
1779 #endif
1780 
1781 // Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
1782 // where AAA = major, BBB = minor, CCC = patch.
1783 // If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1784 #if !defined(VMA_VULKAN_VERSION)
1785  #if defined(VK_VERSION_1_1)
1786  #define VMA_VULKAN_VERSION 1001000
1787  #else
1788  #define VMA_VULKAN_VERSION 1000000
1789  #endif
1790 #endif
1791 
1792 #if !defined(VMA_DEDICATED_ALLOCATION)
1793  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1794  #define VMA_DEDICATED_ALLOCATION 1
1795  #else
1796  #define VMA_DEDICATED_ALLOCATION 0
1797  #endif
1798 #endif
1799 
1800 #if !defined(VMA_BIND_MEMORY2)
1801  #if VK_KHR_bind_memory2
1802  #define VMA_BIND_MEMORY2 1
1803  #else
1804  #define VMA_BIND_MEMORY2 0
1805  #endif
1806 #endif
1807 
1808 #if !defined(VMA_MEMORY_BUDGET)
1809  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1810  #define VMA_MEMORY_BUDGET 1
1811  #else
1812  #define VMA_MEMORY_BUDGET 0
1813  #endif
1814 #endif
1815 
1816 // Define these macros to decorate all public functions with additional code,
1817 // before and after returned type, appropriately. This may be useful for
1818 // exporing the functions when compiling VMA as a separate library. Example:
1819 // #define VMA_CALL_PRE __declspec(dllexport)
1820 // #define VMA_CALL_POST __cdecl
1821 #ifndef VMA_CALL_PRE
1822  #define VMA_CALL_PRE
1823 #endif
1824 #ifndef VMA_CALL_POST
1825  #define VMA_CALL_POST
1826 #endif
1827 
1837 VK_DEFINE_HANDLE(VmaAllocator)
1838 
1839 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1841  VmaAllocator allocator,
1842  uint32_t memoryType,
1843  VkDeviceMemory memory,
1844  VkDeviceSize size);
1846 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1847  VmaAllocator allocator,
1848  uint32_t memoryType,
1849  VkDeviceMemory memory,
1850  VkDeviceSize size);
1851 
1865 
1925 
1928 typedef VkFlags VmaAllocatorCreateFlags;
1929 
1934 typedef struct VmaVulkanFunctions {
1935  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1936  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1937  PFN_vkAllocateMemory vkAllocateMemory;
1938  PFN_vkFreeMemory vkFreeMemory;
1939  PFN_vkMapMemory vkMapMemory;
1940  PFN_vkUnmapMemory vkUnmapMemory;
1941  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1942  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1943  PFN_vkBindBufferMemory vkBindBufferMemory;
1944  PFN_vkBindImageMemory vkBindImageMemory;
1945  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1946  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1947  PFN_vkCreateBuffer vkCreateBuffer;
1948  PFN_vkDestroyBuffer vkDestroyBuffer;
1949  PFN_vkCreateImage vkCreateImage;
1950  PFN_vkDestroyImage vkDestroyImage;
1951  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1952 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
1953  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1954  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1955 #endif
1956 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
1957  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
1958  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
1959 #endif
1960 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
1961  PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
1962 #endif
1964 
1966 typedef enum VmaRecordFlagBits {
1973 
1976 typedef VkFlags VmaRecordFlags;
1977 
1979 typedef struct VmaRecordSettings
1980 {
1990  const char* pFilePath;
1992 
1995 {
1999 
2000  VkPhysicalDevice physicalDevice;
2002 
2003  VkDevice device;
2005 
2008 
2009  const VkAllocationCallbacks* pAllocationCallbacks;
2011 
2051  const VkDeviceSize* pHeapSizeLimit;
2076  VkInstance instance;
2087 
2089 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2090  const VmaAllocatorCreateInfo* pCreateInfo,
2091  VmaAllocator* pAllocator);
2092 
2094 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2095  VmaAllocator allocator);
2096 
2101 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2102  VmaAllocator allocator,
2103  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
2104 
2109 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2110  VmaAllocator allocator,
2111  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
2112 
2119 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2120  VmaAllocator allocator,
2121  uint32_t memoryTypeIndex,
2122  VkMemoryPropertyFlags* pFlags);
2123 
2132 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2133  VmaAllocator allocator,
2134  uint32_t frameIndex);
2135 
2138 typedef struct VmaStatInfo
2139 {
2141  uint32_t blockCount;
2147  VkDeviceSize usedBytes;
2149  VkDeviceSize unusedBytes;
2152 } VmaStatInfo;
2153 
2155 typedef struct VmaStats
2156 {
2157  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2158  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2160 } VmaStats;
2161 
2171 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2172  VmaAllocator allocator,
2173  VmaStats* pStats);
2174 
2177 typedef struct VmaBudget
2178 {
2181  VkDeviceSize blockBytes;
2182 
2192  VkDeviceSize allocationBytes;
2193 
2202  VkDeviceSize usage;
2203 
2213  VkDeviceSize budget;
2214 } VmaBudget;
2215 
2226 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2227  VmaAllocator allocator,
2228  VmaBudget* pBudget);
2229 
2230 #ifndef VMA_STATS_STRING_ENABLED
2231 #define VMA_STATS_STRING_ENABLED 1
2232 #endif
2233 
2234 #if VMA_STATS_STRING_ENABLED
2235 
2237 
2239 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2240  VmaAllocator allocator,
2241  char** ppStatsString,
2242  VkBool32 detailedMap);
2243 
2244 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2245  VmaAllocator allocator,
2246  char* pStatsString);
2247 
2248 #endif // #if VMA_STATS_STRING_ENABLED
2249 
2258 VK_DEFINE_HANDLE(VmaPool)
2259 
2260 typedef enum VmaMemoryUsage
2261 {
2323 
2325 } VmaMemoryUsage;
2326 
2336 
2401 
2417 
2427 
2434 
2438 
2440 {
2453  VkMemoryPropertyFlags requiredFlags;
2458  VkMemoryPropertyFlags preferredFlags;
2466  uint32_t memoryTypeBits;
2479  void* pUserData;
2481 
2498 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2499  VmaAllocator allocator,
2500  uint32_t memoryTypeBits,
2501  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2502  uint32_t* pMemoryTypeIndex);
2503 
2516 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2517  VmaAllocator allocator,
2518  const VkBufferCreateInfo* pBufferCreateInfo,
2519  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2520  uint32_t* pMemoryTypeIndex);
2521 
2534 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2535  VmaAllocator allocator,
2536  const VkImageCreateInfo* pImageCreateInfo,
2537  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2538  uint32_t* pMemoryTypeIndex);
2539 
2560 
2577 
2588 
2594 
2597 typedef VkFlags VmaPoolCreateFlags;
2598 
2601 typedef struct VmaPoolCreateInfo {
2616  VkDeviceSize blockSize;
2645 
2648 typedef struct VmaPoolStats {
2651  VkDeviceSize size;
2654  VkDeviceSize unusedSize;
2667  VkDeviceSize unusedRangeSizeMax;
2670  size_t blockCount;
2671 } VmaPoolStats;
2672 
2679 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2680  VmaAllocator allocator,
2681  const VmaPoolCreateInfo* pCreateInfo,
2682  VmaPool* pPool);
2683 
2686 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2687  VmaAllocator allocator,
2688  VmaPool pool);
2689 
2696 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2697  VmaAllocator allocator,
2698  VmaPool pool,
2699  VmaPoolStats* pPoolStats);
2700 
2707 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
2708  VmaAllocator allocator,
2709  VmaPool pool,
2710  size_t* pLostAllocationCount);
2711 
2726 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2727 
2734 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
2735  VmaAllocator allocator,
2736  VmaPool pool,
2737  const char** ppName);
2738 
2744 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
2745  VmaAllocator allocator,
2746  VmaPool pool,
2747  const char* pName);
2748 
2773 VK_DEFINE_HANDLE(VmaAllocation)
2774 
2775 
2777 typedef struct VmaAllocationInfo {
2782  uint32_t memoryType;
2791  VkDeviceMemory deviceMemory;
2796  VkDeviceSize offset;
2801  VkDeviceSize size;
2815  void* pUserData;
2817 
2828 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
2829  VmaAllocator allocator,
2830  const VkMemoryRequirements* pVkMemoryRequirements,
2831  const VmaAllocationCreateInfo* pCreateInfo,
2832  VmaAllocation* pAllocation,
2833  VmaAllocationInfo* pAllocationInfo);
2834 
2854 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
2855  VmaAllocator allocator,
2856  const VkMemoryRequirements* pVkMemoryRequirements,
2857  const VmaAllocationCreateInfo* pCreateInfo,
2858  size_t allocationCount,
2859  VmaAllocation* pAllocations,
2860  VmaAllocationInfo* pAllocationInfo);
2861 
2868 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
2869  VmaAllocator allocator,
2870  VkBuffer buffer,
2871  const VmaAllocationCreateInfo* pCreateInfo,
2872  VmaAllocation* pAllocation,
2873  VmaAllocationInfo* pAllocationInfo);
2874 
2876 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
2877  VmaAllocator allocator,
2878  VkImage image,
2879  const VmaAllocationCreateInfo* pCreateInfo,
2880  VmaAllocation* pAllocation,
2881  VmaAllocationInfo* pAllocationInfo);
2882 
2887 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
2888  VmaAllocator allocator,
2889  VmaAllocation allocation);
2890 
2901 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
2902  VmaAllocator allocator,
2903  size_t allocationCount,
2904  VmaAllocation* pAllocations);
2905 
2912 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
2913  VmaAllocator allocator,
2914  VmaAllocation allocation,
2915  VkDeviceSize newSize);
2916 
2933 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
2934  VmaAllocator allocator,
2935  VmaAllocation allocation,
2936  VmaAllocationInfo* pAllocationInfo);
2937 
2952 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
2953  VmaAllocator allocator,
2954  VmaAllocation allocation);
2955 
2969 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
2970  VmaAllocator allocator,
2971  VmaAllocation allocation,
2972  void* pUserData);
2973 
2984 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
2985  VmaAllocator allocator,
2986  VmaAllocation* pAllocation);
2987 
3026 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3027  VmaAllocator allocator,
3028  VmaAllocation allocation,
3029  void** ppData);
3030 
3039 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3040  VmaAllocator allocator,
3041  VmaAllocation allocation);
3042 
3061 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3062 
3081 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3082 
3099 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
3100 
3107 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3108 
3109 typedef enum VmaDefragmentationFlagBits {
3113 typedef VkFlags VmaDefragmentationFlags;
3114 
3119 typedef struct VmaDefragmentationInfo2 {
3143  uint32_t poolCount;
3164  VkDeviceSize maxCpuBytesToMove;
3174  VkDeviceSize maxGpuBytesToMove;
3188  VkCommandBuffer commandBuffer;
3190 
3195 typedef struct VmaDefragmentationInfo {
3200  VkDeviceSize maxBytesToMove;
3207 
3209 typedef struct VmaDefragmentationStats {
3211  VkDeviceSize bytesMoved;
3213  VkDeviceSize bytesFreed;
3219 
3249 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3250  VmaAllocator allocator,
3251  const VmaDefragmentationInfo2* pInfo,
3252  VmaDefragmentationStats* pStats,
3253  VmaDefragmentationContext *pContext);
3254 
3260 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3261  VmaAllocator allocator,
3262  VmaDefragmentationContext context);
3263 
3304 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3305  VmaAllocator allocator,
3306  VmaAllocation* pAllocations,
3307  size_t allocationCount,
3308  VkBool32* pAllocationsChanged,
3309  const VmaDefragmentationInfo *pDefragmentationInfo,
3310  VmaDefragmentationStats* pDefragmentationStats);
3311 
3324 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3325  VmaAllocator allocator,
3326  VmaAllocation allocation,
3327  VkBuffer buffer);
3328 
3339 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3340  VmaAllocator allocator,
3341  VmaAllocation allocation,
3342  VkDeviceSize allocationLocalOffset,
3343  VkBuffer buffer,
3344  const void* pNext);
3345 
3358 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3359  VmaAllocator allocator,
3360  VmaAllocation allocation,
3361  VkImage image);
3362 
3373 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3374  VmaAllocator allocator,
3375  VmaAllocation allocation,
3376  VkDeviceSize allocationLocalOffset,
3377  VkImage image,
3378  const void* pNext);
3379 
3406 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3407  VmaAllocator allocator,
3408  const VkBufferCreateInfo* pBufferCreateInfo,
3409  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3410  VkBuffer* pBuffer,
3411  VmaAllocation* pAllocation,
3412  VmaAllocationInfo* pAllocationInfo);
3413 
3425 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3426  VmaAllocator allocator,
3427  VkBuffer buffer,
3428  VmaAllocation allocation);
3429 
3431 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3432  VmaAllocator allocator,
3433  const VkImageCreateInfo* pImageCreateInfo,
3434  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3435  VkImage* pImage,
3436  VmaAllocation* pAllocation,
3437  VmaAllocationInfo* pAllocationInfo);
3438 
3450 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3451  VmaAllocator allocator,
3452  VkImage image,
3453  VmaAllocation allocation);
3454 
3455 #ifdef __cplusplus
3456 }
3457 #endif
3458 
3459 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3460 
3461 // For Visual Studio IntelliSense.
3462 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3463 #define VMA_IMPLEMENTATION
3464 #endif
3465 
3466 #ifdef VMA_IMPLEMENTATION
3467 #undef VMA_IMPLEMENTATION
3468 
3469 #include <cstdint>
3470 #include <cstdlib>
3471 #include <cstring>
3472 
3473 /*******************************************************************************
3474 CONFIGURATION SECTION
3475 
3476 Define some of these macros before each #include of this header or change them
3477 here if you need other then default behavior depending on your environment.
3478 */
3479 
3480 /*
3481 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3482 internally, like:
3483 
3484  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3485 
3486 Define to 0 if you are going to provide you own pointers to Vulkan functions via
3487 VmaAllocatorCreateInfo::pVulkanFunctions.
3488 */
3489 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3490 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3491 #endif
3492 
3493 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3494 //#define VMA_USE_STL_CONTAINERS 1
3495 
3496 /* Set this macro to 1 to make the library including and using STL containers:
3497 std::pair, std::vector, std::list, std::unordered_map.
3498 
3499 Set it to 0 or undefined to make the library using its own implementation of
3500 the containers.
3501 */
3502 #if VMA_USE_STL_CONTAINERS
3503  #define VMA_USE_STL_VECTOR 1
3504  #define VMA_USE_STL_UNORDERED_MAP 1
3505  #define VMA_USE_STL_LIST 1
3506 #endif
3507 
3508 #ifndef VMA_USE_STL_SHARED_MUTEX
3509  // Compiler conforms to C++17.
3510  #if __cplusplus >= 201703L
3511  #define VMA_USE_STL_SHARED_MUTEX 1
3512  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3513  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3514  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3515  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3516  #define VMA_USE_STL_SHARED_MUTEX 1
3517  #else
3518  #define VMA_USE_STL_SHARED_MUTEX 0
3519  #endif
3520 #endif
3521 
3522 /*
3523 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3524 Library has its own container implementation.
3525 */
3526 #if VMA_USE_STL_VECTOR
3527  #include <vector>
3528 #endif
3529 
3530 #if VMA_USE_STL_UNORDERED_MAP
3531  #include <unordered_map>
3532 #endif
3533 
3534 #if VMA_USE_STL_LIST
3535  #include <list>
3536 #endif
3537 
3538 /*
3539 Following headers are used in this CONFIGURATION section only, so feel free to
3540 remove them if not needed.
3541 */
3542 #include <cassert> // for assert
3543 #include <algorithm> // for min, max
3544 #include <mutex>
3545 
3546 #ifndef VMA_NULL
3547  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3548  #define VMA_NULL nullptr
3549 #endif
3550 
3551 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3552 #include <cstdlib>
3553 void *aligned_alloc(size_t alignment, size_t size)
3554 {
3555  // alignment must be >= sizeof(void*)
3556  if(alignment < sizeof(void*))
3557  {
3558  alignment = sizeof(void*);
3559  }
3560 
3561  return memalign(alignment, size);
3562 }
3563 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3564 #include <cstdlib>
3565 void *aligned_alloc(size_t alignment, size_t size)
3566 {
3567  // alignment must be >= sizeof(void*)
3568  if(alignment < sizeof(void*))
3569  {
3570  alignment = sizeof(void*);
3571  }
3572 
3573  void *pointer;
3574  if(posix_memalign(&pointer, alignment, size) == 0)
3575  return pointer;
3576  return VMA_NULL;
3577 }
3578 #endif
3579 
3580 // If your compiler is not compatible with C++11 and definition of
3581 // aligned_alloc() function is missing, uncommeting following line may help:
3582 
3583 //#include <malloc.h>
3584 
3585 // Normal assert to check for programmer's errors, especially in Debug configuration.
3586 #ifndef VMA_ASSERT
3587  #ifdef _DEBUG
3588  #define VMA_ASSERT(expr) assert(expr)
3589  #else
3590  #define VMA_ASSERT(expr)
3591  #endif
3592 #endif
3593 
3594 // Assert that will be called very often, like inside data structures e.g. operator[].
3595 // Making it non-empty can make program slow.
3596 #ifndef VMA_HEAVY_ASSERT
3597  #ifdef _DEBUG
3598  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3599  #else
3600  #define VMA_HEAVY_ASSERT(expr)
3601  #endif
3602 #endif
3603 
3604 #ifndef VMA_ALIGN_OF
3605  #define VMA_ALIGN_OF(type) (__alignof(type))
3606 #endif
3607 
3608 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3609  #if defined(_WIN32)
3610  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3611  #else
3612  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3613  #endif
3614 #endif
3615 
3616 #ifndef VMA_SYSTEM_FREE
3617  #if defined(_WIN32)
3618  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3619  #else
3620  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3621  #endif
3622 #endif
3623 
3624 #ifndef VMA_MIN
3625  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3626 #endif
3627 
3628 #ifndef VMA_MAX
3629  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3630 #endif
3631 
3632 #ifndef VMA_SWAP
3633  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3634 #endif
3635 
3636 #ifndef VMA_SORT
3637  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3638 #endif
3639 
3640 #ifndef VMA_DEBUG_LOG
3641  #define VMA_DEBUG_LOG(format, ...)
3642  /*
3643  #define VMA_DEBUG_LOG(format, ...) do { \
3644  printf(format, __VA_ARGS__); \
3645  printf("\n"); \
3646  } while(false)
3647  */
3648 #endif
3649 
3650 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3651 #if VMA_STATS_STRING_ENABLED
3652  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3653  {
3654  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3655  }
3656  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3657  {
3658  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3659  }
3660  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3661  {
3662  snprintf(outStr, strLen, "%p", ptr);
3663  }
3664 #endif
3665 
3666 #ifndef VMA_MUTEX
3667  class VmaMutex
3668  {
3669  public:
3670  void Lock() { m_Mutex.lock(); }
3671  void Unlock() { m_Mutex.unlock(); }
3672  private:
3673  std::mutex m_Mutex;
3674  };
3675  #define VMA_MUTEX VmaMutex
3676 #endif
3677 
3678 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3679 #ifndef VMA_RW_MUTEX
3680  #if VMA_USE_STL_SHARED_MUTEX
3681  // Use std::shared_mutex from C++17.
3682  #include <shared_mutex>
3683  class VmaRWMutex
3684  {
3685  public:
3686  void LockRead() { m_Mutex.lock_shared(); }
3687  void UnlockRead() { m_Mutex.unlock_shared(); }
3688  void LockWrite() { m_Mutex.lock(); }
3689  void UnlockWrite() { m_Mutex.unlock(); }
3690  private:
3691  std::shared_mutex m_Mutex;
3692  };
3693  #define VMA_RW_MUTEX VmaRWMutex
3694  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3695  // Use SRWLOCK from WinAPI.
3696  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3697  class VmaRWMutex
3698  {
3699  public:
3700  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3701  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3702  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3703  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3704  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3705  private:
3706  SRWLOCK m_Lock;
3707  };
3708  #define VMA_RW_MUTEX VmaRWMutex
3709  #else
3710  // Less efficient fallback: Use normal mutex.
3711  class VmaRWMutex
3712  {
3713  public:
3714  void LockRead() { m_Mutex.Lock(); }
3715  void UnlockRead() { m_Mutex.Unlock(); }
3716  void LockWrite() { m_Mutex.Lock(); }
3717  void UnlockWrite() { m_Mutex.Unlock(); }
3718  private:
3719  VMA_MUTEX m_Mutex;
3720  };
3721  #define VMA_RW_MUTEX VmaRWMutex
3722  #endif // #if VMA_USE_STL_SHARED_MUTEX
3723 #endif // #ifndef VMA_RW_MUTEX
3724 
3725 /*
3726 If providing your own implementation, you need to implement a subset of std::atomic.
3727 */
3728 #ifndef VMA_ATOMIC_UINT32
3729  #include <atomic>
3730  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3731 #endif
3732 
3733 #ifndef VMA_ATOMIC_UINT64
3734  #include <atomic>
3735  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3736 #endif
3737 
3738 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3739 
3743  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3744 #endif
3745 
3746 #ifndef VMA_DEBUG_ALIGNMENT
3747 
3751  #define VMA_DEBUG_ALIGNMENT (1)
3752 #endif
3753 
3754 #ifndef VMA_DEBUG_MARGIN
3755 
3759  #define VMA_DEBUG_MARGIN (0)
3760 #endif
3761 
3762 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3763 
3767  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3768 #endif
3769 
3770 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3771 
3776  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3777 #endif
3778 
3779 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3780 
3784  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3785 #endif
3786 
3787 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3788 
3792  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3793 #endif
3794 
3795 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3796  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3798 #endif
3799 
3800 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3801  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3803 #endif
3804 
3805 #ifndef VMA_CLASS_NO_COPY
3806  #define VMA_CLASS_NO_COPY(className) \
3807  private: \
3808  className(const className&) = delete; \
3809  className& operator=(const className&) = delete;
3810 #endif
3811 
3812 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3813 
3814 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3815 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3816 
3817 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3818 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3819 
3820 /*******************************************************************************
3821 END OF CONFIGURATION
3822 */
3823 
3824 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3825 
3826 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3827  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3828 
3829 // Returns number of bits set to 1 in (v).
3830 static inline uint32_t VmaCountBitsSet(uint32_t v)
3831 {
3832  uint32_t c = v - ((v >> 1) & 0x55555555);
3833  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3834  c = ((c >> 4) + c) & 0x0F0F0F0F;
3835  c = ((c >> 8) + c) & 0x00FF00FF;
3836  c = ((c >> 16) + c) & 0x0000FFFF;
3837  return c;
3838 }
3839 
3840 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
3841 // Use types like uint32_t, uint64_t as T.
3842 template <typename T>
3843 static inline T VmaAlignUp(T val, T align)
3844 {
3845  return (val + align - 1) / align * align;
3846 }
3847 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
3848 // Use types like uint32_t, uint64_t as T.
3849 template <typename T>
3850 static inline T VmaAlignDown(T val, T align)
3851 {
3852  return val / align * align;
3853 }
3854 
3855 // Division with mathematical rounding to nearest number.
3856 template <typename T>
3857 static inline T VmaRoundDiv(T x, T y)
3858 {
3859  return (x + (y / (T)2)) / y;
3860 }
3861 
3862 /*
3863 Returns true if given number is a power of two.
3864 T must be unsigned integer number or signed integer but always nonnegative.
3865 For 0 returns true.
3866 */
3867 template <typename T>
3868 inline bool VmaIsPow2(T x)
3869 {
3870  return (x & (x-1)) == 0;
3871 }
3872 
3873 // Returns smallest power of 2 greater or equal to v.
3874 static inline uint32_t VmaNextPow2(uint32_t v)
3875 {
3876  v--;
3877  v |= v >> 1;
3878  v |= v >> 2;
3879  v |= v >> 4;
3880  v |= v >> 8;
3881  v |= v >> 16;
3882  v++;
3883  return v;
3884 }
3885 static inline uint64_t VmaNextPow2(uint64_t v)
3886 {
3887  v--;
3888  v |= v >> 1;
3889  v |= v >> 2;
3890  v |= v >> 4;
3891  v |= v >> 8;
3892  v |= v >> 16;
3893  v |= v >> 32;
3894  v++;
3895  return v;
3896 }
3897 
3898 // Returns largest power of 2 less or equal to v.
3899 static inline uint32_t VmaPrevPow2(uint32_t v)
3900 {
3901  v |= v >> 1;
3902  v |= v >> 2;
3903  v |= v >> 4;
3904  v |= v >> 8;
3905  v |= v >> 16;
3906  v = v ^ (v >> 1);
3907  return v;
3908 }
3909 static inline uint64_t VmaPrevPow2(uint64_t v)
3910 {
3911  v |= v >> 1;
3912  v |= v >> 2;
3913  v |= v >> 4;
3914  v |= v >> 8;
3915  v |= v >> 16;
3916  v |= v >> 32;
3917  v = v ^ (v >> 1);
3918  return v;
3919 }
3920 
3921 static inline bool VmaStrIsEmpty(const char* pStr)
3922 {
3923  return pStr == VMA_NULL || *pStr == '\0';
3924 }
3925 
3926 #if VMA_STATS_STRING_ENABLED
3927 
3928 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3929 {
3930  switch(algorithm)
3931  {
3933  return "Linear";
3935  return "Buddy";
3936  case 0:
3937  return "Default";
3938  default:
3939  VMA_ASSERT(0);
3940  return "";
3941  }
3942 }
3943 
3944 #endif // #if VMA_STATS_STRING_ENABLED
3945 
3946 #ifndef VMA_SORT
3947 
3948 template<typename Iterator, typename Compare>
3949 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3950 {
3951  Iterator centerValue = end; --centerValue;
3952  Iterator insertIndex = beg;
3953  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3954  {
3955  if(cmp(*memTypeIndex, *centerValue))
3956  {
3957  if(insertIndex != memTypeIndex)
3958  {
3959  VMA_SWAP(*memTypeIndex, *insertIndex);
3960  }
3961  ++insertIndex;
3962  }
3963  }
3964  if(insertIndex != centerValue)
3965  {
3966  VMA_SWAP(*insertIndex, *centerValue);
3967  }
3968  return insertIndex;
3969 }
3970 
3971 template<typename Iterator, typename Compare>
3972 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3973 {
3974  if(beg < end)
3975  {
3976  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3977  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3978  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3979  }
3980 }
3981 
3982 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3983 
3984 #endif // #ifndef VMA_SORT
3985 
3986 /*
3987 Returns true if two memory blocks occupy overlapping pages.
3988 ResourceA must be in less memory offset than ResourceB.
3989 
3990 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3991 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3992 */
3993 static inline bool VmaBlocksOnSamePage(
3994  VkDeviceSize resourceAOffset,
3995  VkDeviceSize resourceASize,
3996  VkDeviceSize resourceBOffset,
3997  VkDeviceSize pageSize)
3998 {
3999  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
4000  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
4001  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
4002  VkDeviceSize resourceBStart = resourceBOffset;
4003  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
4004  return resourceAEndPage == resourceBStartPage;
4005 }
4006 
4007 enum VmaSuballocationType
4008 {
4009  VMA_SUBALLOCATION_TYPE_FREE = 0,
4010  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
4011  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
4012  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
4013  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
4014  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
4015  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
4016 };
4017 
4018 /*
4019 Returns true if given suballocation types could conflict and must respect
4020 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4021 or linear image and another one is optimal image. If type is unknown, behave
4022 conservatively.
4023 */
4024 static inline bool VmaIsBufferImageGranularityConflict(
4025  VmaSuballocationType suballocType1,
4026  VmaSuballocationType suballocType2)
4027 {
4028  if(suballocType1 > suballocType2)
4029  {
4030  VMA_SWAP(suballocType1, suballocType2);
4031  }
4032 
4033  switch(suballocType1)
4034  {
4035  case VMA_SUBALLOCATION_TYPE_FREE:
4036  return false;
4037  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4038  return true;
4039  case VMA_SUBALLOCATION_TYPE_BUFFER:
4040  return
4041  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4042  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4043  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4044  return
4045  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4046  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4047  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4048  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4049  return
4050  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4051  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4052  return false;
4053  default:
4054  VMA_ASSERT(0);
4055  return true;
4056  }
4057 }
4058 
4059 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
4060 {
4061 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4062  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
4063  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4064  for(size_t i = 0; i < numberCount; ++i, ++pDst)
4065  {
4066  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
4067  }
4068 #else
4069  // no-op
4070 #endif
4071 }
4072 
4073 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
4074 {
4075 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4076  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
4077  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4078  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4079  {
4080  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4081  {
4082  return false;
4083  }
4084  }
4085 #endif
4086  return true;
4087 }
4088 
4089 /*
4090 Fills structure with parameters of an example buffer to be used for transfers
4091 during GPU memory defragmentation.
4092 */
4093 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4094 {
4095  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4096  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4097  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4098  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4099 }
4100 
4101 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4102 struct VmaMutexLock
4103 {
4104  VMA_CLASS_NO_COPY(VmaMutexLock)
4105 public:
4106  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4107  m_pMutex(useMutex ? &mutex : VMA_NULL)
4108  { if(m_pMutex) { m_pMutex->Lock(); } }
4109  ~VmaMutexLock()
4110  { if(m_pMutex) { m_pMutex->Unlock(); } }
4111 private:
4112  VMA_MUTEX* m_pMutex;
4113 };
4114 
4115 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4116 struct VmaMutexLockRead
4117 {
4118  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4119 public:
4120  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4121  m_pMutex(useMutex ? &mutex : VMA_NULL)
4122  { if(m_pMutex) { m_pMutex->LockRead(); } }
4123  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4124 private:
4125  VMA_RW_MUTEX* m_pMutex;
4126 };
4127 
4128 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4129 struct VmaMutexLockWrite
4130 {
4131  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4132 public:
4133  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4134  m_pMutex(useMutex ? &mutex : VMA_NULL)
4135  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4136  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4137 private:
4138  VMA_RW_MUTEX* m_pMutex;
4139 };
4140 
4141 #if VMA_DEBUG_GLOBAL_MUTEX
4142  static VMA_MUTEX gDebugGlobalMutex;
4143  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4144 #else
4145  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4146 #endif
4147 
4148 // Minimum size of a free suballocation to register it in the free suballocation collection.
4149 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4150 
4151 /*
4152 Performs binary search and returns iterator to first element that is greater or
4153 equal to (key), according to comparison (cmp).
4154 
4155 Cmp should return true if first argument is less than second argument.
4156 
4157 Returned value is the found element, if present in the collection or place where
4158 new element with value (key) should be inserted.
4159 */
4160 template <typename CmpLess, typename IterT, typename KeyT>
4161 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
4162 {
4163  size_t down = 0, up = (end - beg);
4164  while(down < up)
4165  {
4166  const size_t mid = (down + up) / 2;
4167  if(cmp(*(beg+mid), key))
4168  {
4169  down = mid + 1;
4170  }
4171  else
4172  {
4173  up = mid;
4174  }
4175  }
4176  return beg + down;
4177 }
4178 
4179 template<typename CmpLess, typename IterT, typename KeyT>
4180 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4181 {
4182  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4183  beg, end, value, cmp);
4184  if(it == end ||
4185  (!cmp(*it, value) && !cmp(value, *it)))
4186  {
4187  return it;
4188  }
4189  return end;
4190 }
4191 
4192 /*
4193 Returns true if all pointers in the array are not-null and unique.
4194 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4195 T must be pointer type, e.g. VmaAllocation, VmaPool.
4196 */
4197 template<typename T>
4198 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4199 {
4200  for(uint32_t i = 0; i < count; ++i)
4201  {
4202  const T iPtr = arr[i];
4203  if(iPtr == VMA_NULL)
4204  {
4205  return false;
4206  }
4207  for(uint32_t j = i + 1; j < count; ++j)
4208  {
4209  if(iPtr == arr[j])
4210  {
4211  return false;
4212  }
4213  }
4214  }
4215  return true;
4216 }
4217 
4219 // Memory allocation
4220 
4221 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4222 {
4223  if((pAllocationCallbacks != VMA_NULL) &&
4224  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4225  {
4226  return (*pAllocationCallbacks->pfnAllocation)(
4227  pAllocationCallbacks->pUserData,
4228  size,
4229  alignment,
4230  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4231  }
4232  else
4233  {
4234  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4235  }
4236 }
4237 
4238 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4239 {
4240  if((pAllocationCallbacks != VMA_NULL) &&
4241  (pAllocationCallbacks->pfnFree != VMA_NULL))
4242  {
4243  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4244  }
4245  else
4246  {
4247  VMA_SYSTEM_FREE(ptr);
4248  }
4249 }
4250 
4251 template<typename T>
4252 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4253 {
4254  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4255 }
4256 
4257 template<typename T>
4258 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4259 {
4260  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4261 }
4262 
4263 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4264 
4265 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4266 
4267 template<typename T>
4268 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4269 {
4270  ptr->~T();
4271  VmaFree(pAllocationCallbacks, ptr);
4272 }
4273 
4274 template<typename T>
4275 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4276 {
4277  if(ptr != VMA_NULL)
4278  {
4279  for(size_t i = count; i--; )
4280  {
4281  ptr[i].~T();
4282  }
4283  VmaFree(pAllocationCallbacks, ptr);
4284  }
4285 }
4286 
4287 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4288 {
4289  if(srcStr != VMA_NULL)
4290  {
4291  const size_t len = strlen(srcStr);
4292  char* const result = vma_new_array(allocs, char, len + 1);
4293  memcpy(result, srcStr, len + 1);
4294  return result;
4295  }
4296  else
4297  {
4298  return VMA_NULL;
4299  }
4300 }
4301 
4302 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4303 {
4304  if(str != VMA_NULL)
4305  {
4306  const size_t len = strlen(str);
4307  vma_delete_array(allocs, str, len + 1);
4308  }
4309 }
4310 
4311 // STL-compatible allocator.
4312 template<typename T>
4313 class VmaStlAllocator
4314 {
4315 public:
4316  const VkAllocationCallbacks* const m_pCallbacks;
4317  typedef T value_type;
4318 
4319  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4320  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4321 
4322  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4323  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4324 
4325  template<typename U>
4326  bool operator==(const VmaStlAllocator<U>& rhs) const
4327  {
4328  return m_pCallbacks == rhs.m_pCallbacks;
4329  }
4330  template<typename U>
4331  bool operator!=(const VmaStlAllocator<U>& rhs) const
4332  {
4333  return m_pCallbacks != rhs.m_pCallbacks;
4334  }
4335 
4336  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4337 };
4338 
4339 #if VMA_USE_STL_VECTOR
4340 
4341 #define VmaVector std::vector
4342 
4343 template<typename T, typename allocatorT>
4344 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4345 {
4346  vec.insert(vec.begin() + index, item);
4347 }
4348 
4349 template<typename T, typename allocatorT>
4350 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4351 {
4352  vec.erase(vec.begin() + index);
4353 }
4354 
4355 #else // #if VMA_USE_STL_VECTOR
4356 
4357 /* Class with interface compatible with subset of std::vector.
4358 T must be POD because constructors and destructors are not called and memcpy is
4359 used for these objects. */
4360 template<typename T, typename AllocatorT>
4361 class VmaVector
4362 {
4363 public:
4364  typedef T value_type;
4365 
4366  VmaVector(const AllocatorT& allocator) :
4367  m_Allocator(allocator),
4368  m_pArray(VMA_NULL),
4369  m_Count(0),
4370  m_Capacity(0)
4371  {
4372  }
4373 
4374  VmaVector(size_t count, const AllocatorT& allocator) :
4375  m_Allocator(allocator),
4376  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4377  m_Count(count),
4378  m_Capacity(count)
4379  {
4380  }
4381 
4382  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4383  // value is unused.
4384  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4385  : VmaVector(count, allocator) {}
4386 
4387  VmaVector(const VmaVector<T, AllocatorT>& src) :
4388  m_Allocator(src.m_Allocator),
4389  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4390  m_Count(src.m_Count),
4391  m_Capacity(src.m_Count)
4392  {
4393  if(m_Count != 0)
4394  {
4395  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4396  }
4397  }
4398 
4399  ~VmaVector()
4400  {
4401  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4402  }
4403 
4404  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4405  {
4406  if(&rhs != this)
4407  {
4408  resize(rhs.m_Count);
4409  if(m_Count != 0)
4410  {
4411  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4412  }
4413  }
4414  return *this;
4415  }
4416 
4417  bool empty() const { return m_Count == 0; }
4418  size_t size() const { return m_Count; }
4419  T* data() { return m_pArray; }
4420  const T* data() const { return m_pArray; }
4421 
4422  T& operator[](size_t index)
4423  {
4424  VMA_HEAVY_ASSERT(index < m_Count);
4425  return m_pArray[index];
4426  }
4427  const T& operator[](size_t index) const
4428  {
4429  VMA_HEAVY_ASSERT(index < m_Count);
4430  return m_pArray[index];
4431  }
4432 
4433  T& front()
4434  {
4435  VMA_HEAVY_ASSERT(m_Count > 0);
4436  return m_pArray[0];
4437  }
4438  const T& front() const
4439  {
4440  VMA_HEAVY_ASSERT(m_Count > 0);
4441  return m_pArray[0];
4442  }
4443  T& back()
4444  {
4445  VMA_HEAVY_ASSERT(m_Count > 0);
4446  return m_pArray[m_Count - 1];
4447  }
4448  const T& back() const
4449  {
4450  VMA_HEAVY_ASSERT(m_Count > 0);
4451  return m_pArray[m_Count - 1];
4452  }
4453 
4454  void reserve(size_t newCapacity, bool freeMemory = false)
4455  {
4456  newCapacity = VMA_MAX(newCapacity, m_Count);
4457 
4458  if((newCapacity < m_Capacity) && !freeMemory)
4459  {
4460  newCapacity = m_Capacity;
4461  }
4462 
4463  if(newCapacity != m_Capacity)
4464  {
4465  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4466  if(m_Count != 0)
4467  {
4468  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4469  }
4470  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4471  m_Capacity = newCapacity;
4472  m_pArray = newArray;
4473  }
4474  }
4475 
4476  void resize(size_t newCount, bool freeMemory = false)
4477  {
4478  size_t newCapacity = m_Capacity;
4479  if(newCount > m_Capacity)
4480  {
4481  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4482  }
4483  else if(freeMemory)
4484  {
4485  newCapacity = newCount;
4486  }
4487 
4488  if(newCapacity != m_Capacity)
4489  {
4490  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4491  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4492  if(elementsToCopy != 0)
4493  {
4494  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4495  }
4496  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4497  m_Capacity = newCapacity;
4498  m_pArray = newArray;
4499  }
4500 
4501  m_Count = newCount;
4502  }
4503 
4504  void clear(bool freeMemory = false)
4505  {
4506  resize(0, freeMemory);
4507  }
4508 
4509  void insert(size_t index, const T& src)
4510  {
4511  VMA_HEAVY_ASSERT(index <= m_Count);
4512  const size_t oldCount = size();
4513  resize(oldCount + 1);
4514  if(index < oldCount)
4515  {
4516  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4517  }
4518  m_pArray[index] = src;
4519  }
4520 
4521  void remove(size_t index)
4522  {
4523  VMA_HEAVY_ASSERT(index < m_Count);
4524  const size_t oldCount = size();
4525  if(index < oldCount - 1)
4526  {
4527  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4528  }
4529  resize(oldCount - 1);
4530  }
4531 
4532  void push_back(const T& src)
4533  {
4534  const size_t newIndex = size();
4535  resize(newIndex + 1);
4536  m_pArray[newIndex] = src;
4537  }
4538 
4539  void pop_back()
4540  {
4541  VMA_HEAVY_ASSERT(m_Count > 0);
4542  resize(size() - 1);
4543  }
4544 
4545  void push_front(const T& src)
4546  {
4547  insert(0, src);
4548  }
4549 
4550  void pop_front()
4551  {
4552  VMA_HEAVY_ASSERT(m_Count > 0);
4553  remove(0);
4554  }
4555 
4556  typedef T* iterator;
4557 
4558  iterator begin() { return m_pArray; }
4559  iterator end() { return m_pArray + m_Count; }
4560 
4561 private:
4562  AllocatorT m_Allocator;
4563  T* m_pArray;
4564  size_t m_Count;
4565  size_t m_Capacity;
4566 };
4567 
4568 template<typename T, typename allocatorT>
4569 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4570 {
4571  vec.insert(index, item);
4572 }
4573 
4574 template<typename T, typename allocatorT>
4575 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4576 {
4577  vec.remove(index);
4578 }
4579 
4580 #endif // #if VMA_USE_STL_VECTOR
4581 
4582 template<typename CmpLess, typename VectorT>
4583 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4584 {
4585  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4586  vector.data(),
4587  vector.data() + vector.size(),
4588  value,
4589  CmpLess()) - vector.data();
4590  VmaVectorInsert(vector, indexToInsert, value);
4591  return indexToInsert;
4592 }
4593 
4594 template<typename CmpLess, typename VectorT>
4595 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4596 {
4597  CmpLess comparator;
4598  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4599  vector.begin(),
4600  vector.end(),
4601  value,
4602  comparator);
4603  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4604  {
4605  size_t indexToRemove = it - vector.begin();
4606  VmaVectorRemove(vector, indexToRemove);
4607  return true;
4608  }
4609  return false;
4610 }
4611 
4613 // class VmaPoolAllocator
4614 
4615 /*
4616 Allocator for objects of type T using a list of arrays (pools) to speed up
4617 allocation. Number of elements that can be allocated is not bounded because
4618 allocator can create multiple blocks.
4619 */
4620 template<typename T>
4621 class VmaPoolAllocator
4622 {
4623  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4624 public:
4625  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4626  ~VmaPoolAllocator();
4627  T* Alloc();
4628  void Free(T* ptr);
4629 
4630 private:
4631  union Item
4632  {
4633  uint32_t NextFreeIndex;
4634  alignas(T) char Value[sizeof(T)];
4635  };
4636 
4637  struct ItemBlock
4638  {
4639  Item* pItems;
4640  uint32_t Capacity;
4641  uint32_t FirstFreeIndex;
4642  };
4643 
4644  const VkAllocationCallbacks* m_pAllocationCallbacks;
4645  const uint32_t m_FirstBlockCapacity;
4646  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4647 
4648  ItemBlock& CreateNewBlock();
4649 };
4650 
4651 template<typename T>
4652 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4653  m_pAllocationCallbacks(pAllocationCallbacks),
4654  m_FirstBlockCapacity(firstBlockCapacity),
4655  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4656 {
4657  VMA_ASSERT(m_FirstBlockCapacity > 1);
4658 }
4659 
4660 template<typename T>
4661 VmaPoolAllocator<T>::~VmaPoolAllocator()
4662 {
4663  for(size_t i = m_ItemBlocks.size(); i--; )
4664  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4665  m_ItemBlocks.clear();
4666 }
4667 
4668 template<typename T>
4669 T* VmaPoolAllocator<T>::Alloc()
4670 {
4671  for(size_t i = m_ItemBlocks.size(); i--; )
4672  {
4673  ItemBlock& block = m_ItemBlocks[i];
4674  // This block has some free items: Use first one.
4675  if(block.FirstFreeIndex != UINT32_MAX)
4676  {
4677  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4678  block.FirstFreeIndex = pItem->NextFreeIndex;
4679  T* result = (T*)&pItem->Value;
4680  new(result)T(); // Explicit constructor call.
4681  return result;
4682  }
4683  }
4684 
4685  // No block has free item: Create new one and use it.
4686  ItemBlock& newBlock = CreateNewBlock();
4687  Item* const pItem = &newBlock.pItems[0];
4688  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4689  T* result = (T*)&pItem->Value;
4690  new(result)T(); // Explicit constructor call.
4691  return result;
4692 }
4693 
4694 template<typename T>
4695 void VmaPoolAllocator<T>::Free(T* ptr)
4696 {
4697  // Search all memory blocks to find ptr.
4698  for(size_t i = m_ItemBlocks.size(); i--; )
4699  {
4700  ItemBlock& block = m_ItemBlocks[i];
4701 
4702  // Casting to union.
4703  Item* pItemPtr;
4704  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4705 
4706  // Check if pItemPtr is in address range of this block.
4707  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4708  {
4709  ptr->~T(); // Explicit destructor call.
4710  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4711  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4712  block.FirstFreeIndex = index;
4713  return;
4714  }
4715  }
4716  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4717 }
4718 
4719 template<typename T>
4720 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4721 {
4722  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4723  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4724 
4725  const ItemBlock newBlock = {
4726  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4727  newBlockCapacity,
4728  0 };
4729 
4730  m_ItemBlocks.push_back(newBlock);
4731 
4732  // Setup singly-linked list of all free items in this block.
4733  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4734  newBlock.pItems[i].NextFreeIndex = i + 1;
4735  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4736  return m_ItemBlocks.back();
4737 }
4738 
4740 // class VmaRawList, VmaList
4741 
4742 #if VMA_USE_STL_LIST
4743 
4744 #define VmaList std::list
4745 
4746 #else // #if VMA_USE_STL_LIST
4747 
4748 template<typename T>
4749 struct VmaListItem
4750 {
4751  VmaListItem* pPrev;
4752  VmaListItem* pNext;
4753  T Value;
4754 };
4755 
4756 // Doubly linked list.
4757 template<typename T>
4758 class VmaRawList
4759 {
4760  VMA_CLASS_NO_COPY(VmaRawList)
4761 public:
4762  typedef VmaListItem<T> ItemType;
4763 
4764  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4765  ~VmaRawList();
4766  void Clear();
4767 
4768  size_t GetCount() const { return m_Count; }
4769  bool IsEmpty() const { return m_Count == 0; }
4770 
4771  ItemType* Front() { return m_pFront; }
4772  const ItemType* Front() const { return m_pFront; }
4773  ItemType* Back() { return m_pBack; }
4774  const ItemType* Back() const { return m_pBack; }
4775 
4776  ItemType* PushBack();
4777  ItemType* PushFront();
4778  ItemType* PushBack(const T& value);
4779  ItemType* PushFront(const T& value);
4780  void PopBack();
4781  void PopFront();
4782 
4783  // Item can be null - it means PushBack.
4784  ItemType* InsertBefore(ItemType* pItem);
4785  // Item can be null - it means PushFront.
4786  ItemType* InsertAfter(ItemType* pItem);
4787 
4788  ItemType* InsertBefore(ItemType* pItem, const T& value);
4789  ItemType* InsertAfter(ItemType* pItem, const T& value);
4790 
4791  void Remove(ItemType* pItem);
4792 
4793 private:
4794  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4795  VmaPoolAllocator<ItemType> m_ItemAllocator;
4796  ItemType* m_pFront;
4797  ItemType* m_pBack;
4798  size_t m_Count;
4799 };
4800 
4801 template<typename T>
4802 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4803  m_pAllocationCallbacks(pAllocationCallbacks),
4804  m_ItemAllocator(pAllocationCallbacks, 128),
4805  m_pFront(VMA_NULL),
4806  m_pBack(VMA_NULL),
4807  m_Count(0)
4808 {
4809 }
4810 
4811 template<typename T>
4812 VmaRawList<T>::~VmaRawList()
4813 {
4814  // Intentionally not calling Clear, because that would be unnecessary
4815  // computations to return all items to m_ItemAllocator as free.
4816 }
4817 
4818 template<typename T>
4819 void VmaRawList<T>::Clear()
4820 {
4821  if(IsEmpty() == false)
4822  {
4823  ItemType* pItem = m_pBack;
4824  while(pItem != VMA_NULL)
4825  {
4826  ItemType* const pPrevItem = pItem->pPrev;
4827  m_ItemAllocator.Free(pItem);
4828  pItem = pPrevItem;
4829  }
4830  m_pFront = VMA_NULL;
4831  m_pBack = VMA_NULL;
4832  m_Count = 0;
4833  }
4834 }
4835 
4836 template<typename T>
4837 VmaListItem<T>* VmaRawList<T>::PushBack()
4838 {
4839  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4840  pNewItem->pNext = VMA_NULL;
4841  if(IsEmpty())
4842  {
4843  pNewItem->pPrev = VMA_NULL;
4844  m_pFront = pNewItem;
4845  m_pBack = pNewItem;
4846  m_Count = 1;
4847  }
4848  else
4849  {
4850  pNewItem->pPrev = m_pBack;
4851  m_pBack->pNext = pNewItem;
4852  m_pBack = pNewItem;
4853  ++m_Count;
4854  }
4855  return pNewItem;
4856 }
4857 
4858 template<typename T>
4859 VmaListItem<T>* VmaRawList<T>::PushFront()
4860 {
4861  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4862  pNewItem->pPrev = VMA_NULL;
4863  if(IsEmpty())
4864  {
4865  pNewItem->pNext = VMA_NULL;
4866  m_pFront = pNewItem;
4867  m_pBack = pNewItem;
4868  m_Count = 1;
4869  }
4870  else
4871  {
4872  pNewItem->pNext = m_pFront;
4873  m_pFront->pPrev = pNewItem;
4874  m_pFront = pNewItem;
4875  ++m_Count;
4876  }
4877  return pNewItem;
4878 }
4879 
4880 template<typename T>
4881 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4882 {
4883  ItemType* const pNewItem = PushBack();
4884  pNewItem->Value = value;
4885  return pNewItem;
4886 }
4887 
4888 template<typename T>
4889 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4890 {
4891  ItemType* const pNewItem = PushFront();
4892  pNewItem->Value = value;
4893  return pNewItem;
4894 }
4895 
4896 template<typename T>
4897 void VmaRawList<T>::PopBack()
4898 {
4899  VMA_HEAVY_ASSERT(m_Count > 0);
4900  ItemType* const pBackItem = m_pBack;
4901  ItemType* const pPrevItem = pBackItem->pPrev;
4902  if(pPrevItem != VMA_NULL)
4903  {
4904  pPrevItem->pNext = VMA_NULL;
4905  }
4906  m_pBack = pPrevItem;
4907  m_ItemAllocator.Free(pBackItem);
4908  --m_Count;
4909 }
4910 
4911 template<typename T>
4912 void VmaRawList<T>::PopFront()
4913 {
4914  VMA_HEAVY_ASSERT(m_Count > 0);
4915  ItemType* const pFrontItem = m_pFront;
4916  ItemType* const pNextItem = pFrontItem->pNext;
4917  if(pNextItem != VMA_NULL)
4918  {
4919  pNextItem->pPrev = VMA_NULL;
4920  }
4921  m_pFront = pNextItem;
4922  m_ItemAllocator.Free(pFrontItem);
4923  --m_Count;
4924 }
4925 
4926 template<typename T>
4927 void VmaRawList<T>::Remove(ItemType* pItem)
4928 {
4929  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4930  VMA_HEAVY_ASSERT(m_Count > 0);
4931 
4932  if(pItem->pPrev != VMA_NULL)
4933  {
4934  pItem->pPrev->pNext = pItem->pNext;
4935  }
4936  else
4937  {
4938  VMA_HEAVY_ASSERT(m_pFront == pItem);
4939  m_pFront = pItem->pNext;
4940  }
4941 
4942  if(pItem->pNext != VMA_NULL)
4943  {
4944  pItem->pNext->pPrev = pItem->pPrev;
4945  }
4946  else
4947  {
4948  VMA_HEAVY_ASSERT(m_pBack == pItem);
4949  m_pBack = pItem->pPrev;
4950  }
4951 
4952  m_ItemAllocator.Free(pItem);
4953  --m_Count;
4954 }
4955 
4956 template<typename T>
4957 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4958 {
4959  if(pItem != VMA_NULL)
4960  {
4961  ItemType* const prevItem = pItem->pPrev;
4962  ItemType* const newItem = m_ItemAllocator.Alloc();
4963  newItem->pPrev = prevItem;
4964  newItem->pNext = pItem;
4965  pItem->pPrev = newItem;
4966  if(prevItem != VMA_NULL)
4967  {
4968  prevItem->pNext = newItem;
4969  }
4970  else
4971  {
4972  VMA_HEAVY_ASSERT(m_pFront == pItem);
4973  m_pFront = newItem;
4974  }
4975  ++m_Count;
4976  return newItem;
4977  }
4978  else
4979  return PushBack();
4980 }
4981 
4982 template<typename T>
4983 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4984 {
4985  if(pItem != VMA_NULL)
4986  {
4987  ItemType* const nextItem = pItem->pNext;
4988  ItemType* const newItem = m_ItemAllocator.Alloc();
4989  newItem->pNext = nextItem;
4990  newItem->pPrev = pItem;
4991  pItem->pNext = newItem;
4992  if(nextItem != VMA_NULL)
4993  {
4994  nextItem->pPrev = newItem;
4995  }
4996  else
4997  {
4998  VMA_HEAVY_ASSERT(m_pBack == pItem);
4999  m_pBack = newItem;
5000  }
5001  ++m_Count;
5002  return newItem;
5003  }
5004  else
5005  return PushFront();
5006 }
5007 
5008 template<typename T>
5009 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5010 {
5011  ItemType* const newItem = InsertBefore(pItem);
5012  newItem->Value = value;
5013  return newItem;
5014 }
5015 
5016 template<typename T>
5017 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5018 {
5019  ItemType* const newItem = InsertAfter(pItem);
5020  newItem->Value = value;
5021  return newItem;
5022 }
5023 
5024 template<typename T, typename AllocatorT>
5025 class VmaList
5026 {
5027  VMA_CLASS_NO_COPY(VmaList)
5028 public:
5029  class iterator
5030  {
5031  public:
5032  iterator() :
5033  m_pList(VMA_NULL),
5034  m_pItem(VMA_NULL)
5035  {
5036  }
5037 
5038  T& operator*() const
5039  {
5040  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5041  return m_pItem->Value;
5042  }
5043  T* operator->() const
5044  {
5045  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5046  return &m_pItem->Value;
5047  }
5048 
5049  iterator& operator++()
5050  {
5051  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5052  m_pItem = m_pItem->pNext;
5053  return *this;
5054  }
5055  iterator& operator--()
5056  {
5057  if(m_pItem != VMA_NULL)
5058  {
5059  m_pItem = m_pItem->pPrev;
5060  }
5061  else
5062  {
5063  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5064  m_pItem = m_pList->Back();
5065  }
5066  return *this;
5067  }
5068 
5069  iterator operator++(int)
5070  {
5071  iterator result = *this;
5072  ++*this;
5073  return result;
5074  }
5075  iterator operator--(int)
5076  {
5077  iterator result = *this;
5078  --*this;
5079  return result;
5080  }
5081 
5082  bool operator==(const iterator& rhs) const
5083  {
5084  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5085  return m_pItem == rhs.m_pItem;
5086  }
5087  bool operator!=(const iterator& rhs) const
5088  {
5089  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5090  return m_pItem != rhs.m_pItem;
5091  }
5092 
5093  private:
5094  VmaRawList<T>* m_pList;
5095  VmaListItem<T>* m_pItem;
5096 
5097  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5098  m_pList(pList),
5099  m_pItem(pItem)
5100  {
5101  }
5102 
5103  friend class VmaList<T, AllocatorT>;
5104  };
5105 
5106  class const_iterator
5107  {
5108  public:
5109  const_iterator() :
5110  m_pList(VMA_NULL),
5111  m_pItem(VMA_NULL)
5112  {
5113  }
5114 
5115  const_iterator(const iterator& src) :
5116  m_pList(src.m_pList),
5117  m_pItem(src.m_pItem)
5118  {
5119  }
5120 
5121  const T& operator*() const
5122  {
5123  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5124  return m_pItem->Value;
5125  }
5126  const T* operator->() const
5127  {
5128  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5129  return &m_pItem->Value;
5130  }
5131 
5132  const_iterator& operator++()
5133  {
5134  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5135  m_pItem = m_pItem->pNext;
5136  return *this;
5137  }
5138  const_iterator& operator--()
5139  {
5140  if(m_pItem != VMA_NULL)
5141  {
5142  m_pItem = m_pItem->pPrev;
5143  }
5144  else
5145  {
5146  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5147  m_pItem = m_pList->Back();
5148  }
5149  return *this;
5150  }
5151 
5152  const_iterator operator++(int)
5153  {
5154  const_iterator result = *this;
5155  ++*this;
5156  return result;
5157  }
5158  const_iterator operator--(int)
5159  {
5160  const_iterator result = *this;
5161  --*this;
5162  return result;
5163  }
5164 
5165  bool operator==(const const_iterator& rhs) const
5166  {
5167  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5168  return m_pItem == rhs.m_pItem;
5169  }
5170  bool operator!=(const const_iterator& rhs) const
5171  {
5172  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5173  return m_pItem != rhs.m_pItem;
5174  }
5175 
5176  private:
5177  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5178  m_pList(pList),
5179  m_pItem(pItem)
5180  {
5181  }
5182 
5183  const VmaRawList<T>* m_pList;
5184  const VmaListItem<T>* m_pItem;
5185 
5186  friend class VmaList<T, AllocatorT>;
5187  };
5188 
5189  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5190 
5191  bool empty() const { return m_RawList.IsEmpty(); }
5192  size_t size() const { return m_RawList.GetCount(); }
5193 
5194  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5195  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5196 
5197  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5198  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5199 
5200  void clear() { m_RawList.Clear(); }
5201  void push_back(const T& value) { m_RawList.PushBack(value); }
5202  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5203  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5204 
5205 private:
5206  VmaRawList<T> m_RawList;
5207 };
5208 
5209 #endif // #if VMA_USE_STL_LIST
5210 
5212 // class VmaMap
5213 
5214 // Unused in this version.
5215 #if 0
5216 
5217 #if VMA_USE_STL_UNORDERED_MAP
5218 
5219 #define VmaPair std::pair
5220 
5221 #define VMA_MAP_TYPE(KeyT, ValueT) \
5222  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5223 
5224 #else // #if VMA_USE_STL_UNORDERED_MAP
5225 
5226 template<typename T1, typename T2>
5227 struct VmaPair
5228 {
5229  T1 first;
5230  T2 second;
5231 
5232  VmaPair() : first(), second() { }
5233  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5234 };
5235 
5236 /* Class compatible with subset of interface of std::unordered_map.
5237 KeyT, ValueT must be POD because they will be stored in VmaVector.
5238 */
5239 template<typename KeyT, typename ValueT>
5240 class VmaMap
5241 {
5242 public:
5243  typedef VmaPair<KeyT, ValueT> PairType;
5244  typedef PairType* iterator;
5245 
5246  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5247 
5248  iterator begin() { return m_Vector.begin(); }
5249  iterator end() { return m_Vector.end(); }
5250 
5251  void insert(const PairType& pair);
5252  iterator find(const KeyT& key);
5253  void erase(iterator it);
5254 
5255 private:
5256  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5257 };
5258 
5259 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5260 
5261 template<typename FirstT, typename SecondT>
5262 struct VmaPairFirstLess
5263 {
5264  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5265  {
5266  return lhs.first < rhs.first;
5267  }
5268  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5269  {
5270  return lhs.first < rhsFirst;
5271  }
5272 };
5273 
5274 template<typename KeyT, typename ValueT>
5275 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5276 {
5277  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5278  m_Vector.data(),
5279  m_Vector.data() + m_Vector.size(),
5280  pair,
5281  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5282  VmaVectorInsert(m_Vector, indexToInsert, pair);
5283 }
5284 
5285 template<typename KeyT, typename ValueT>
5286 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5287 {
5288  PairType* it = VmaBinaryFindFirstNotLess(
5289  m_Vector.data(),
5290  m_Vector.data() + m_Vector.size(),
5291  key,
5292  VmaPairFirstLess<KeyT, ValueT>());
5293  if((it != m_Vector.end()) && (it->first == key))
5294  {
5295  return it;
5296  }
5297  else
5298  {
5299  return m_Vector.end();
5300  }
5301 }
5302 
5303 template<typename KeyT, typename ValueT>
5304 void VmaMap<KeyT, ValueT>::erase(iterator it)
5305 {
5306  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5307 }
5308 
5309 #endif // #if VMA_USE_STL_UNORDERED_MAP
5310 
5311 #endif // #if 0
5312 
5314 
5315 class VmaDeviceMemoryBlock;
5316 
5317 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5318 
5319 struct VmaAllocation_T
5320 {
5321 private:
5322  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5323 
5324  enum FLAGS
5325  {
5326  FLAG_USER_DATA_STRING = 0x01,
5327  };
5328 
5329 public:
5330  enum ALLOCATION_TYPE
5331  {
5332  ALLOCATION_TYPE_NONE,
5333  ALLOCATION_TYPE_BLOCK,
5334  ALLOCATION_TYPE_DEDICATED,
5335  };
5336 
5337  /*
5338  This struct is allocated using VmaPoolAllocator.
5339  */
5340 
5341  void Ctor(uint32_t currentFrameIndex, bool userDataString)
5342  {
5343  m_Alignment = 1;
5344  m_Size = 0;
5345  m_MemoryTypeIndex = 0;
5346  m_pUserData = VMA_NULL;
5347  m_LastUseFrameIndex = currentFrameIndex;
5348  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
5349  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
5350  m_MapCount = 0;
5351  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
5352 
5353 #if VMA_STATS_STRING_ENABLED
5354  m_CreationFrameIndex = currentFrameIndex;
5355  m_BufferImageUsage = 0;
5356 #endif
5357  }
5358 
5359  void Dtor()
5360  {
5361  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5362 
5363  // Check if owned string was freed.
5364  VMA_ASSERT(m_pUserData == VMA_NULL);
5365  }
5366 
5367  void InitBlockAllocation(
5368  VmaDeviceMemoryBlock* block,
5369  VkDeviceSize offset,
5370  VkDeviceSize alignment,
5371  VkDeviceSize size,
5372  uint32_t memoryTypeIndex,
5373  VmaSuballocationType suballocationType,
5374  bool mapped,
5375  bool canBecomeLost)
5376  {
5377  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5378  VMA_ASSERT(block != VMA_NULL);
5379  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5380  m_Alignment = alignment;
5381  m_Size = size;
5382  m_MemoryTypeIndex = memoryTypeIndex;
5383  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5384  m_SuballocationType = (uint8_t)suballocationType;
5385  m_BlockAllocation.m_Block = block;
5386  m_BlockAllocation.m_Offset = offset;
5387  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5388  }
5389 
5390  void InitLost()
5391  {
5392  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5393  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5394  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5395  m_MemoryTypeIndex = 0;
5396  m_BlockAllocation.m_Block = VMA_NULL;
5397  m_BlockAllocation.m_Offset = 0;
5398  m_BlockAllocation.m_CanBecomeLost = true;
5399  }
5400 
5401  void ChangeBlockAllocation(
5402  VmaAllocator hAllocator,
5403  VmaDeviceMemoryBlock* block,
5404  VkDeviceSize offset);
5405 
5406  void ChangeOffset(VkDeviceSize newOffset);
5407 
5408  // pMappedData not null means allocation is created with MAPPED flag.
5409  void InitDedicatedAllocation(
5410  uint32_t memoryTypeIndex,
5411  VkDeviceMemory hMemory,
5412  VmaSuballocationType suballocationType,
5413  void* pMappedData,
5414  VkDeviceSize size)
5415  {
5416  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5417  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5418  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5419  m_Alignment = 0;
5420  m_Size = size;
5421  m_MemoryTypeIndex = memoryTypeIndex;
5422  m_SuballocationType = (uint8_t)suballocationType;
5423  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5424  m_DedicatedAllocation.m_hMemory = hMemory;
5425  m_DedicatedAllocation.m_pMappedData = pMappedData;
5426  }
5427 
5428  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5429  VkDeviceSize GetAlignment() const { return m_Alignment; }
5430  VkDeviceSize GetSize() const { return m_Size; }
5431  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5432  void* GetUserData() const { return m_pUserData; }
5433  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5434  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5435 
5436  VmaDeviceMemoryBlock* GetBlock() const
5437  {
5438  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5439  return m_BlockAllocation.m_Block;
5440  }
5441  VkDeviceSize GetOffset() const;
5442  VkDeviceMemory GetMemory() const;
5443  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5444  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5445  void* GetMappedData() const;
5446  bool CanBecomeLost() const;
5447 
5448  uint32_t GetLastUseFrameIndex() const
5449  {
5450  return m_LastUseFrameIndex.load();
5451  }
5452  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5453  {
5454  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5455  }
5456  /*
5457  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5458  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5459  - Else, returns false.
5460 
5461  If hAllocation is already lost, assert - you should not call it then.
5462  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5463  */
5464  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5465 
5466  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5467  {
5468  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5469  outInfo.blockCount = 1;
5470  outInfo.allocationCount = 1;
5471  outInfo.unusedRangeCount = 0;
5472  outInfo.usedBytes = m_Size;
5473  outInfo.unusedBytes = 0;
5474  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5475  outInfo.unusedRangeSizeMin = UINT64_MAX;
5476  outInfo.unusedRangeSizeMax = 0;
5477  }
5478 
5479  void BlockAllocMap();
5480  void BlockAllocUnmap();
5481  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5482  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5483 
5484 #if VMA_STATS_STRING_ENABLED
5485  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5486  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5487 
5488  void InitBufferImageUsage(uint32_t bufferImageUsage)
5489  {
5490  VMA_ASSERT(m_BufferImageUsage == 0);
5491  m_BufferImageUsage = bufferImageUsage;
5492  }
5493 
5494  void PrintParameters(class VmaJsonWriter& json) const;
5495 #endif
5496 
5497 private:
5498  VkDeviceSize m_Alignment;
5499  VkDeviceSize m_Size;
5500  void* m_pUserData;
5501  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5502  uint32_t m_MemoryTypeIndex;
5503  uint8_t m_Type; // ALLOCATION_TYPE
5504  uint8_t m_SuballocationType; // VmaSuballocationType
5505  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5506  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5507  uint8_t m_MapCount;
5508  uint8_t m_Flags; // enum FLAGS
5509 
5510  // Allocation out of VmaDeviceMemoryBlock.
5511  struct BlockAllocation
5512  {
5513  VmaDeviceMemoryBlock* m_Block;
5514  VkDeviceSize m_Offset;
5515  bool m_CanBecomeLost;
5516  };
5517 
5518  // Allocation for an object that has its own private VkDeviceMemory.
5519  struct DedicatedAllocation
5520  {
5521  VkDeviceMemory m_hMemory;
5522  void* m_pMappedData; // Not null means memory is mapped.
5523  };
5524 
5525  union
5526  {
5527  // Allocation out of VmaDeviceMemoryBlock.
5528  BlockAllocation m_BlockAllocation;
5529  // Allocation for an object that has its own private VkDeviceMemory.
5530  DedicatedAllocation m_DedicatedAllocation;
5531  };
5532 
5533 #if VMA_STATS_STRING_ENABLED
5534  uint32_t m_CreationFrameIndex;
5535  uint32_t m_BufferImageUsage; // 0 if unknown.
5536 #endif
5537 
5538  void FreeUserDataString(VmaAllocator hAllocator);
5539 };
5540 
5541 /*
5542 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5543 allocated memory block or free.
5544 */
5545 struct VmaSuballocation
5546 {
5547  VkDeviceSize offset;
5548  VkDeviceSize size;
5549  VmaAllocation hAllocation;
5550  VmaSuballocationType type;
5551 };
5552 
5553 // Comparator for offsets.
5554 struct VmaSuballocationOffsetLess
5555 {
5556  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5557  {
5558  return lhs.offset < rhs.offset;
5559  }
5560 };
5561 struct VmaSuballocationOffsetGreater
5562 {
5563  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5564  {
5565  return lhs.offset > rhs.offset;
5566  }
5567 };
5568 
5569 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5570 
5571 // Cost of one additional allocation lost, as equivalent in bytes.
5572 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5573 
5574 enum class VmaAllocationRequestType
5575 {
5576  Normal,
5577  // Used by "Linear" algorithm.
5578  UpperAddress,
5579  EndOf1st,
5580  EndOf2nd,
5581 };
5582 
5583 /*
5584 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5585 
5586 If canMakeOtherLost was false:
5587 - item points to a FREE suballocation.
5588 - itemsToMakeLostCount is 0.
5589 
5590 If canMakeOtherLost was true:
5591 - item points to first of sequence of suballocations, which are either FREE,
5592  or point to VmaAllocations that can become lost.
5593 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5594  the requested allocation to succeed.
5595 */
5596 struct VmaAllocationRequest
5597 {
5598  VkDeviceSize offset;
5599  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5600  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5601  VmaSuballocationList::iterator item;
5602  size_t itemsToMakeLostCount;
5603  void* customData;
5604  VmaAllocationRequestType type;
5605 
5606  VkDeviceSize CalcCost() const
5607  {
5608  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5609  }
5610 };
5611 
5612 /*
5613 Data structure used for bookkeeping of allocations and unused ranges of memory
5614 in a single VkDeviceMemory block.
5615 */
5616 class VmaBlockMetadata
5617 {
5618 public:
5619  VmaBlockMetadata(VmaAllocator hAllocator);
5620  virtual ~VmaBlockMetadata() { }
5621  virtual void Init(VkDeviceSize size) { m_Size = size; }
5622 
5623  // Validates all data structures inside this object. If not valid, returns false.
5624  virtual bool Validate() const = 0;
5625  VkDeviceSize GetSize() const { return m_Size; }
5626  virtual size_t GetAllocationCount() const = 0;
5627  virtual VkDeviceSize GetSumFreeSize() const = 0;
5628  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5629  // Returns true if this block is empty - contains only single free suballocation.
5630  virtual bool IsEmpty() const = 0;
5631 
5632  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5633  // Shouldn't modify blockCount.
5634  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5635 
5636 #if VMA_STATS_STRING_ENABLED
5637  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5638 #endif
5639 
5640  // Tries to find a place for suballocation with given parameters inside this block.
5641  // If succeeded, fills pAllocationRequest and returns true.
5642  // If failed, returns false.
5643  virtual bool CreateAllocationRequest(
5644  uint32_t currentFrameIndex,
5645  uint32_t frameInUseCount,
5646  VkDeviceSize bufferImageGranularity,
5647  VkDeviceSize allocSize,
5648  VkDeviceSize allocAlignment,
5649  bool upperAddress,
5650  VmaSuballocationType allocType,
5651  bool canMakeOtherLost,
5652  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5653  uint32_t strategy,
5654  VmaAllocationRequest* pAllocationRequest) = 0;
5655 
5656  virtual bool MakeRequestedAllocationsLost(
5657  uint32_t currentFrameIndex,
5658  uint32_t frameInUseCount,
5659  VmaAllocationRequest* pAllocationRequest) = 0;
5660 
5661  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5662 
5663  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5664 
5665  // Makes actual allocation based on request. Request must already be checked and valid.
5666  virtual void Alloc(
5667  const VmaAllocationRequest& request,
5668  VmaSuballocationType type,
5669  VkDeviceSize allocSize,
5670  VmaAllocation hAllocation) = 0;
5671 
5672  // Frees suballocation assigned to given memory region.
5673  virtual void Free(const VmaAllocation allocation) = 0;
5674  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5675 
5676 protected:
5677  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5678 
5679 #if VMA_STATS_STRING_ENABLED
5680  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5681  VkDeviceSize unusedBytes,
5682  size_t allocationCount,
5683  size_t unusedRangeCount) const;
5684  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5685  VkDeviceSize offset,
5686  VmaAllocation hAllocation) const;
5687  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5688  VkDeviceSize offset,
5689  VkDeviceSize size) const;
5690  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5691 #endif
5692 
5693 private:
5694  VkDeviceSize m_Size;
5695  const VkAllocationCallbacks* m_pAllocationCallbacks;
5696 };
5697 
5698 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5699  VMA_ASSERT(0 && "Validation failed: " #cond); \
5700  return false; \
5701  } } while(false)
5702 
5703 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5704 {
5705  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5706 public:
5707  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5708  virtual ~VmaBlockMetadata_Generic();
5709  virtual void Init(VkDeviceSize size);
5710 
5711  virtual bool Validate() const;
5712  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5713  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5714  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5715  virtual bool IsEmpty() const;
5716 
5717  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5718  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5719 
5720 #if VMA_STATS_STRING_ENABLED
5721  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5722 #endif
5723 
5724  virtual bool CreateAllocationRequest(
5725  uint32_t currentFrameIndex,
5726  uint32_t frameInUseCount,
5727  VkDeviceSize bufferImageGranularity,
5728  VkDeviceSize allocSize,
5729  VkDeviceSize allocAlignment,
5730  bool upperAddress,
5731  VmaSuballocationType allocType,
5732  bool canMakeOtherLost,
5733  uint32_t strategy,
5734  VmaAllocationRequest* pAllocationRequest);
5735 
5736  virtual bool MakeRequestedAllocationsLost(
5737  uint32_t currentFrameIndex,
5738  uint32_t frameInUseCount,
5739  VmaAllocationRequest* pAllocationRequest);
5740 
5741  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5742 
5743  virtual VkResult CheckCorruption(const void* pBlockData);
5744 
5745  virtual void Alloc(
5746  const VmaAllocationRequest& request,
5747  VmaSuballocationType type,
5748  VkDeviceSize allocSize,
5749  VmaAllocation hAllocation);
5750 
5751  virtual void Free(const VmaAllocation allocation);
5752  virtual void FreeAtOffset(VkDeviceSize offset);
5753 
5755  // For defragmentation
5756 
5757  bool IsBufferImageGranularityConflictPossible(
5758  VkDeviceSize bufferImageGranularity,
5759  VmaSuballocationType& inOutPrevSuballocType) const;
5760 
5761 private:
5762  friend class VmaDefragmentationAlgorithm_Generic;
5763  friend class VmaDefragmentationAlgorithm_Fast;
5764 
5765  uint32_t m_FreeCount;
5766  VkDeviceSize m_SumFreeSize;
5767  VmaSuballocationList m_Suballocations;
5768  // Suballocations that are free and have size greater than certain threshold.
5769  // Sorted by size, ascending.
5770  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5771 
5772  bool ValidateFreeSuballocationList() const;
5773 
5774  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5775  // If yes, fills pOffset and returns true. If no, returns false.
5776  bool CheckAllocation(
5777  uint32_t currentFrameIndex,
5778  uint32_t frameInUseCount,
5779  VkDeviceSize bufferImageGranularity,
5780  VkDeviceSize allocSize,
5781  VkDeviceSize allocAlignment,
5782  VmaSuballocationType allocType,
5783  VmaSuballocationList::const_iterator suballocItem,
5784  bool canMakeOtherLost,
5785  VkDeviceSize* pOffset,
5786  size_t* itemsToMakeLostCount,
5787  VkDeviceSize* pSumFreeSize,
5788  VkDeviceSize* pSumItemSize) const;
5789  // Given free suballocation, it merges it with following one, which must also be free.
5790  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5791  // Releases given suballocation, making it free.
5792  // Merges it with adjacent free suballocations if applicable.
5793  // Returns iterator to new free suballocation at this place.
5794  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5795  // Given free suballocation, it inserts it into sorted list of
5796  // m_FreeSuballocationsBySize if it's suitable.
5797  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5798  // Given free suballocation, it removes it from sorted list of
5799  // m_FreeSuballocationsBySize if it's suitable.
5800  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5801 };
5802 
5803 /*
5804 Allocations and their references in internal data structure look like this:
5805 
5806 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5807 
5808  0 +-------+
5809  | |
5810  | |
5811  | |
5812  +-------+
5813  | Alloc | 1st[m_1stNullItemsBeginCount]
5814  +-------+
5815  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5816  +-------+
5817  | ... |
5818  +-------+
5819  | Alloc | 1st[1st.size() - 1]
5820  +-------+
5821  | |
5822  | |
5823  | |
5824 GetSize() +-------+
5825 
5826 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5827 
5828  0 +-------+
5829  | Alloc | 2nd[0]
5830  +-------+
5831  | Alloc | 2nd[1]
5832  +-------+
5833  | ... |
5834  +-------+
5835  | Alloc | 2nd[2nd.size() - 1]
5836  +-------+
5837  | |
5838  | |
5839  | |
5840  +-------+
5841  | Alloc | 1st[m_1stNullItemsBeginCount]
5842  +-------+
5843  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5844  +-------+
5845  | ... |
5846  +-------+
5847  | Alloc | 1st[1st.size() - 1]
5848  +-------+
5849  | |
5850 GetSize() +-------+
5851 
5852 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5853 
5854  0 +-------+
5855  | |
5856  | |
5857  | |
5858  +-------+
5859  | Alloc | 1st[m_1stNullItemsBeginCount]
5860  +-------+
5861  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5862  +-------+
5863  | ... |
5864  +-------+
5865  | Alloc | 1st[1st.size() - 1]
5866  +-------+
5867  | |
5868  | |
5869  | |
5870  +-------+
5871  | Alloc | 2nd[2nd.size() - 1]
5872  +-------+
5873  | ... |
5874  +-------+
5875  | Alloc | 2nd[1]
5876  +-------+
5877  | Alloc | 2nd[0]
5878 GetSize() +-------+
5879 
5880 */
5881 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5882 {
5883  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5884 public:
5885  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5886  virtual ~VmaBlockMetadata_Linear();
5887  virtual void Init(VkDeviceSize size);
5888 
5889  virtual bool Validate() const;
5890  virtual size_t GetAllocationCount() const;
5891  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5892  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5893  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5894 
5895  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5896  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5897 
5898 #if VMA_STATS_STRING_ENABLED
5899  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5900 #endif
5901 
5902  virtual bool CreateAllocationRequest(
5903  uint32_t currentFrameIndex,
5904  uint32_t frameInUseCount,
5905  VkDeviceSize bufferImageGranularity,
5906  VkDeviceSize allocSize,
5907  VkDeviceSize allocAlignment,
5908  bool upperAddress,
5909  VmaSuballocationType allocType,
5910  bool canMakeOtherLost,
5911  uint32_t strategy,
5912  VmaAllocationRequest* pAllocationRequest);
5913 
5914  virtual bool MakeRequestedAllocationsLost(
5915  uint32_t currentFrameIndex,
5916  uint32_t frameInUseCount,
5917  VmaAllocationRequest* pAllocationRequest);
5918 
5919  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5920 
5921  virtual VkResult CheckCorruption(const void* pBlockData);
5922 
5923  virtual void Alloc(
5924  const VmaAllocationRequest& request,
5925  VmaSuballocationType type,
5926  VkDeviceSize allocSize,
5927  VmaAllocation hAllocation);
5928 
5929  virtual void Free(const VmaAllocation allocation);
5930  virtual void FreeAtOffset(VkDeviceSize offset);
5931 
5932 private:
5933  /*
5934  There are two suballocation vectors, used in ping-pong way.
5935  The one with index m_1stVectorIndex is called 1st.
5936  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5937  2nd can be non-empty only when 1st is not empty.
5938  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5939  */
5940  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5941 
5942  enum SECOND_VECTOR_MODE
5943  {
5944  SECOND_VECTOR_EMPTY,
5945  /*
5946  Suballocations in 2nd vector are created later than the ones in 1st, but they
5947  all have smaller offset.
5948  */
5949  SECOND_VECTOR_RING_BUFFER,
5950  /*
5951  Suballocations in 2nd vector are upper side of double stack.
5952  They all have offsets higher than those in 1st vector.
5953  Top of this stack means smaller offsets, but higher indices in this vector.
5954  */
5955  SECOND_VECTOR_DOUBLE_STACK,
5956  };
5957 
5958  VkDeviceSize m_SumFreeSize;
5959  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5960  uint32_t m_1stVectorIndex;
5961  SECOND_VECTOR_MODE m_2ndVectorMode;
5962 
5963  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5964  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5965  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5966  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5967 
5968  // Number of items in 1st vector with hAllocation = null at the beginning.
5969  size_t m_1stNullItemsBeginCount;
5970  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5971  size_t m_1stNullItemsMiddleCount;
5972  // Number of items in 2nd vector with hAllocation = null.
5973  size_t m_2ndNullItemsCount;
5974 
5975  bool ShouldCompact1st() const;
5976  void CleanupAfterFree();
5977 
5978  bool CreateAllocationRequest_LowerAddress(
5979  uint32_t currentFrameIndex,
5980  uint32_t frameInUseCount,
5981  VkDeviceSize bufferImageGranularity,
5982  VkDeviceSize allocSize,
5983  VkDeviceSize allocAlignment,
5984  VmaSuballocationType allocType,
5985  bool canMakeOtherLost,
5986  uint32_t strategy,
5987  VmaAllocationRequest* pAllocationRequest);
5988  bool CreateAllocationRequest_UpperAddress(
5989  uint32_t currentFrameIndex,
5990  uint32_t frameInUseCount,
5991  VkDeviceSize bufferImageGranularity,
5992  VkDeviceSize allocSize,
5993  VkDeviceSize allocAlignment,
5994  VmaSuballocationType allocType,
5995  bool canMakeOtherLost,
5996  uint32_t strategy,
5997  VmaAllocationRequest* pAllocationRequest);
5998 };
5999 
6000 /*
6001 - GetSize() is the original size of allocated memory block.
6002 - m_UsableSize is this size aligned down to a power of two.
6003  All allocations and calculations happen relative to m_UsableSize.
6004 - GetUnusableSize() is the difference between them.
6005  It is repoted as separate, unused range, not available for allocations.
6006 
6007 Node at level 0 has size = m_UsableSize.
6008 Each next level contains nodes with size 2 times smaller than current level.
6009 m_LevelCount is the maximum number of levels to use in the current object.
6010 */
6011 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6012 {
6013  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6014 public:
6015  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6016  virtual ~VmaBlockMetadata_Buddy();
6017  virtual void Init(VkDeviceSize size);
6018 
6019  virtual bool Validate() const;
6020  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6021  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6022  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6023  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6024 
6025  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6026  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6027 
6028 #if VMA_STATS_STRING_ENABLED
6029  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6030 #endif
6031 
6032  virtual bool CreateAllocationRequest(
6033  uint32_t currentFrameIndex,
6034  uint32_t frameInUseCount,
6035  VkDeviceSize bufferImageGranularity,
6036  VkDeviceSize allocSize,
6037  VkDeviceSize allocAlignment,
6038  bool upperAddress,
6039  VmaSuballocationType allocType,
6040  bool canMakeOtherLost,
6041  uint32_t strategy,
6042  VmaAllocationRequest* pAllocationRequest);
6043 
6044  virtual bool MakeRequestedAllocationsLost(
6045  uint32_t currentFrameIndex,
6046  uint32_t frameInUseCount,
6047  VmaAllocationRequest* pAllocationRequest);
6048 
6049  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6050 
6051  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6052 
6053  virtual void Alloc(
6054  const VmaAllocationRequest& request,
6055  VmaSuballocationType type,
6056  VkDeviceSize allocSize,
6057  VmaAllocation hAllocation);
6058 
6059  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6060  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6061 
6062 private:
6063  static const VkDeviceSize MIN_NODE_SIZE = 32;
6064  static const size_t MAX_LEVELS = 30;
6065 
6066  struct ValidationContext
6067  {
6068  size_t calculatedAllocationCount;
6069  size_t calculatedFreeCount;
6070  VkDeviceSize calculatedSumFreeSize;
6071 
6072  ValidationContext() :
6073  calculatedAllocationCount(0),
6074  calculatedFreeCount(0),
6075  calculatedSumFreeSize(0) { }
6076  };
6077 
6078  struct Node
6079  {
6080  VkDeviceSize offset;
6081  enum TYPE
6082  {
6083  TYPE_FREE,
6084  TYPE_ALLOCATION,
6085  TYPE_SPLIT,
6086  TYPE_COUNT
6087  } type;
6088  Node* parent;
6089  Node* buddy;
6090 
6091  union
6092  {
6093  struct
6094  {
6095  Node* prev;
6096  Node* next;
6097  } free;
6098  struct
6099  {
6100  VmaAllocation alloc;
6101  } allocation;
6102  struct
6103  {
6104  Node* leftChild;
6105  } split;
6106  };
6107  };
6108 
6109  // Size of the memory block aligned down to a power of two.
6110  VkDeviceSize m_UsableSize;
6111  uint32_t m_LevelCount;
6112 
6113  Node* m_Root;
6114  struct {
6115  Node* front;
6116  Node* back;
6117  } m_FreeList[MAX_LEVELS];
6118  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6119  size_t m_AllocationCount;
6120  // Number of nodes in the tree with type == TYPE_FREE.
6121  size_t m_FreeCount;
6122  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6123  VkDeviceSize m_SumFreeSize;
6124 
6125  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6126  void DeleteNode(Node* node);
6127  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6128  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6129  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6130  // Alloc passed just for validation. Can be null.
6131  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6132  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6133  // Adds node to the front of FreeList at given level.
6134  // node->type must be FREE.
6135  // node->free.prev, next can be undefined.
6136  void AddToFreeListFront(uint32_t level, Node* node);
6137  // Removes node from FreeList at given level.
6138  // node->type must be FREE.
6139  // node->free.prev, next stay untouched.
6140  void RemoveFromFreeList(uint32_t level, Node* node);
6141 
6142 #if VMA_STATS_STRING_ENABLED
6143  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6144 #endif
6145 };
6146 
6147 /*
6148 Represents a single block of device memory (`VkDeviceMemory`) with all the
6149 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6150 
6151 Thread-safety: This class must be externally synchronized.
6152 */
6153 class VmaDeviceMemoryBlock
6154 {
6155  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6156 public:
6157  VmaBlockMetadata* m_pMetadata;
6158 
6159  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6160 
6161  ~VmaDeviceMemoryBlock()
6162  {
6163  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6164  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6165  }
6166 
6167  // Always call after construction.
6168  void Init(
6169  VmaAllocator hAllocator,
6170  VmaPool hParentPool,
6171  uint32_t newMemoryTypeIndex,
6172  VkDeviceMemory newMemory,
6173  VkDeviceSize newSize,
6174  uint32_t id,
6175  uint32_t algorithm);
6176  // Always call before destruction.
6177  void Destroy(VmaAllocator allocator);
6178 
6179  VmaPool GetParentPool() const { return m_hParentPool; }
6180  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6181  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6182  uint32_t GetId() const { return m_Id; }
6183  void* GetMappedData() const { return m_pMappedData; }
6184 
6185  // Validates all data structures inside this object. If not valid, returns false.
6186  bool Validate() const;
6187 
6188  VkResult CheckCorruption(VmaAllocator hAllocator);
6189 
6190  // ppData can be null.
6191  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6192  void Unmap(VmaAllocator hAllocator, uint32_t count);
6193 
6194  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6195  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6196 
6197  VkResult BindBufferMemory(
6198  const VmaAllocator hAllocator,
6199  const VmaAllocation hAllocation,
6200  VkDeviceSize allocationLocalOffset,
6201  VkBuffer hBuffer,
6202  const void* pNext);
6203  VkResult BindImageMemory(
6204  const VmaAllocator hAllocator,
6205  const VmaAllocation hAllocation,
6206  VkDeviceSize allocationLocalOffset,
6207  VkImage hImage,
6208  const void* pNext);
6209 
6210 private:
6211  VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
6212  uint32_t m_MemoryTypeIndex;
6213  uint32_t m_Id;
6214  VkDeviceMemory m_hMemory;
6215 
6216  /*
6217  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6218  Also protects m_MapCount, m_pMappedData.
6219  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6220  */
6221  VMA_MUTEX m_Mutex;
6222  uint32_t m_MapCount;
6223  void* m_pMappedData;
6224 };
6225 
6226 struct VmaPointerLess
6227 {
6228  bool operator()(const void* lhs, const void* rhs) const
6229  {
6230  return lhs < rhs;
6231  }
6232 };
6233 
6234 struct VmaDefragmentationMove
6235 {
6236  size_t srcBlockIndex;
6237  size_t dstBlockIndex;
6238  VkDeviceSize srcOffset;
6239  VkDeviceSize dstOffset;
6240  VkDeviceSize size;
6241 };
6242 
6243 class VmaDefragmentationAlgorithm;
6244 
6245 /*
6246 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6247 Vulkan memory type.
6248 
6249 Synchronized internally with a mutex.
6250 */
6251 struct VmaBlockVector
6252 {
6253  VMA_CLASS_NO_COPY(VmaBlockVector)
6254 public:
6255  VmaBlockVector(
6256  VmaAllocator hAllocator,
6257  VmaPool hParentPool,
6258  uint32_t memoryTypeIndex,
6259  VkDeviceSize preferredBlockSize,
6260  size_t minBlockCount,
6261  size_t maxBlockCount,
6262  VkDeviceSize bufferImageGranularity,
6263  uint32_t frameInUseCount,
6264  bool explicitBlockSize,
6265  uint32_t algorithm);
6266  ~VmaBlockVector();
6267 
6268  VkResult CreateMinBlocks();
6269 
6270  VmaAllocator GetAllocator() const { return m_hAllocator; }
6271  VmaPool GetParentPool() const { return m_hParentPool; }
6272  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
6273  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6274  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6275  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6276  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6277  uint32_t GetAlgorithm() const { return m_Algorithm; }
6278 
6279  void GetPoolStats(VmaPoolStats* pStats);
6280 
6281  bool IsEmpty();
6282  bool IsCorruptionDetectionEnabled() const;
6283 
6284  VkResult Allocate(
6285  uint32_t currentFrameIndex,
6286  VkDeviceSize size,
6287  VkDeviceSize alignment,
6288  const VmaAllocationCreateInfo& createInfo,
6289  VmaSuballocationType suballocType,
6290  size_t allocationCount,
6291  VmaAllocation* pAllocations);
6292 
6293  void Free(const VmaAllocation hAllocation);
6294 
6295  // Adds statistics of this BlockVector to pStats.
6296  void AddStats(VmaStats* pStats);
6297 
6298 #if VMA_STATS_STRING_ENABLED
6299  void PrintDetailedMap(class VmaJsonWriter& json);
6300 #endif
6301 
6302  void MakePoolAllocationsLost(
6303  uint32_t currentFrameIndex,
6304  size_t* pLostAllocationCount);
6305  VkResult CheckCorruption();
6306 
6307  // Saves results in pCtx->res.
6308  void Defragment(
6309  class VmaBlockVectorDefragmentationContext* pCtx,
6310  VmaDefragmentationStats* pStats,
6311  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6312  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6313  VkCommandBuffer commandBuffer);
6314  void DefragmentationEnd(
6315  class VmaBlockVectorDefragmentationContext* pCtx,
6316  VmaDefragmentationStats* pStats);
6317 
6319  // To be used only while the m_Mutex is locked. Used during defragmentation.
6320 
6321  size_t GetBlockCount() const { return m_Blocks.size(); }
6322  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6323  size_t CalcAllocationCount() const;
6324  bool IsBufferImageGranularityConflictPossible() const;
6325 
6326 private:
6327  friend class VmaDefragmentationAlgorithm_Generic;
6328 
6329  const VmaAllocator m_hAllocator;
6330  const VmaPool m_hParentPool;
6331  const uint32_t m_MemoryTypeIndex;
6332  const VkDeviceSize m_PreferredBlockSize;
6333  const size_t m_MinBlockCount;
6334  const size_t m_MaxBlockCount;
6335  const VkDeviceSize m_BufferImageGranularity;
6336  const uint32_t m_FrameInUseCount;
6337  const bool m_ExplicitBlockSize;
6338  const uint32_t m_Algorithm;
6339  VMA_RW_MUTEX m_Mutex;
6340 
6341  /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) -
6342  a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
6343  bool m_HasEmptyBlock;
6344  // Incrementally sorted by sumFreeSize, ascending.
6345  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6346  uint32_t m_NextBlockId;
6347 
6348  VkDeviceSize CalcMaxBlockSize() const;
6349 
6350  // Finds and removes given block from vector.
6351  void Remove(VmaDeviceMemoryBlock* pBlock);
6352 
6353  // Performs single step in sorting m_Blocks. They may not be fully sorted
6354  // after this call.
6355  void IncrementallySortBlocks();
6356 
6357  VkResult AllocatePage(
6358  uint32_t currentFrameIndex,
6359  VkDeviceSize size,
6360  VkDeviceSize alignment,
6361  const VmaAllocationCreateInfo& createInfo,
6362  VmaSuballocationType suballocType,
6363  VmaAllocation* pAllocation);
6364 
6365  // To be used only without CAN_MAKE_OTHER_LOST flag.
6366  VkResult AllocateFromBlock(
6367  VmaDeviceMemoryBlock* pBlock,
6368  uint32_t currentFrameIndex,
6369  VkDeviceSize size,
6370  VkDeviceSize alignment,
6371  VmaAllocationCreateFlags allocFlags,
6372  void* pUserData,
6373  VmaSuballocationType suballocType,
6374  uint32_t strategy,
6375  VmaAllocation* pAllocation);
6376 
6377  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6378 
6379  // Saves result to pCtx->res.
6380  void ApplyDefragmentationMovesCpu(
6381  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6382  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6383  // Saves result to pCtx->res.
6384  void ApplyDefragmentationMovesGpu(
6385  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6386  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6387  VkCommandBuffer commandBuffer);
6388 
6389  /*
6390  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6391  - updated with new data.
6392  */
6393  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6394 
6395  void UpdateHasEmptyBlock();
6396 };
6397 
6398 struct VmaPool_T
6399 {
6400  VMA_CLASS_NO_COPY(VmaPool_T)
6401 public:
6402  VmaBlockVector m_BlockVector;
6403 
6404  VmaPool_T(
6405  VmaAllocator hAllocator,
6406  const VmaPoolCreateInfo& createInfo,
6407  VkDeviceSize preferredBlockSize);
6408  ~VmaPool_T();
6409 
6410  uint32_t GetId() const { return m_Id; }
6411  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6412 
6413  const char* GetName() const { return m_Name; }
6414  void SetName(const char* pName);
6415 
6416 #if VMA_STATS_STRING_ENABLED
6417  //void PrintDetailedMap(class VmaStringBuilder& sb);
6418 #endif
6419 
6420 private:
6421  uint32_t m_Id;
6422  char* m_Name;
6423 };
6424 
6425 /*
6426 Performs defragmentation:
6427 
6428 - Updates `pBlockVector->m_pMetadata`.
6429 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6430 - Does not move actual data, only returns requested moves as `moves`.
6431 */
6432 class VmaDefragmentationAlgorithm
6433 {
6434  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6435 public:
6436  VmaDefragmentationAlgorithm(
6437  VmaAllocator hAllocator,
6438  VmaBlockVector* pBlockVector,
6439  uint32_t currentFrameIndex) :
6440  m_hAllocator(hAllocator),
6441  m_pBlockVector(pBlockVector),
6442  m_CurrentFrameIndex(currentFrameIndex)
6443  {
6444  }
6445  virtual ~VmaDefragmentationAlgorithm()
6446  {
6447  }
6448 
6449  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6450  virtual void AddAll() = 0;
6451 
6452  virtual VkResult Defragment(
6453  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6454  VkDeviceSize maxBytesToMove,
6455  uint32_t maxAllocationsToMove) = 0;
6456 
6457  virtual VkDeviceSize GetBytesMoved() const = 0;
6458  virtual uint32_t GetAllocationsMoved() const = 0;
6459 
6460 protected:
6461  VmaAllocator const m_hAllocator;
6462  VmaBlockVector* const m_pBlockVector;
6463  const uint32_t m_CurrentFrameIndex;
6464 
6465  struct AllocationInfo
6466  {
6467  VmaAllocation m_hAllocation;
6468  VkBool32* m_pChanged;
6469 
6470  AllocationInfo() :
6471  m_hAllocation(VK_NULL_HANDLE),
6472  m_pChanged(VMA_NULL)
6473  {
6474  }
6475  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6476  m_hAllocation(hAlloc),
6477  m_pChanged(pChanged)
6478  {
6479  }
6480  };
6481 };
6482 
6483 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6484 {
6485  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6486 public:
6487  VmaDefragmentationAlgorithm_Generic(
6488  VmaAllocator hAllocator,
6489  VmaBlockVector* pBlockVector,
6490  uint32_t currentFrameIndex,
6491  bool overlappingMoveSupported);
6492  virtual ~VmaDefragmentationAlgorithm_Generic();
6493 
6494  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6495  virtual void AddAll() { m_AllAllocations = true; }
6496 
6497  virtual VkResult Defragment(
6498  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6499  VkDeviceSize maxBytesToMove,
6500  uint32_t maxAllocationsToMove);
6501 
6502  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6503  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6504 
6505 private:
6506  uint32_t m_AllocationCount;
6507  bool m_AllAllocations;
6508 
6509  VkDeviceSize m_BytesMoved;
6510  uint32_t m_AllocationsMoved;
6511 
6512  struct AllocationInfoSizeGreater
6513  {
6514  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6515  {
6516  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6517  }
6518  };
6519 
6520  struct AllocationInfoOffsetGreater
6521  {
6522  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6523  {
6524  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6525  }
6526  };
6527 
6528  struct BlockInfo
6529  {
6530  size_t m_OriginalBlockIndex;
6531  VmaDeviceMemoryBlock* m_pBlock;
6532  bool m_HasNonMovableAllocations;
6533  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6534 
6535  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6536  m_OriginalBlockIndex(SIZE_MAX),
6537  m_pBlock(VMA_NULL),
6538  m_HasNonMovableAllocations(true),
6539  m_Allocations(pAllocationCallbacks)
6540  {
6541  }
6542 
6543  void CalcHasNonMovableAllocations()
6544  {
6545  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6546  const size_t defragmentAllocCount = m_Allocations.size();
6547  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6548  }
6549 
6550  void SortAllocationsBySizeDescending()
6551  {
6552  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6553  }
6554 
6555  void SortAllocationsByOffsetDescending()
6556  {
6557  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6558  }
6559  };
6560 
6561  struct BlockPointerLess
6562  {
6563  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6564  {
6565  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6566  }
6567  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6568  {
6569  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6570  }
6571  };
6572 
6573  // 1. Blocks with some non-movable allocations go first.
6574  // 2. Blocks with smaller sumFreeSize go first.
6575  struct BlockInfoCompareMoveDestination
6576  {
6577  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6578  {
6579  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6580  {
6581  return true;
6582  }
6583  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6584  {
6585  return false;
6586  }
6587  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6588  {
6589  return true;
6590  }
6591  return false;
6592  }
6593  };
6594 
6595  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6596  BlockInfoVector m_Blocks;
6597 
6598  VkResult DefragmentRound(
6599  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6600  VkDeviceSize maxBytesToMove,
6601  uint32_t maxAllocationsToMove);
6602 
6603  size_t CalcBlocksWithNonMovableCount() const;
6604 
6605  static bool MoveMakesSense(
6606  size_t dstBlockIndex, VkDeviceSize dstOffset,
6607  size_t srcBlockIndex, VkDeviceSize srcOffset);
6608 };
6609 
6610 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6611 {
6612  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6613 public:
6614  VmaDefragmentationAlgorithm_Fast(
6615  VmaAllocator hAllocator,
6616  VmaBlockVector* pBlockVector,
6617  uint32_t currentFrameIndex,
6618  bool overlappingMoveSupported);
6619  virtual ~VmaDefragmentationAlgorithm_Fast();
6620 
6621  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6622  virtual void AddAll() { m_AllAllocations = true; }
6623 
6624  virtual VkResult Defragment(
6625  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6626  VkDeviceSize maxBytesToMove,
6627  uint32_t maxAllocationsToMove);
6628 
6629  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6630  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6631 
6632 private:
6633  struct BlockInfo
6634  {
6635  size_t origBlockIndex;
6636  };
6637 
6638  class FreeSpaceDatabase
6639  {
6640  public:
6641  FreeSpaceDatabase()
6642  {
6643  FreeSpace s = {};
6644  s.blockInfoIndex = SIZE_MAX;
6645  for(size_t i = 0; i < MAX_COUNT; ++i)
6646  {
6647  m_FreeSpaces[i] = s;
6648  }
6649  }
6650 
6651  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6652  {
6653  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6654  {
6655  return;
6656  }
6657 
6658  // Find first invalid or the smallest structure.
6659  size_t bestIndex = SIZE_MAX;
6660  for(size_t i = 0; i < MAX_COUNT; ++i)
6661  {
6662  // Empty structure.
6663  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6664  {
6665  bestIndex = i;
6666  break;
6667  }
6668  if(m_FreeSpaces[i].size < size &&
6669  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6670  {
6671  bestIndex = i;
6672  }
6673  }
6674 
6675  if(bestIndex != SIZE_MAX)
6676  {
6677  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6678  m_FreeSpaces[bestIndex].offset = offset;
6679  m_FreeSpaces[bestIndex].size = size;
6680  }
6681  }
6682 
6683  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6684  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6685  {
6686  size_t bestIndex = SIZE_MAX;
6687  VkDeviceSize bestFreeSpaceAfter = 0;
6688  for(size_t i = 0; i < MAX_COUNT; ++i)
6689  {
6690  // Structure is valid.
6691  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6692  {
6693  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6694  // Allocation fits into this structure.
6695  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6696  {
6697  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6698  (dstOffset + size);
6699  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6700  {
6701  bestIndex = i;
6702  bestFreeSpaceAfter = freeSpaceAfter;
6703  }
6704  }
6705  }
6706  }
6707 
6708  if(bestIndex != SIZE_MAX)
6709  {
6710  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6711  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6712 
6713  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6714  {
6715  // Leave this structure for remaining empty space.
6716  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6717  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6718  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6719  }
6720  else
6721  {
6722  // This structure becomes invalid.
6723  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6724  }
6725 
6726  return true;
6727  }
6728 
6729  return false;
6730  }
6731 
6732  private:
6733  static const size_t MAX_COUNT = 4;
6734 
6735  struct FreeSpace
6736  {
6737  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6738  VkDeviceSize offset;
6739  VkDeviceSize size;
6740  } m_FreeSpaces[MAX_COUNT];
6741  };
6742 
6743  const bool m_OverlappingMoveSupported;
6744 
6745  uint32_t m_AllocationCount;
6746  bool m_AllAllocations;
6747 
6748  VkDeviceSize m_BytesMoved;
6749  uint32_t m_AllocationsMoved;
6750 
6751  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6752 
6753  void PreprocessMetadata();
6754  void PostprocessMetadata();
6755  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6756 };
6757 
6758 struct VmaBlockDefragmentationContext
6759 {
6760  enum BLOCK_FLAG
6761  {
6762  BLOCK_FLAG_USED = 0x00000001,
6763  };
6764  uint32_t flags;
6765  VkBuffer hBuffer;
6766 };
6767 
6768 class VmaBlockVectorDefragmentationContext
6769 {
6770  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6771 public:
6772  VkResult res;
6773  bool mutexLocked;
6774  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6775 
6776  VmaBlockVectorDefragmentationContext(
6777  VmaAllocator hAllocator,
6778  VmaPool hCustomPool, // Optional.
6779  VmaBlockVector* pBlockVector,
6780  uint32_t currFrameIndex);
6781  ~VmaBlockVectorDefragmentationContext();
6782 
6783  VmaPool GetCustomPool() const { return m_hCustomPool; }
6784  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6785  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6786 
6787  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6788  void AddAll() { m_AllAllocations = true; }
6789 
6790  void Begin(bool overlappingMoveSupported);
6791 
6792 private:
6793  const VmaAllocator m_hAllocator;
6794  // Null if not from custom pool.
6795  const VmaPool m_hCustomPool;
6796  // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6797  VmaBlockVector* const m_pBlockVector;
6798  const uint32_t m_CurrFrameIndex;
6799  // Owner of this object.
6800  VmaDefragmentationAlgorithm* m_pAlgorithm;
6801 
6802  struct AllocInfo
6803  {
6804  VmaAllocation hAlloc;
6805  VkBool32* pChanged;
6806  };
6807  // Used between constructor and Begin.
6808  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6809  bool m_AllAllocations;
6810 };
6811 
6812 struct VmaDefragmentationContext_T
6813 {
6814 private:
6815  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6816 public:
6817  VmaDefragmentationContext_T(
6818  VmaAllocator hAllocator,
6819  uint32_t currFrameIndex,
6820  uint32_t flags,
6821  VmaDefragmentationStats* pStats);
6822  ~VmaDefragmentationContext_T();
6823 
6824  void AddPools(uint32_t poolCount, VmaPool* pPools);
6825  void AddAllocations(
6826  uint32_t allocationCount,
6827  VmaAllocation* pAllocations,
6828  VkBool32* pAllocationsChanged);
6829 
6830  /*
6831  Returns:
6832  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6833  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6834  - Negative value if error occured and object can be destroyed immediately.
6835  */
6836  VkResult Defragment(
6837  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6838  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6839  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6840 
6841 private:
6842  const VmaAllocator m_hAllocator;
6843  const uint32_t m_CurrFrameIndex;
6844  const uint32_t m_Flags;
6845  VmaDefragmentationStats* const m_pStats;
6846  // Owner of these objects.
6847  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6848  // Owner of these objects.
6849  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6850 };
6851 
6852 #if VMA_RECORDING_ENABLED
6853 
6854 class VmaRecorder
6855 {
6856 public:
6857  VmaRecorder();
6858  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6859  void WriteConfiguration(
6860  const VkPhysicalDeviceProperties& devProps,
6861  const VkPhysicalDeviceMemoryProperties& memProps,
6862  uint32_t vulkanApiVersion,
6863  bool dedicatedAllocationExtensionEnabled,
6864  bool bindMemory2ExtensionEnabled,
6865  bool memoryBudgetExtensionEnabled);
6866  ~VmaRecorder();
6867 
6868  void RecordCreateAllocator(uint32_t frameIndex);
6869  void RecordDestroyAllocator(uint32_t frameIndex);
6870  void RecordCreatePool(uint32_t frameIndex,
6871  const VmaPoolCreateInfo& createInfo,
6872  VmaPool pool);
6873  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6874  void RecordAllocateMemory(uint32_t frameIndex,
6875  const VkMemoryRequirements& vkMemReq,
6876  const VmaAllocationCreateInfo& createInfo,
6877  VmaAllocation allocation);
6878  void RecordAllocateMemoryPages(uint32_t frameIndex,
6879  const VkMemoryRequirements& vkMemReq,
6880  const VmaAllocationCreateInfo& createInfo,
6881  uint64_t allocationCount,
6882  const VmaAllocation* pAllocations);
6883  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6884  const VkMemoryRequirements& vkMemReq,
6885  bool requiresDedicatedAllocation,
6886  bool prefersDedicatedAllocation,
6887  const VmaAllocationCreateInfo& createInfo,
6888  VmaAllocation allocation);
6889  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6890  const VkMemoryRequirements& vkMemReq,
6891  bool requiresDedicatedAllocation,
6892  bool prefersDedicatedAllocation,
6893  const VmaAllocationCreateInfo& createInfo,
6894  VmaAllocation allocation);
6895  void RecordFreeMemory(uint32_t frameIndex,
6896  VmaAllocation allocation);
6897  void RecordFreeMemoryPages(uint32_t frameIndex,
6898  uint64_t allocationCount,
6899  const VmaAllocation* pAllocations);
6900  void RecordSetAllocationUserData(uint32_t frameIndex,
6901  VmaAllocation allocation,
6902  const void* pUserData);
6903  void RecordCreateLostAllocation(uint32_t frameIndex,
6904  VmaAllocation allocation);
6905  void RecordMapMemory(uint32_t frameIndex,
6906  VmaAllocation allocation);
6907  void RecordUnmapMemory(uint32_t frameIndex,
6908  VmaAllocation allocation);
6909  void RecordFlushAllocation(uint32_t frameIndex,
6910  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6911  void RecordInvalidateAllocation(uint32_t frameIndex,
6912  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6913  void RecordCreateBuffer(uint32_t frameIndex,
6914  const VkBufferCreateInfo& bufCreateInfo,
6915  const VmaAllocationCreateInfo& allocCreateInfo,
6916  VmaAllocation allocation);
6917  void RecordCreateImage(uint32_t frameIndex,
6918  const VkImageCreateInfo& imageCreateInfo,
6919  const VmaAllocationCreateInfo& allocCreateInfo,
6920  VmaAllocation allocation);
6921  void RecordDestroyBuffer(uint32_t frameIndex,
6922  VmaAllocation allocation);
6923  void RecordDestroyImage(uint32_t frameIndex,
6924  VmaAllocation allocation);
6925  void RecordTouchAllocation(uint32_t frameIndex,
6926  VmaAllocation allocation);
6927  void RecordGetAllocationInfo(uint32_t frameIndex,
6928  VmaAllocation allocation);
6929  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6930  VmaPool pool);
6931  void RecordDefragmentationBegin(uint32_t frameIndex,
6932  const VmaDefragmentationInfo2& info,
6934  void RecordDefragmentationEnd(uint32_t frameIndex,
6936  void RecordSetPoolName(uint32_t frameIndex,
6937  VmaPool pool,
6938  const char* name);
6939 
6940 private:
6941  struct CallParams
6942  {
6943  uint32_t threadId;
6944  double time;
6945  };
6946 
6947  class UserDataString
6948  {
6949  public:
6950  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6951  const char* GetString() const { return m_Str; }
6952 
6953  private:
6954  char m_PtrStr[17];
6955  const char* m_Str;
6956  };
6957 
6958  bool m_UseMutex;
6959  VmaRecordFlags m_Flags;
6960  FILE* m_File;
6961  VMA_MUTEX m_FileMutex;
6962  int64_t m_Freq;
6963  int64_t m_StartCounter;
6964 
6965  void GetBasicParams(CallParams& outParams);
6966 
6967  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6968  template<typename T>
6969  void PrintPointerList(uint64_t count, const T* pItems)
6970  {
6971  if(count)
6972  {
6973  fprintf(m_File, "%p", pItems[0]);
6974  for(uint64_t i = 1; i < count; ++i)
6975  {
6976  fprintf(m_File, " %p", pItems[i]);
6977  }
6978  }
6979  }
6980 
6981  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6982  void Flush();
6983 };
6984 
6985 #endif // #if VMA_RECORDING_ENABLED
6986 
6987 /*
6988 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6989 */
6990 class VmaAllocationObjectAllocator
6991 {
6992  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6993 public:
6994  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6995 
6996  VmaAllocation Allocate();
6997  void Free(VmaAllocation hAlloc);
6998 
6999 private:
7000  VMA_MUTEX m_Mutex;
7001  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7002 };
7003 
7004 struct VmaCurrentBudgetData
7005 {
7006  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7007  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7008 
7009 #if VMA_MEMORY_BUDGET
7010  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7011  VMA_RW_MUTEX m_BudgetMutex;
7012  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7013  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7014  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7015 #endif // #if VMA_MEMORY_BUDGET
7016 
7017  VmaCurrentBudgetData()
7018  {
7019  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7020  {
7021  m_BlockBytes[heapIndex] = 0;
7022  m_AllocationBytes[heapIndex] = 0;
7023 #if VMA_MEMORY_BUDGET
7024  m_VulkanUsage[heapIndex] = 0;
7025  m_VulkanBudget[heapIndex] = 0;
7026  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7027 #endif
7028  }
7029 
7030 #if VMA_MEMORY_BUDGET
7031  m_OperationsSinceBudgetFetch = 0;
7032 #endif
7033  }
7034 
7035  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7036  {
7037  m_AllocationBytes[heapIndex] += allocationSize;
7038 #if VMA_MEMORY_BUDGET
7039  ++m_OperationsSinceBudgetFetch;
7040 #endif
7041  }
7042 
7043  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7044  {
7045  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
7046  m_AllocationBytes[heapIndex] -= allocationSize;
7047 #if VMA_MEMORY_BUDGET
7048  ++m_OperationsSinceBudgetFetch;
7049 #endif
7050  }
7051 };
7052 
7053 // Main allocator object.
7054 struct VmaAllocator_T
7055 {
7056  VMA_CLASS_NO_COPY(VmaAllocator_T)
7057 public:
7058  bool m_UseMutex;
7059  uint32_t m_VulkanApiVersion;
7060  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7061  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7062  bool m_UseExtMemoryBudget;
7063  VkDevice m_hDevice;
7064  VkInstance m_hInstance;
7065  bool m_AllocationCallbacksSpecified;
7066  VkAllocationCallbacks m_AllocationCallbacks;
7067  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7068  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7069 
7070  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
7071  uint32_t m_HeapSizeLimitMask;
7072 
7073  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7074  VkPhysicalDeviceMemoryProperties m_MemProps;
7075 
7076  // Default pools.
7077  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7078 
7079  // Each vector is sorted by memory (handle value).
7080  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7081  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7082  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7083 
7084  VmaCurrentBudgetData m_Budget;
7085 
7086  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7087  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7088  ~VmaAllocator_T();
7089 
7090  const VkAllocationCallbacks* GetAllocationCallbacks() const
7091  {
7092  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7093  }
7094  const VmaVulkanFunctions& GetVulkanFunctions() const
7095  {
7096  return m_VulkanFunctions;
7097  }
7098 
7099  VkDeviceSize GetBufferImageGranularity() const
7100  {
7101  return VMA_MAX(
7102  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7103  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7104  }
7105 
7106  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7107  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7108 
7109  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7110  {
7111  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7112  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7113  }
7114  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7115  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7116  {
7117  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7118  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7119  }
7120  // Minimum alignment for all allocations in specific memory type.
7121  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7122  {
7123  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7124  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7125  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7126  }
7127 
7128  bool IsIntegratedGpu() const
7129  {
7130  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7131  }
7132 
7133 #if VMA_RECORDING_ENABLED
7134  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7135 #endif
7136 
7137  void GetBufferMemoryRequirements(
7138  VkBuffer hBuffer,
7139  VkMemoryRequirements& memReq,
7140  bool& requiresDedicatedAllocation,
7141  bool& prefersDedicatedAllocation) const;
7142  void GetImageMemoryRequirements(
7143  VkImage hImage,
7144  VkMemoryRequirements& memReq,
7145  bool& requiresDedicatedAllocation,
7146  bool& prefersDedicatedAllocation) const;
7147 
7148  // Main allocation function.
7149  VkResult AllocateMemory(
7150  const VkMemoryRequirements& vkMemReq,
7151  bool requiresDedicatedAllocation,
7152  bool prefersDedicatedAllocation,
7153  VkBuffer dedicatedBuffer,
7154  VkImage dedicatedImage,
7155  const VmaAllocationCreateInfo& createInfo,
7156  VmaSuballocationType suballocType,
7157  size_t allocationCount,
7158  VmaAllocation* pAllocations);
7159 
7160  // Main deallocation function.
7161  void FreeMemory(
7162  size_t allocationCount,
7163  const VmaAllocation* pAllocations);
7164 
7165  VkResult ResizeAllocation(
7166  const VmaAllocation alloc,
7167  VkDeviceSize newSize);
7168 
7169  void CalculateStats(VmaStats* pStats);
7170 
7171  void GetBudget(
7172  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7173 
7174 #if VMA_STATS_STRING_ENABLED
7175  void PrintDetailedMap(class VmaJsonWriter& json);
7176 #endif
7177 
7178  VkResult DefragmentationBegin(
7179  const VmaDefragmentationInfo2& info,
7180  VmaDefragmentationStats* pStats,
7181  VmaDefragmentationContext* pContext);
7182  VkResult DefragmentationEnd(
7183  VmaDefragmentationContext context);
7184 
7185  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7186  bool TouchAllocation(VmaAllocation hAllocation);
7187 
7188  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7189  void DestroyPool(VmaPool pool);
7190  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7191 
7192  void SetCurrentFrameIndex(uint32_t frameIndex);
7193  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7194 
7195  void MakePoolAllocationsLost(
7196  VmaPool hPool,
7197  size_t* pLostAllocationCount);
7198  VkResult CheckPoolCorruption(VmaPool hPool);
7199  VkResult CheckCorruption(uint32_t memoryTypeBits);
7200 
7201  void CreateLostAllocation(VmaAllocation* pAllocation);
7202 
7203  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7204  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7205  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7206  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7207  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7208  VkResult BindVulkanBuffer(
7209  VkDeviceMemory memory,
7210  VkDeviceSize memoryOffset,
7211  VkBuffer buffer,
7212  const void* pNext);
7213  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7214  VkResult BindVulkanImage(
7215  VkDeviceMemory memory,
7216  VkDeviceSize memoryOffset,
7217  VkImage image,
7218  const void* pNext);
7219 
7220  VkResult Map(VmaAllocation hAllocation, void** ppData);
7221  void Unmap(VmaAllocation hAllocation);
7222 
7223  VkResult BindBufferMemory(
7224  VmaAllocation hAllocation,
7225  VkDeviceSize allocationLocalOffset,
7226  VkBuffer hBuffer,
7227  const void* pNext);
7228  VkResult BindImageMemory(
7229  VmaAllocation hAllocation,
7230  VkDeviceSize allocationLocalOffset,
7231  VkImage hImage,
7232  const void* pNext);
7233 
7234  void FlushOrInvalidateAllocation(
7235  VmaAllocation hAllocation,
7236  VkDeviceSize offset, VkDeviceSize size,
7237  VMA_CACHE_OPERATION op);
7238 
7239  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7240 
7241  /*
7242  Returns bit mask of memory types that can support defragmentation on GPU as
7243  they support creation of required buffer for copy operations.
7244  */
7245  uint32_t GetGpuDefragmentationMemoryTypeBits();
7246 
7247 private:
7248  VkDeviceSize m_PreferredLargeHeapBlockSize;
7249 
7250  VkPhysicalDevice m_PhysicalDevice;
7251  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7252  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7253 
7254  VMA_RW_MUTEX m_PoolsMutex;
7255  // Protected by m_PoolsMutex. Sorted by pointer value.
7256  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7257  uint32_t m_NextPoolId;
7258 
7259  VmaVulkanFunctions m_VulkanFunctions;
7260 
7261 #if VMA_RECORDING_ENABLED
7262  VmaRecorder* m_pRecorder;
7263 #endif
7264 
7265  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7266 
7267  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7268 
7269  VkResult AllocateMemoryOfType(
7270  VkDeviceSize size,
7271  VkDeviceSize alignment,
7272  bool dedicatedAllocation,
7273  VkBuffer dedicatedBuffer,
7274  VkImage dedicatedImage,
7275  const VmaAllocationCreateInfo& createInfo,
7276  uint32_t memTypeIndex,
7277  VmaSuballocationType suballocType,
7278  size_t allocationCount,
7279  VmaAllocation* pAllocations);
7280 
7281  // Helper function only to be used inside AllocateDedicatedMemory.
7282  VkResult AllocateDedicatedMemoryPage(
7283  VkDeviceSize size,
7284  VmaSuballocationType suballocType,
7285  uint32_t memTypeIndex,
7286  const VkMemoryAllocateInfo& allocInfo,
7287  bool map,
7288  bool isUserDataString,
7289  void* pUserData,
7290  VmaAllocation* pAllocation);
7291 
7292  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7293  VkResult AllocateDedicatedMemory(
7294  VkDeviceSize size,
7295  VmaSuballocationType suballocType,
7296  uint32_t memTypeIndex,
7297  bool withinBudget,
7298  bool map,
7299  bool isUserDataString,
7300  void* pUserData,
7301  VkBuffer dedicatedBuffer,
7302  VkImage dedicatedImage,
7303  size_t allocationCount,
7304  VmaAllocation* pAllocations);
7305 
7306  void FreeDedicatedMemory(const VmaAllocation allocation);
7307 
7308  /*
7309  Calculates and returns bit mask of memory types that can support defragmentation
7310  on GPU as they support creation of required buffer for copy operations.
7311  */
7312  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7313 
7314 #if VMA_MEMORY_BUDGET
7315  void UpdateVulkanBudget();
7316 #endif // #if VMA_MEMORY_BUDGET
7317 };
7318 
7320 // Memory allocation #2 after VmaAllocator_T definition
7321 
7322 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7323 {
7324  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7325 }
7326 
7327 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7328 {
7329  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7330 }
7331 
7332 template<typename T>
7333 static T* VmaAllocate(VmaAllocator hAllocator)
7334 {
7335  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7336 }
7337 
7338 template<typename T>
7339 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7340 {
7341  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7342 }
7343 
7344 template<typename T>
7345 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7346 {
7347  if(ptr != VMA_NULL)
7348  {
7349  ptr->~T();
7350  VmaFree(hAllocator, ptr);
7351  }
7352 }
7353 
7354 template<typename T>
7355 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7356 {
7357  if(ptr != VMA_NULL)
7358  {
7359  for(size_t i = count; i--; )
7360  ptr[i].~T();
7361  VmaFree(hAllocator, ptr);
7362  }
7363 }
7364 
7366 // VmaStringBuilder
7367 
7368 #if VMA_STATS_STRING_ENABLED
7369 
7370 class VmaStringBuilder
7371 {
7372 public:
7373  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7374  size_t GetLength() const { return m_Data.size(); }
7375  const char* GetData() const { return m_Data.data(); }
7376 
7377  void Add(char ch) { m_Data.push_back(ch); }
7378  void Add(const char* pStr);
7379  void AddNewLine() { Add('\n'); }
7380  void AddNumber(uint32_t num);
7381  void AddNumber(uint64_t num);
7382  void AddPointer(const void* ptr);
7383 
7384 private:
7385  VmaVector< char, VmaStlAllocator<char> > m_Data;
7386 };
7387 
7388 void VmaStringBuilder::Add(const char* pStr)
7389 {
7390  const size_t strLen = strlen(pStr);
7391  if(strLen > 0)
7392  {
7393  const size_t oldCount = m_Data.size();
7394  m_Data.resize(oldCount + strLen);
7395  memcpy(m_Data.data() + oldCount, pStr, strLen);
7396  }
7397 }
7398 
7399 void VmaStringBuilder::AddNumber(uint32_t num)
7400 {
7401  char buf[11];
7402  buf[10] = '\0';
7403  char *p = &buf[10];
7404  do
7405  {
7406  *--p = '0' + (num % 10);
7407  num /= 10;
7408  }
7409  while(num);
7410  Add(p);
7411 }
7412 
7413 void VmaStringBuilder::AddNumber(uint64_t num)
7414 {
7415  char buf[21];
7416  buf[20] = '\0';
7417  char *p = &buf[20];
7418  do
7419  {
7420  *--p = '0' + (num % 10);
7421  num /= 10;
7422  }
7423  while(num);
7424  Add(p);
7425 }
7426 
7427 void VmaStringBuilder::AddPointer(const void* ptr)
7428 {
7429  char buf[21];
7430  VmaPtrToStr(buf, sizeof(buf), ptr);
7431  Add(buf);
7432 }
7433 
7434 #endif // #if VMA_STATS_STRING_ENABLED
7435 
7437 // VmaJsonWriter
7438 
7439 #if VMA_STATS_STRING_ENABLED
7440 
7441 class VmaJsonWriter
7442 {
7443  VMA_CLASS_NO_COPY(VmaJsonWriter)
7444 public:
7445  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7446  ~VmaJsonWriter();
7447 
7448  void BeginObject(bool singleLine = false);
7449  void EndObject();
7450 
7451  void BeginArray(bool singleLine = false);
7452  void EndArray();
7453 
7454  void WriteString(const char* pStr);
7455  void BeginString(const char* pStr = VMA_NULL);
7456  void ContinueString(const char* pStr);
7457  void ContinueString(uint32_t n);
7458  void ContinueString(uint64_t n);
7459  void ContinueString_Pointer(const void* ptr);
7460  void EndString(const char* pStr = VMA_NULL);
7461 
7462  void WriteNumber(uint32_t n);
7463  void WriteNumber(uint64_t n);
7464  void WriteBool(bool b);
7465  void WriteNull();
7466 
7467 private:
7468  static const char* const INDENT;
7469 
7470  enum COLLECTION_TYPE
7471  {
7472  COLLECTION_TYPE_OBJECT,
7473  COLLECTION_TYPE_ARRAY,
7474  };
7475  struct StackItem
7476  {
7477  COLLECTION_TYPE type;
7478  uint32_t valueCount;
7479  bool singleLineMode;
7480  };
7481 
7482  VmaStringBuilder& m_SB;
7483  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7484  bool m_InsideString;
7485 
7486  void BeginValue(bool isString);
7487  void WriteIndent(bool oneLess = false);
7488 };
7489 
7490 const char* const VmaJsonWriter::INDENT = " ";
7491 
7492 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7493  m_SB(sb),
7494  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7495  m_InsideString(false)
7496 {
7497 }
7498 
7499 VmaJsonWriter::~VmaJsonWriter()
7500 {
7501  VMA_ASSERT(!m_InsideString);
7502  VMA_ASSERT(m_Stack.empty());
7503 }
7504 
7505 void VmaJsonWriter::BeginObject(bool singleLine)
7506 {
7507  VMA_ASSERT(!m_InsideString);
7508 
7509  BeginValue(false);
7510  m_SB.Add('{');
7511 
7512  StackItem item;
7513  item.type = COLLECTION_TYPE_OBJECT;
7514  item.valueCount = 0;
7515  item.singleLineMode = singleLine;
7516  m_Stack.push_back(item);
7517 }
7518 
7519 void VmaJsonWriter::EndObject()
7520 {
7521  VMA_ASSERT(!m_InsideString);
7522 
7523  WriteIndent(true);
7524  m_SB.Add('}');
7525 
7526  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7527  m_Stack.pop_back();
7528 }
7529 
7530 void VmaJsonWriter::BeginArray(bool singleLine)
7531 {
7532  VMA_ASSERT(!m_InsideString);
7533 
7534  BeginValue(false);
7535  m_SB.Add('[');
7536 
7537  StackItem item;
7538  item.type = COLLECTION_TYPE_ARRAY;
7539  item.valueCount = 0;
7540  item.singleLineMode = singleLine;
7541  m_Stack.push_back(item);
7542 }
7543 
7544 void VmaJsonWriter::EndArray()
7545 {
7546  VMA_ASSERT(!m_InsideString);
7547 
7548  WriteIndent(true);
7549  m_SB.Add(']');
7550 
7551  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7552  m_Stack.pop_back();
7553 }
7554 
7555 void VmaJsonWriter::WriteString(const char* pStr)
7556 {
7557  BeginString(pStr);
7558  EndString();
7559 }
7560 
7561 void VmaJsonWriter::BeginString(const char* pStr)
7562 {
7563  VMA_ASSERT(!m_InsideString);
7564 
7565  BeginValue(true);
7566  m_SB.Add('"');
7567  m_InsideString = true;
7568  if(pStr != VMA_NULL && pStr[0] != '\0')
7569  {
7570  ContinueString(pStr);
7571  }
7572 }
7573 
7574 void VmaJsonWriter::ContinueString(const char* pStr)
7575 {
7576  VMA_ASSERT(m_InsideString);
7577 
7578  const size_t strLen = strlen(pStr);
7579  for(size_t i = 0; i < strLen; ++i)
7580  {
7581  char ch = pStr[i];
7582  if(ch == '\\')
7583  {
7584  m_SB.Add("\\\\");
7585  }
7586  else if(ch == '"')
7587  {
7588  m_SB.Add("\\\"");
7589  }
7590  else if(ch >= 32)
7591  {
7592  m_SB.Add(ch);
7593  }
7594  else switch(ch)
7595  {
7596  case '\b':
7597  m_SB.Add("\\b");
7598  break;
7599  case '\f':
7600  m_SB.Add("\\f");
7601  break;
7602  case '\n':
7603  m_SB.Add("\\n");
7604  break;
7605  case '\r':
7606  m_SB.Add("\\r");
7607  break;
7608  case '\t':
7609  m_SB.Add("\\t");
7610  break;
7611  default:
7612  VMA_ASSERT(0 && "Character not currently supported.");
7613  break;
7614  }
7615  }
7616 }
7617 
7618 void VmaJsonWriter::ContinueString(uint32_t n)
7619 {
7620  VMA_ASSERT(m_InsideString);
7621  m_SB.AddNumber(n);
7622 }
7623 
7624 void VmaJsonWriter::ContinueString(uint64_t n)
7625 {
7626  VMA_ASSERT(m_InsideString);
7627  m_SB.AddNumber(n);
7628 }
7629 
7630 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7631 {
7632  VMA_ASSERT(m_InsideString);
7633  m_SB.AddPointer(ptr);
7634 }
7635 
7636 void VmaJsonWriter::EndString(const char* pStr)
7637 {
7638  VMA_ASSERT(m_InsideString);
7639  if(pStr != VMA_NULL && pStr[0] != '\0')
7640  {
7641  ContinueString(pStr);
7642  }
7643  m_SB.Add('"');
7644  m_InsideString = false;
7645 }
7646 
7647 void VmaJsonWriter::WriteNumber(uint32_t n)
7648 {
7649  VMA_ASSERT(!m_InsideString);
7650  BeginValue(false);
7651  m_SB.AddNumber(n);
7652 }
7653 
7654 void VmaJsonWriter::WriteNumber(uint64_t n)
7655 {
7656  VMA_ASSERT(!m_InsideString);
7657  BeginValue(false);
7658  m_SB.AddNumber(n);
7659 }
7660 
7661 void VmaJsonWriter::WriteBool(bool b)
7662 {
7663  VMA_ASSERT(!m_InsideString);
7664  BeginValue(false);
7665  m_SB.Add(b ? "true" : "false");
7666 }
7667 
7668 void VmaJsonWriter::WriteNull()
7669 {
7670  VMA_ASSERT(!m_InsideString);
7671  BeginValue(false);
7672  m_SB.Add("null");
7673 }
7674 
7675 void VmaJsonWriter::BeginValue(bool isString)
7676 {
7677  if(!m_Stack.empty())
7678  {
7679  StackItem& currItem = m_Stack.back();
7680  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7681  currItem.valueCount % 2 == 0)
7682  {
7683  VMA_ASSERT(isString);
7684  }
7685 
7686  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7687  currItem.valueCount % 2 != 0)
7688  {
7689  m_SB.Add(": ");
7690  }
7691  else if(currItem.valueCount > 0)
7692  {
7693  m_SB.Add(", ");
7694  WriteIndent();
7695  }
7696  else
7697  {
7698  WriteIndent();
7699  }
7700  ++currItem.valueCount;
7701  }
7702 }
7703 
7704 void VmaJsonWriter::WriteIndent(bool oneLess)
7705 {
7706  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7707  {
7708  m_SB.AddNewLine();
7709 
7710  size_t count = m_Stack.size();
7711  if(count > 0 && oneLess)
7712  {
7713  --count;
7714  }
7715  for(size_t i = 0; i < count; ++i)
7716  {
7717  m_SB.Add(INDENT);
7718  }
7719  }
7720 }
7721 
7722 #endif // #if VMA_STATS_STRING_ENABLED
7723 
7725 
7726 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7727 {
7728  if(IsUserDataString())
7729  {
7730  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7731 
7732  FreeUserDataString(hAllocator);
7733 
7734  if(pUserData != VMA_NULL)
7735  {
7736  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
7737  }
7738  }
7739  else
7740  {
7741  m_pUserData = pUserData;
7742  }
7743 }
7744 
7745 void VmaAllocation_T::ChangeBlockAllocation(
7746  VmaAllocator hAllocator,
7747  VmaDeviceMemoryBlock* block,
7748  VkDeviceSize offset)
7749 {
7750  VMA_ASSERT(block != VMA_NULL);
7751  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7752 
7753  // Move mapping reference counter from old block to new block.
7754  if(block != m_BlockAllocation.m_Block)
7755  {
7756  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7757  if(IsPersistentMap())
7758  ++mapRefCount;
7759  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7760  block->Map(hAllocator, mapRefCount, VMA_NULL);
7761  }
7762 
7763  m_BlockAllocation.m_Block = block;
7764  m_BlockAllocation.m_Offset = offset;
7765 }
7766 
7767 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7768 {
7769  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7770  m_BlockAllocation.m_Offset = newOffset;
7771 }
7772 
7773 VkDeviceSize VmaAllocation_T::GetOffset() const
7774 {
7775  switch(m_Type)
7776  {
7777  case ALLOCATION_TYPE_BLOCK:
7778  return m_BlockAllocation.m_Offset;
7779  case ALLOCATION_TYPE_DEDICATED:
7780  return 0;
7781  default:
7782  VMA_ASSERT(0);
7783  return 0;
7784  }
7785 }
7786 
7787 VkDeviceMemory VmaAllocation_T::GetMemory() const
7788 {
7789  switch(m_Type)
7790  {
7791  case ALLOCATION_TYPE_BLOCK:
7792  return m_BlockAllocation.m_Block->GetDeviceMemory();
7793  case ALLOCATION_TYPE_DEDICATED:
7794  return m_DedicatedAllocation.m_hMemory;
7795  default:
7796  VMA_ASSERT(0);
7797  return VK_NULL_HANDLE;
7798  }
7799 }
7800 
7801 void* VmaAllocation_T::GetMappedData() const
7802 {
7803  switch(m_Type)
7804  {
7805  case ALLOCATION_TYPE_BLOCK:
7806  if(m_MapCount != 0)
7807  {
7808  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7809  VMA_ASSERT(pBlockData != VMA_NULL);
7810  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7811  }
7812  else
7813  {
7814  return VMA_NULL;
7815  }
7816  break;
7817  case ALLOCATION_TYPE_DEDICATED:
7818  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7819  return m_DedicatedAllocation.m_pMappedData;
7820  default:
7821  VMA_ASSERT(0);
7822  return VMA_NULL;
7823  }
7824 }
7825 
7826 bool VmaAllocation_T::CanBecomeLost() const
7827 {
7828  switch(m_Type)
7829  {
7830  case ALLOCATION_TYPE_BLOCK:
7831  return m_BlockAllocation.m_CanBecomeLost;
7832  case ALLOCATION_TYPE_DEDICATED:
7833  return false;
7834  default:
7835  VMA_ASSERT(0);
7836  return false;
7837  }
7838 }
7839 
7840 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7841 {
7842  VMA_ASSERT(CanBecomeLost());
7843 
7844  /*
7845  Warning: This is a carefully designed algorithm.
7846  Do not modify unless you really know what you're doing :)
7847  */
7848  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7849  for(;;)
7850  {
7851  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7852  {
7853  VMA_ASSERT(0);
7854  return false;
7855  }
7856  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7857  {
7858  return false;
7859  }
7860  else // Last use time earlier than current time.
7861  {
7862  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7863  {
7864  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7865  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7866  return true;
7867  }
7868  }
7869  }
7870 }
7871 
7872 #if VMA_STATS_STRING_ENABLED
7873 
7874 // Correspond to values of enum VmaSuballocationType.
7875 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7876  "FREE",
7877  "UNKNOWN",
7878  "BUFFER",
7879  "IMAGE_UNKNOWN",
7880  "IMAGE_LINEAR",
7881  "IMAGE_OPTIMAL",
7882 };
7883 
7884 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7885 {
7886  json.WriteString("Type");
7887  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7888 
7889  json.WriteString("Size");
7890  json.WriteNumber(m_Size);
7891 
7892  if(m_pUserData != VMA_NULL)
7893  {
7894  json.WriteString("UserData");
7895  if(IsUserDataString())
7896  {
7897  json.WriteString((const char*)m_pUserData);
7898  }
7899  else
7900  {
7901  json.BeginString();
7902  json.ContinueString_Pointer(m_pUserData);
7903  json.EndString();
7904  }
7905  }
7906 
7907  json.WriteString("CreationFrameIndex");
7908  json.WriteNumber(m_CreationFrameIndex);
7909 
7910  json.WriteString("LastUseFrameIndex");
7911  json.WriteNumber(GetLastUseFrameIndex());
7912 
7913  if(m_BufferImageUsage != 0)
7914  {
7915  json.WriteString("Usage");
7916  json.WriteNumber(m_BufferImageUsage);
7917  }
7918 }
7919 
7920 #endif
7921 
7922 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7923 {
7924  VMA_ASSERT(IsUserDataString());
7925  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
7926  m_pUserData = VMA_NULL;
7927 }
7928 
7929 void VmaAllocation_T::BlockAllocMap()
7930 {
7931  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7932 
7933  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7934  {
7935  ++m_MapCount;
7936  }
7937  else
7938  {
7939  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7940  }
7941 }
7942 
7943 void VmaAllocation_T::BlockAllocUnmap()
7944 {
7945  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7946 
7947  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7948  {
7949  --m_MapCount;
7950  }
7951  else
7952  {
7953  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7954  }
7955 }
7956 
7957 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7958 {
7959  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7960 
7961  if(m_MapCount != 0)
7962  {
7963  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7964  {
7965  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7966  *ppData = m_DedicatedAllocation.m_pMappedData;
7967  ++m_MapCount;
7968  return VK_SUCCESS;
7969  }
7970  else
7971  {
7972  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7973  return VK_ERROR_MEMORY_MAP_FAILED;
7974  }
7975  }
7976  else
7977  {
7978  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7979  hAllocator->m_hDevice,
7980  m_DedicatedAllocation.m_hMemory,
7981  0, // offset
7982  VK_WHOLE_SIZE,
7983  0, // flags
7984  ppData);
7985  if(result == VK_SUCCESS)
7986  {
7987  m_DedicatedAllocation.m_pMappedData = *ppData;
7988  m_MapCount = 1;
7989  }
7990  return result;
7991  }
7992 }
7993 
7994 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7995 {
7996  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7997 
7998  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7999  {
8000  --m_MapCount;
8001  if(m_MapCount == 0)
8002  {
8003  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8004  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8005  hAllocator->m_hDevice,
8006  m_DedicatedAllocation.m_hMemory);
8007  }
8008  }
8009  else
8010  {
8011  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8012  }
8013 }
8014 
8015 #if VMA_STATS_STRING_ENABLED
8016 
8017 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8018 {
8019  json.BeginObject();
8020 
8021  json.WriteString("Blocks");
8022  json.WriteNumber(stat.blockCount);
8023 
8024  json.WriteString("Allocations");
8025  json.WriteNumber(stat.allocationCount);
8026 
8027  json.WriteString("UnusedRanges");
8028  json.WriteNumber(stat.unusedRangeCount);
8029 
8030  json.WriteString("UsedBytes");
8031  json.WriteNumber(stat.usedBytes);
8032 
8033  json.WriteString("UnusedBytes");
8034  json.WriteNumber(stat.unusedBytes);
8035 
8036  if(stat.allocationCount > 1)
8037  {
8038  json.WriteString("AllocationSize");
8039  json.BeginObject(true);
8040  json.WriteString("Min");
8041  json.WriteNumber(stat.allocationSizeMin);
8042  json.WriteString("Avg");
8043  json.WriteNumber(stat.allocationSizeAvg);
8044  json.WriteString("Max");
8045  json.WriteNumber(stat.allocationSizeMax);
8046  json.EndObject();
8047  }
8048 
8049  if(stat.unusedRangeCount > 1)
8050  {
8051  json.WriteString("UnusedRangeSize");
8052  json.BeginObject(true);
8053  json.WriteString("Min");
8054  json.WriteNumber(stat.unusedRangeSizeMin);
8055  json.WriteString("Avg");
8056  json.WriteNumber(stat.unusedRangeSizeAvg);
8057  json.WriteString("Max");
8058  json.WriteNumber(stat.unusedRangeSizeMax);
8059  json.EndObject();
8060  }
8061 
8062  json.EndObject();
8063 }
8064 
8065 #endif // #if VMA_STATS_STRING_ENABLED
8066 
8067 struct VmaSuballocationItemSizeLess
8068 {
8069  bool operator()(
8070  const VmaSuballocationList::iterator lhs,
8071  const VmaSuballocationList::iterator rhs) const
8072  {
8073  return lhs->size < rhs->size;
8074  }
8075  bool operator()(
8076  const VmaSuballocationList::iterator lhs,
8077  VkDeviceSize rhsSize) const
8078  {
8079  return lhs->size < rhsSize;
8080  }
8081 };
8082 
8083 
8085 // class VmaBlockMetadata
8086 
8087 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8088  m_Size(0),
8089  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8090 {
8091 }
8092 
8093 #if VMA_STATS_STRING_ENABLED
8094 
8095 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8096  VkDeviceSize unusedBytes,
8097  size_t allocationCount,
8098  size_t unusedRangeCount) const
8099 {
8100  json.BeginObject();
8101 
8102  json.WriteString("TotalBytes");
8103  json.WriteNumber(GetSize());
8104 
8105  json.WriteString("UnusedBytes");
8106  json.WriteNumber(unusedBytes);
8107 
8108  json.WriteString("Allocations");
8109  json.WriteNumber((uint64_t)allocationCount);
8110 
8111  json.WriteString("UnusedRanges");
8112  json.WriteNumber((uint64_t)unusedRangeCount);
8113 
8114  json.WriteString("Suballocations");
8115  json.BeginArray();
8116 }
8117 
8118 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8119  VkDeviceSize offset,
8120  VmaAllocation hAllocation) const
8121 {
8122  json.BeginObject(true);
8123 
8124  json.WriteString("Offset");
8125  json.WriteNumber(offset);
8126 
8127  hAllocation->PrintParameters(json);
8128 
8129  json.EndObject();
8130 }
8131 
8132 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8133  VkDeviceSize offset,
8134  VkDeviceSize size) const
8135 {
8136  json.BeginObject(true);
8137 
8138  json.WriteString("Offset");
8139  json.WriteNumber(offset);
8140 
8141  json.WriteString("Type");
8142  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8143 
8144  json.WriteString("Size");
8145  json.WriteNumber(size);
8146 
8147  json.EndObject();
8148 }
8149 
8150 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8151 {
8152  json.EndArray();
8153  json.EndObject();
8154 }
8155 
8156 #endif // #if VMA_STATS_STRING_ENABLED
8157 
8159 // class VmaBlockMetadata_Generic
8160 
8161 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8162  VmaBlockMetadata(hAllocator),
8163  m_FreeCount(0),
8164  m_SumFreeSize(0),
8165  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8166  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8167 {
8168 }
8169 
8170 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8171 {
8172 }
8173 
8174 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8175 {
8176  VmaBlockMetadata::Init(size);
8177 
8178  m_FreeCount = 1;
8179  m_SumFreeSize = size;
8180 
8181  VmaSuballocation suballoc = {};
8182  suballoc.offset = 0;
8183  suballoc.size = size;
8184  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8185  suballoc.hAllocation = VK_NULL_HANDLE;
8186 
8187  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8188  m_Suballocations.push_back(suballoc);
8189  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8190  --suballocItem;
8191  m_FreeSuballocationsBySize.push_back(suballocItem);
8192 }
8193 
8194 bool VmaBlockMetadata_Generic::Validate() const
8195 {
8196  VMA_VALIDATE(!m_Suballocations.empty());
8197 
8198  // Expected offset of new suballocation as calculated from previous ones.
8199  VkDeviceSize calculatedOffset = 0;
8200  // Expected number of free suballocations as calculated from traversing their list.
8201  uint32_t calculatedFreeCount = 0;
8202  // Expected sum size of free suballocations as calculated from traversing their list.
8203  VkDeviceSize calculatedSumFreeSize = 0;
8204  // Expected number of free suballocations that should be registered in
8205  // m_FreeSuballocationsBySize calculated from traversing their list.
8206  size_t freeSuballocationsToRegister = 0;
8207  // True if previous visited suballocation was free.
8208  bool prevFree = false;
8209 
8210  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8211  suballocItem != m_Suballocations.cend();
8212  ++suballocItem)
8213  {
8214  const VmaSuballocation& subAlloc = *suballocItem;
8215 
8216  // Actual offset of this suballocation doesn't match expected one.
8217  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8218 
8219  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8220  // Two adjacent free suballocations are invalid. They should be merged.
8221  VMA_VALIDATE(!prevFree || !currFree);
8222 
8223  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8224 
8225  if(currFree)
8226  {
8227  calculatedSumFreeSize += subAlloc.size;
8228  ++calculatedFreeCount;
8229  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8230  {
8231  ++freeSuballocationsToRegister;
8232  }
8233 
8234  // Margin required between allocations - every free space must be at least that large.
8235  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8236  }
8237  else
8238  {
8239  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8240  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8241 
8242  // Margin required between allocations - previous allocation must be free.
8243  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8244  }
8245 
8246  calculatedOffset += subAlloc.size;
8247  prevFree = currFree;
8248  }
8249 
8250  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8251  // match expected one.
8252  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8253 
8254  VkDeviceSize lastSize = 0;
8255  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8256  {
8257  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8258 
8259  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8260  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8261  // They must be sorted by size ascending.
8262  VMA_VALIDATE(suballocItem->size >= lastSize);
8263 
8264  lastSize = suballocItem->size;
8265  }
8266 
8267  // Check if totals match calculacted values.
8268  VMA_VALIDATE(ValidateFreeSuballocationList());
8269  VMA_VALIDATE(calculatedOffset == GetSize());
8270  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8271  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8272 
8273  return true;
8274 }
8275 
8276 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8277 {
8278  if(!m_FreeSuballocationsBySize.empty())
8279  {
8280  return m_FreeSuballocationsBySize.back()->size;
8281  }
8282  else
8283  {
8284  return 0;
8285  }
8286 }
8287 
8288 bool VmaBlockMetadata_Generic::IsEmpty() const
8289 {
8290  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8291 }
8292 
8293 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8294 {
8295  outInfo.blockCount = 1;
8296 
8297  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8298  outInfo.allocationCount = rangeCount - m_FreeCount;
8299  outInfo.unusedRangeCount = m_FreeCount;
8300 
8301  outInfo.unusedBytes = m_SumFreeSize;
8302  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8303 
8304  outInfo.allocationSizeMin = UINT64_MAX;
8305  outInfo.allocationSizeMax = 0;
8306  outInfo.unusedRangeSizeMin = UINT64_MAX;
8307  outInfo.unusedRangeSizeMax = 0;
8308 
8309  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8310  suballocItem != m_Suballocations.cend();
8311  ++suballocItem)
8312  {
8313  const VmaSuballocation& suballoc = *suballocItem;
8314  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8315  {
8316  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8317  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8318  }
8319  else
8320  {
8321  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8322  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8323  }
8324  }
8325 }
8326 
8327 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8328 {
8329  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8330 
8331  inoutStats.size += GetSize();
8332  inoutStats.unusedSize += m_SumFreeSize;
8333  inoutStats.allocationCount += rangeCount - m_FreeCount;
8334  inoutStats.unusedRangeCount += m_FreeCount;
8335  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8336 }
8337 
8338 #if VMA_STATS_STRING_ENABLED
8339 
8340 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8341 {
8342  PrintDetailedMap_Begin(json,
8343  m_SumFreeSize, // unusedBytes
8344  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8345  m_FreeCount); // unusedRangeCount
8346 
8347  size_t i = 0;
8348  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8349  suballocItem != m_Suballocations.cend();
8350  ++suballocItem, ++i)
8351  {
8352  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8353  {
8354  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8355  }
8356  else
8357  {
8358  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8359  }
8360  }
8361 
8362  PrintDetailedMap_End(json);
8363 }
8364 
8365 #endif // #if VMA_STATS_STRING_ENABLED
8366 
8367 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8368  uint32_t currentFrameIndex,
8369  uint32_t frameInUseCount,
8370  VkDeviceSize bufferImageGranularity,
8371  VkDeviceSize allocSize,
8372  VkDeviceSize allocAlignment,
8373  bool upperAddress,
8374  VmaSuballocationType allocType,
8375  bool canMakeOtherLost,
8376  uint32_t strategy,
8377  VmaAllocationRequest* pAllocationRequest)
8378 {
8379  VMA_ASSERT(allocSize > 0);
8380  VMA_ASSERT(!upperAddress);
8381  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8382  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8383  VMA_HEAVY_ASSERT(Validate());
8384 
8385  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8386 
8387  // There is not enough total free space in this block to fullfill the request: Early return.
8388  if(canMakeOtherLost == false &&
8389  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8390  {
8391  return false;
8392  }
8393 
8394  // New algorithm, efficiently searching freeSuballocationsBySize.
8395  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8396  if(freeSuballocCount > 0)
8397  {
8399  {
8400  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8401  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8402  m_FreeSuballocationsBySize.data(),
8403  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8404  allocSize + 2 * VMA_DEBUG_MARGIN,
8405  VmaSuballocationItemSizeLess());
8406  size_t index = it - m_FreeSuballocationsBySize.data();
8407  for(; index < freeSuballocCount; ++index)
8408  {
8409  if(CheckAllocation(
8410  currentFrameIndex,
8411  frameInUseCount,
8412  bufferImageGranularity,
8413  allocSize,
8414  allocAlignment,
8415  allocType,
8416  m_FreeSuballocationsBySize[index],
8417  false, // canMakeOtherLost
8418  &pAllocationRequest->offset,
8419  &pAllocationRequest->itemsToMakeLostCount,
8420  &pAllocationRequest->sumFreeSize,
8421  &pAllocationRequest->sumItemSize))
8422  {
8423  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8424  return true;
8425  }
8426  }
8427  }
8428  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8429  {
8430  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8431  it != m_Suballocations.end();
8432  ++it)
8433  {
8434  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8435  currentFrameIndex,
8436  frameInUseCount,
8437  bufferImageGranularity,
8438  allocSize,
8439  allocAlignment,
8440  allocType,
8441  it,
8442  false, // canMakeOtherLost
8443  &pAllocationRequest->offset,
8444  &pAllocationRequest->itemsToMakeLostCount,
8445  &pAllocationRequest->sumFreeSize,
8446  &pAllocationRequest->sumItemSize))
8447  {
8448  pAllocationRequest->item = it;
8449  return true;
8450  }
8451  }
8452  }
8453  else // WORST_FIT, FIRST_FIT
8454  {
8455  // Search staring from biggest suballocations.
8456  for(size_t index = freeSuballocCount; index--; )
8457  {
8458  if(CheckAllocation(
8459  currentFrameIndex,
8460  frameInUseCount,
8461  bufferImageGranularity,
8462  allocSize,
8463  allocAlignment,
8464  allocType,
8465  m_FreeSuballocationsBySize[index],
8466  false, // canMakeOtherLost
8467  &pAllocationRequest->offset,
8468  &pAllocationRequest->itemsToMakeLostCount,
8469  &pAllocationRequest->sumFreeSize,
8470  &pAllocationRequest->sumItemSize))
8471  {
8472  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8473  return true;
8474  }
8475  }
8476  }
8477  }
8478 
8479  if(canMakeOtherLost)
8480  {
8481  // Brute-force algorithm. TODO: Come up with something better.
8482 
8483  bool found = false;
8484  VmaAllocationRequest tmpAllocRequest = {};
8485  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8486  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8487  suballocIt != m_Suballocations.end();
8488  ++suballocIt)
8489  {
8490  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8491  suballocIt->hAllocation->CanBecomeLost())
8492  {
8493  if(CheckAllocation(
8494  currentFrameIndex,
8495  frameInUseCount,
8496  bufferImageGranularity,
8497  allocSize,
8498  allocAlignment,
8499  allocType,
8500  suballocIt,
8501  canMakeOtherLost,
8502  &tmpAllocRequest.offset,
8503  &tmpAllocRequest.itemsToMakeLostCount,
8504  &tmpAllocRequest.sumFreeSize,
8505  &tmpAllocRequest.sumItemSize))
8506  {
8508  {
8509  *pAllocationRequest = tmpAllocRequest;
8510  pAllocationRequest->item = suballocIt;
8511  break;
8512  }
8513  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8514  {
8515  *pAllocationRequest = tmpAllocRequest;
8516  pAllocationRequest->item = suballocIt;
8517  found = true;
8518  }
8519  }
8520  }
8521  }
8522 
8523  return found;
8524  }
8525 
8526  return false;
8527 }
8528 
8529 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8530  uint32_t currentFrameIndex,
8531  uint32_t frameInUseCount,
8532  VmaAllocationRequest* pAllocationRequest)
8533 {
8534  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8535 
8536  while(pAllocationRequest->itemsToMakeLostCount > 0)
8537  {
8538  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8539  {
8540  ++pAllocationRequest->item;
8541  }
8542  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8543  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8544  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8545  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8546  {
8547  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8548  --pAllocationRequest->itemsToMakeLostCount;
8549  }
8550  else
8551  {
8552  return false;
8553  }
8554  }
8555 
8556  VMA_HEAVY_ASSERT(Validate());
8557  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8558  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8559 
8560  return true;
8561 }
8562 
8563 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8564 {
8565  uint32_t lostAllocationCount = 0;
8566  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8567  it != m_Suballocations.end();
8568  ++it)
8569  {
8570  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8571  it->hAllocation->CanBecomeLost() &&
8572  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8573  {
8574  it = FreeSuballocation(it);
8575  ++lostAllocationCount;
8576  }
8577  }
8578  return lostAllocationCount;
8579 }
8580 
8581 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8582 {
8583  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8584  it != m_Suballocations.end();
8585  ++it)
8586  {
8587  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8588  {
8589  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8590  {
8591  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8592  return VK_ERROR_VALIDATION_FAILED_EXT;
8593  }
8594  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8595  {
8596  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8597  return VK_ERROR_VALIDATION_FAILED_EXT;
8598  }
8599  }
8600  }
8601 
8602  return VK_SUCCESS;
8603 }
8604 
8605 void VmaBlockMetadata_Generic::Alloc(
8606  const VmaAllocationRequest& request,
8607  VmaSuballocationType type,
8608  VkDeviceSize allocSize,
8609  VmaAllocation hAllocation)
8610 {
8611  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8612  VMA_ASSERT(request.item != m_Suballocations.end());
8613  VmaSuballocation& suballoc = *request.item;
8614  // Given suballocation is a free block.
8615  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8616  // Given offset is inside this suballocation.
8617  VMA_ASSERT(request.offset >= suballoc.offset);
8618  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8619  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8620  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8621 
8622  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8623  // it to become used.
8624  UnregisterFreeSuballocation(request.item);
8625 
8626  suballoc.offset = request.offset;
8627  suballoc.size = allocSize;
8628  suballoc.type = type;
8629  suballoc.hAllocation = hAllocation;
8630 
8631  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8632  if(paddingEnd)
8633  {
8634  VmaSuballocation paddingSuballoc = {};
8635  paddingSuballoc.offset = request.offset + allocSize;
8636  paddingSuballoc.size = paddingEnd;
8637  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8638  VmaSuballocationList::iterator next = request.item;
8639  ++next;
8640  const VmaSuballocationList::iterator paddingEndItem =
8641  m_Suballocations.insert(next, paddingSuballoc);
8642  RegisterFreeSuballocation(paddingEndItem);
8643  }
8644 
8645  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8646  if(paddingBegin)
8647  {
8648  VmaSuballocation paddingSuballoc = {};
8649  paddingSuballoc.offset = request.offset - paddingBegin;
8650  paddingSuballoc.size = paddingBegin;
8651  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8652  const VmaSuballocationList::iterator paddingBeginItem =
8653  m_Suballocations.insert(request.item, paddingSuballoc);
8654  RegisterFreeSuballocation(paddingBeginItem);
8655  }
8656 
8657  // Update totals.
8658  m_FreeCount = m_FreeCount - 1;
8659  if(paddingBegin > 0)
8660  {
8661  ++m_FreeCount;
8662  }
8663  if(paddingEnd > 0)
8664  {
8665  ++m_FreeCount;
8666  }
8667  m_SumFreeSize -= allocSize;
8668 }
8669 
8670 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8671 {
8672  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8673  suballocItem != m_Suballocations.end();
8674  ++suballocItem)
8675  {
8676  VmaSuballocation& suballoc = *suballocItem;
8677  if(suballoc.hAllocation == allocation)
8678  {
8679  FreeSuballocation(suballocItem);
8680  VMA_HEAVY_ASSERT(Validate());
8681  return;
8682  }
8683  }
8684  VMA_ASSERT(0 && "Not found!");
8685 }
8686 
8687 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8688 {
8689  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8690  suballocItem != m_Suballocations.end();
8691  ++suballocItem)
8692  {
8693  VmaSuballocation& suballoc = *suballocItem;
8694  if(suballoc.offset == offset)
8695  {
8696  FreeSuballocation(suballocItem);
8697  return;
8698  }
8699  }
8700  VMA_ASSERT(0 && "Not found!");
8701 }
8702 
8703 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8704 {
8705  VkDeviceSize lastSize = 0;
8706  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8707  {
8708  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8709 
8710  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8711  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8712  VMA_VALIDATE(it->size >= lastSize);
8713  lastSize = it->size;
8714  }
8715  return true;
8716 }
8717 
8718 bool VmaBlockMetadata_Generic::CheckAllocation(
8719  uint32_t currentFrameIndex,
8720  uint32_t frameInUseCount,
8721  VkDeviceSize bufferImageGranularity,
8722  VkDeviceSize allocSize,
8723  VkDeviceSize allocAlignment,
8724  VmaSuballocationType allocType,
8725  VmaSuballocationList::const_iterator suballocItem,
8726  bool canMakeOtherLost,
8727  VkDeviceSize* pOffset,
8728  size_t* itemsToMakeLostCount,
8729  VkDeviceSize* pSumFreeSize,
8730  VkDeviceSize* pSumItemSize) const
8731 {
8732  VMA_ASSERT(allocSize > 0);
8733  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8734  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8735  VMA_ASSERT(pOffset != VMA_NULL);
8736 
8737  *itemsToMakeLostCount = 0;
8738  *pSumFreeSize = 0;
8739  *pSumItemSize = 0;
8740 
8741  if(canMakeOtherLost)
8742  {
8743  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8744  {
8745  *pSumFreeSize = suballocItem->size;
8746  }
8747  else
8748  {
8749  if(suballocItem->hAllocation->CanBecomeLost() &&
8750  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8751  {
8752  ++*itemsToMakeLostCount;
8753  *pSumItemSize = suballocItem->size;
8754  }
8755  else
8756  {
8757  return false;
8758  }
8759  }
8760 
8761  // Remaining size is too small for this request: Early return.
8762  if(GetSize() - suballocItem->offset < allocSize)
8763  {
8764  return false;
8765  }
8766 
8767  // Start from offset equal to beginning of this suballocation.
8768  *pOffset = suballocItem->offset;
8769 
8770  // Apply VMA_DEBUG_MARGIN at the beginning.
8771  if(VMA_DEBUG_MARGIN > 0)
8772  {
8773  *pOffset += VMA_DEBUG_MARGIN;
8774  }
8775 
8776  // Apply alignment.
8777  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8778 
8779  // Check previous suballocations for BufferImageGranularity conflicts.
8780  // Make bigger alignment if necessary.
8781  if(bufferImageGranularity > 1)
8782  {
8783  bool bufferImageGranularityConflict = false;
8784  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8785  while(prevSuballocItem != m_Suballocations.cbegin())
8786  {
8787  --prevSuballocItem;
8788  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8789  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8790  {
8791  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8792  {
8793  bufferImageGranularityConflict = true;
8794  break;
8795  }
8796  }
8797  else
8798  // Already on previous page.
8799  break;
8800  }
8801  if(bufferImageGranularityConflict)
8802  {
8803  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8804  }
8805  }
8806 
8807  // Now that we have final *pOffset, check if we are past suballocItem.
8808  // If yes, return false - this function should be called for another suballocItem as starting point.
8809  if(*pOffset >= suballocItem->offset + suballocItem->size)
8810  {
8811  return false;
8812  }
8813 
8814  // Calculate padding at the beginning based on current offset.
8815  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8816 
8817  // Calculate required margin at the end.
8818  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8819 
8820  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8821  // Another early return check.
8822  if(suballocItem->offset + totalSize > GetSize())
8823  {
8824  return false;
8825  }
8826 
8827  // Advance lastSuballocItem until desired size is reached.
8828  // Update itemsToMakeLostCount.
8829  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8830  if(totalSize > suballocItem->size)
8831  {
8832  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8833  while(remainingSize > 0)
8834  {
8835  ++lastSuballocItem;
8836  if(lastSuballocItem == m_Suballocations.cend())
8837  {
8838  return false;
8839  }
8840  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8841  {
8842  *pSumFreeSize += lastSuballocItem->size;
8843  }
8844  else
8845  {
8846  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8847  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8848  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8849  {
8850  ++*itemsToMakeLostCount;
8851  *pSumItemSize += lastSuballocItem->size;
8852  }
8853  else
8854  {
8855  return false;
8856  }
8857  }
8858  remainingSize = (lastSuballocItem->size < remainingSize) ?
8859  remainingSize - lastSuballocItem->size : 0;
8860  }
8861  }
8862 
8863  // Check next suballocations for BufferImageGranularity conflicts.
8864  // If conflict exists, we must mark more allocations lost or fail.
8865  if(bufferImageGranularity > 1)
8866  {
8867  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8868  ++nextSuballocItem;
8869  while(nextSuballocItem != m_Suballocations.cend())
8870  {
8871  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8872  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8873  {
8874  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8875  {
8876  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8877  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8878  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8879  {
8880  ++*itemsToMakeLostCount;
8881  }
8882  else
8883  {
8884  return false;
8885  }
8886  }
8887  }
8888  else
8889  {
8890  // Already on next page.
8891  break;
8892  }
8893  ++nextSuballocItem;
8894  }
8895  }
8896  }
8897  else
8898  {
8899  const VmaSuballocation& suballoc = *suballocItem;
8900  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8901 
8902  *pSumFreeSize = suballoc.size;
8903 
8904  // Size of this suballocation is too small for this request: Early return.
8905  if(suballoc.size < allocSize)
8906  {
8907  return false;
8908  }
8909 
8910  // Start from offset equal to beginning of this suballocation.
8911  *pOffset = suballoc.offset;
8912 
8913  // Apply VMA_DEBUG_MARGIN at the beginning.
8914  if(VMA_DEBUG_MARGIN > 0)
8915  {
8916  *pOffset += VMA_DEBUG_MARGIN;
8917  }
8918 
8919  // Apply alignment.
8920  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8921 
8922  // Check previous suballocations for BufferImageGranularity conflicts.
8923  // Make bigger alignment if necessary.
8924  if(bufferImageGranularity > 1)
8925  {
8926  bool bufferImageGranularityConflict = false;
8927  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8928  while(prevSuballocItem != m_Suballocations.cbegin())
8929  {
8930  --prevSuballocItem;
8931  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8932  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8933  {
8934  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8935  {
8936  bufferImageGranularityConflict = true;
8937  break;
8938  }
8939  }
8940  else
8941  // Already on previous page.
8942  break;
8943  }
8944  if(bufferImageGranularityConflict)
8945  {
8946  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8947  }
8948  }
8949 
8950  // Calculate padding at the beginning based on current offset.
8951  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8952 
8953  // Calculate required margin at the end.
8954  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8955 
8956  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8957  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8958  {
8959  return false;
8960  }
8961 
8962  // Check next suballocations for BufferImageGranularity conflicts.
8963  // If conflict exists, allocation cannot be made here.
8964  if(bufferImageGranularity > 1)
8965  {
8966  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8967  ++nextSuballocItem;
8968  while(nextSuballocItem != m_Suballocations.cend())
8969  {
8970  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8971  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8972  {
8973  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8974  {
8975  return false;
8976  }
8977  }
8978  else
8979  {
8980  // Already on next page.
8981  break;
8982  }
8983  ++nextSuballocItem;
8984  }
8985  }
8986  }
8987 
8988  // All tests passed: Success. pOffset is already filled.
8989  return true;
8990 }
8991 
8992 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8993 {
8994  VMA_ASSERT(item != m_Suballocations.end());
8995  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8996 
8997  VmaSuballocationList::iterator nextItem = item;
8998  ++nextItem;
8999  VMA_ASSERT(nextItem != m_Suballocations.end());
9000  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9001 
9002  item->size += nextItem->size;
9003  --m_FreeCount;
9004  m_Suballocations.erase(nextItem);
9005 }
9006 
9007 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9008 {
9009  // Change this suballocation to be marked as free.
9010  VmaSuballocation& suballoc = *suballocItem;
9011  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9012  suballoc.hAllocation = VK_NULL_HANDLE;
9013 
9014  // Update totals.
9015  ++m_FreeCount;
9016  m_SumFreeSize += suballoc.size;
9017 
9018  // Merge with previous and/or next suballocation if it's also free.
9019  bool mergeWithNext = false;
9020  bool mergeWithPrev = false;
9021 
9022  VmaSuballocationList::iterator nextItem = suballocItem;
9023  ++nextItem;
9024  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9025  {
9026  mergeWithNext = true;
9027  }
9028 
9029  VmaSuballocationList::iterator prevItem = suballocItem;
9030  if(suballocItem != m_Suballocations.begin())
9031  {
9032  --prevItem;
9033  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9034  {
9035  mergeWithPrev = true;
9036  }
9037  }
9038 
9039  if(mergeWithNext)
9040  {
9041  UnregisterFreeSuballocation(nextItem);
9042  MergeFreeWithNext(suballocItem);
9043  }
9044 
9045  if(mergeWithPrev)
9046  {
9047  UnregisterFreeSuballocation(prevItem);
9048  MergeFreeWithNext(prevItem);
9049  RegisterFreeSuballocation(prevItem);
9050  return prevItem;
9051  }
9052  else
9053  {
9054  RegisterFreeSuballocation(suballocItem);
9055  return suballocItem;
9056  }
9057 }
9058 
9059 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9060 {
9061  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9062  VMA_ASSERT(item->size > 0);
9063 
9064  // You may want to enable this validation at the beginning or at the end of
9065  // this function, depending on what do you want to check.
9066  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9067 
9068  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9069  {
9070  if(m_FreeSuballocationsBySize.empty())
9071  {
9072  m_FreeSuballocationsBySize.push_back(item);
9073  }
9074  else
9075  {
9076  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9077  }
9078  }
9079 
9080  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9081 }
9082 
9083 
9084 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9085 {
9086  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9087  VMA_ASSERT(item->size > 0);
9088 
9089  // You may want to enable this validation at the beginning or at the end of
9090  // this function, depending on what do you want to check.
9091  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9092 
9093  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9094  {
9095  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9096  m_FreeSuballocationsBySize.data(),
9097  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9098  item,
9099  VmaSuballocationItemSizeLess());
9100  for(size_t index = it - m_FreeSuballocationsBySize.data();
9101  index < m_FreeSuballocationsBySize.size();
9102  ++index)
9103  {
9104  if(m_FreeSuballocationsBySize[index] == item)
9105  {
9106  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9107  return;
9108  }
9109  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9110  }
9111  VMA_ASSERT(0 && "Not found.");
9112  }
9113 
9114  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9115 }
9116 
9117 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9118  VkDeviceSize bufferImageGranularity,
9119  VmaSuballocationType& inOutPrevSuballocType) const
9120 {
9121  if(bufferImageGranularity == 1 || IsEmpty())
9122  {
9123  return false;
9124  }
9125 
9126  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9127  bool typeConflictFound = false;
9128  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9129  it != m_Suballocations.cend();
9130  ++it)
9131  {
9132  const VmaSuballocationType suballocType = it->type;
9133  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9134  {
9135  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9136  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9137  {
9138  typeConflictFound = true;
9139  }
9140  inOutPrevSuballocType = suballocType;
9141  }
9142  }
9143 
9144  return typeConflictFound || minAlignment >= bufferImageGranularity;
9145 }
9146 
9148 // class VmaBlockMetadata_Linear
9149 
9150 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9151  VmaBlockMetadata(hAllocator),
9152  m_SumFreeSize(0),
9153  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9154  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9155  m_1stVectorIndex(0),
9156  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9157  m_1stNullItemsBeginCount(0),
9158  m_1stNullItemsMiddleCount(0),
9159  m_2ndNullItemsCount(0)
9160 {
9161 }
9162 
9163 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9164 {
9165 }
9166 
9167 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9168 {
9169  VmaBlockMetadata::Init(size);
9170  m_SumFreeSize = size;
9171 }
9172 
9173 bool VmaBlockMetadata_Linear::Validate() const
9174 {
9175  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9176  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9177 
9178  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9179  VMA_VALIDATE(!suballocations1st.empty() ||
9180  suballocations2nd.empty() ||
9181  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9182 
9183  if(!suballocations1st.empty())
9184  {
9185  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
9186  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9187  // Null item at the end should be just pop_back().
9188  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9189  }
9190  if(!suballocations2nd.empty())
9191  {
9192  // Null item at the end should be just pop_back().
9193  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9194  }
9195 
9196  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9197  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9198 
9199  VkDeviceSize sumUsedSize = 0;
9200  const size_t suballoc1stCount = suballocations1st.size();
9201  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9202 
9203  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9204  {
9205  const size_t suballoc2ndCount = suballocations2nd.size();
9206  size_t nullItem2ndCount = 0;
9207  for(size_t i = 0; i < suballoc2ndCount; ++i)
9208  {
9209  const VmaSuballocation& suballoc = suballocations2nd[i];
9210  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9211 
9212  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9213  VMA_VALIDATE(suballoc.offset >= offset);
9214 
9215  if(!currFree)
9216  {
9217  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9218  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9219  sumUsedSize += suballoc.size;
9220  }
9221  else
9222  {
9223  ++nullItem2ndCount;
9224  }
9225 
9226  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9227  }
9228 
9229  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9230  }
9231 
9232  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9233  {
9234  const VmaSuballocation& suballoc = suballocations1st[i];
9235  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9236  suballoc.hAllocation == VK_NULL_HANDLE);
9237  }
9238 
9239  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9240 
9241  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9242  {
9243  const VmaSuballocation& suballoc = suballocations1st[i];
9244  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9245 
9246  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9247  VMA_VALIDATE(suballoc.offset >= offset);
9248  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9249 
9250  if(!currFree)
9251  {
9252  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9253  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9254  sumUsedSize += suballoc.size;
9255  }
9256  else
9257  {
9258  ++nullItem1stCount;
9259  }
9260 
9261  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9262  }
9263  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9264 
9265  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9266  {
9267  const size_t suballoc2ndCount = suballocations2nd.size();
9268  size_t nullItem2ndCount = 0;
9269  for(size_t i = suballoc2ndCount; i--; )
9270  {
9271  const VmaSuballocation& suballoc = suballocations2nd[i];
9272  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9273 
9274  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9275  VMA_VALIDATE(suballoc.offset >= offset);
9276 
9277  if(!currFree)
9278  {
9279  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9280  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9281  sumUsedSize += suballoc.size;
9282  }
9283  else
9284  {
9285  ++nullItem2ndCount;
9286  }
9287 
9288  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9289  }
9290 
9291  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9292  }
9293 
9294  VMA_VALIDATE(offset <= GetSize());
9295  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9296 
9297  return true;
9298 }
9299 
9300 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9301 {
9302  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9303  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9304 }
9305 
9306 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9307 {
9308  const VkDeviceSize size = GetSize();
9309 
9310  /*
9311  We don't consider gaps inside allocation vectors with freed allocations because
9312  they are not suitable for reuse in linear allocator. We consider only space that
9313  is available for new allocations.
9314  */
9315  if(IsEmpty())
9316  {
9317  return size;
9318  }
9319 
9320  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9321 
9322  switch(m_2ndVectorMode)
9323  {
9324  case SECOND_VECTOR_EMPTY:
9325  /*
9326  Available space is after end of 1st, as well as before beginning of 1st (which
9327  whould make it a ring buffer).
9328  */
9329  {
9330  const size_t suballocations1stCount = suballocations1st.size();
9331  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9332  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9333  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9334  return VMA_MAX(
9335  firstSuballoc.offset,
9336  size - (lastSuballoc.offset + lastSuballoc.size));
9337  }
9338  break;
9339 
9340  case SECOND_VECTOR_RING_BUFFER:
9341  /*
9342  Available space is only between end of 2nd and beginning of 1st.
9343  */
9344  {
9345  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9346  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9347  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9348  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9349  }
9350  break;
9351 
9352  case SECOND_VECTOR_DOUBLE_STACK:
9353  /*
9354  Available space is only between end of 1st and top of 2nd.
9355  */
9356  {
9357  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9358  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9359  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9360  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9361  }
9362  break;
9363 
9364  default:
9365  VMA_ASSERT(0);
9366  return 0;
9367  }
9368 }
9369 
9370 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9371 {
9372  const VkDeviceSize size = GetSize();
9373  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9374  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9375  const size_t suballoc1stCount = suballocations1st.size();
9376  const size_t suballoc2ndCount = suballocations2nd.size();
9377 
9378  outInfo.blockCount = 1;
9379  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9380  outInfo.unusedRangeCount = 0;
9381  outInfo.usedBytes = 0;
9382  outInfo.allocationSizeMin = UINT64_MAX;
9383  outInfo.allocationSizeMax = 0;
9384  outInfo.unusedRangeSizeMin = UINT64_MAX;
9385  outInfo.unusedRangeSizeMax = 0;
9386 
9387  VkDeviceSize lastOffset = 0;
9388 
9389  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9390  {
9391  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9392  size_t nextAlloc2ndIndex = 0;
9393  while(lastOffset < freeSpace2ndTo1stEnd)
9394  {
9395  // Find next non-null allocation or move nextAllocIndex to the end.
9396  while(nextAlloc2ndIndex < suballoc2ndCount &&
9397  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9398  {
9399  ++nextAlloc2ndIndex;
9400  }
9401 
9402  // Found non-null allocation.
9403  if(nextAlloc2ndIndex < suballoc2ndCount)
9404  {
9405  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9406 
9407  // 1. Process free space before this allocation.
9408  if(lastOffset < suballoc.offset)
9409  {
9410  // There is free space from lastOffset to suballoc.offset.
9411  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9412  ++outInfo.unusedRangeCount;
9413  outInfo.unusedBytes += unusedRangeSize;
9414  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9415  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9416  }
9417 
9418  // 2. Process this allocation.
9419  // There is allocation with suballoc.offset, suballoc.size.
9420  outInfo.usedBytes += suballoc.size;
9421  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9422  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9423 
9424  // 3. Prepare for next iteration.
9425  lastOffset = suballoc.offset + suballoc.size;
9426  ++nextAlloc2ndIndex;
9427  }
9428  // We are at the end.
9429  else
9430  {
9431  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9432  if(lastOffset < freeSpace2ndTo1stEnd)
9433  {
9434  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9435  ++outInfo.unusedRangeCount;
9436  outInfo.unusedBytes += unusedRangeSize;
9437  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9438  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9439  }
9440 
9441  // End of loop.
9442  lastOffset = freeSpace2ndTo1stEnd;
9443  }
9444  }
9445  }
9446 
9447  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9448  const VkDeviceSize freeSpace1stTo2ndEnd =
9449  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9450  while(lastOffset < freeSpace1stTo2ndEnd)
9451  {
9452  // Find next non-null allocation or move nextAllocIndex to the end.
9453  while(nextAlloc1stIndex < suballoc1stCount &&
9454  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9455  {
9456  ++nextAlloc1stIndex;
9457  }
9458 
9459  // Found non-null allocation.
9460  if(nextAlloc1stIndex < suballoc1stCount)
9461  {
9462  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9463 
9464  // 1. Process free space before this allocation.
9465  if(lastOffset < suballoc.offset)
9466  {
9467  // There is free space from lastOffset to suballoc.offset.
9468  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9469  ++outInfo.unusedRangeCount;
9470  outInfo.unusedBytes += unusedRangeSize;
9471  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9472  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9473  }
9474 
9475  // 2. Process this allocation.
9476  // There is allocation with suballoc.offset, suballoc.size.
9477  outInfo.usedBytes += suballoc.size;
9478  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9479  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9480 
9481  // 3. Prepare for next iteration.
9482  lastOffset = suballoc.offset + suballoc.size;
9483  ++nextAlloc1stIndex;
9484  }
9485  // We are at the end.
9486  else
9487  {
9488  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9489  if(lastOffset < freeSpace1stTo2ndEnd)
9490  {
9491  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9492  ++outInfo.unusedRangeCount;
9493  outInfo.unusedBytes += unusedRangeSize;
9494  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9495  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9496  }
9497 
9498  // End of loop.
9499  lastOffset = freeSpace1stTo2ndEnd;
9500  }
9501  }
9502 
9503  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9504  {
9505  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9506  while(lastOffset < size)
9507  {
9508  // Find next non-null allocation or move nextAllocIndex to the end.
9509  while(nextAlloc2ndIndex != SIZE_MAX &&
9510  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9511  {
9512  --nextAlloc2ndIndex;
9513  }
9514 
9515  // Found non-null allocation.
9516  if(nextAlloc2ndIndex != SIZE_MAX)
9517  {
9518  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9519 
9520  // 1. Process free space before this allocation.
9521  if(lastOffset < suballoc.offset)
9522  {
9523  // There is free space from lastOffset to suballoc.offset.
9524  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9525  ++outInfo.unusedRangeCount;
9526  outInfo.unusedBytes += unusedRangeSize;
9527  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9528  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9529  }
9530 
9531  // 2. Process this allocation.
9532  // There is allocation with suballoc.offset, suballoc.size.
9533  outInfo.usedBytes += suballoc.size;
9534  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9535  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9536 
9537  // 3. Prepare for next iteration.
9538  lastOffset = suballoc.offset + suballoc.size;
9539  --nextAlloc2ndIndex;
9540  }
9541  // We are at the end.
9542  else
9543  {
9544  // There is free space from lastOffset to size.
9545  if(lastOffset < size)
9546  {
9547  const VkDeviceSize unusedRangeSize = size - lastOffset;
9548  ++outInfo.unusedRangeCount;
9549  outInfo.unusedBytes += unusedRangeSize;
9550  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9551  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9552  }
9553 
9554  // End of loop.
9555  lastOffset = size;
9556  }
9557  }
9558  }
9559 
9560  outInfo.unusedBytes = size - outInfo.usedBytes;
9561 }
9562 
9563 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9564 {
9565  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9566  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9567  const VkDeviceSize size = GetSize();
9568  const size_t suballoc1stCount = suballocations1st.size();
9569  const size_t suballoc2ndCount = suballocations2nd.size();
9570 
9571  inoutStats.size += size;
9572 
9573  VkDeviceSize lastOffset = 0;
9574 
9575  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9576  {
9577  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9578  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9579  while(lastOffset < freeSpace2ndTo1stEnd)
9580  {
9581  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9582  while(nextAlloc2ndIndex < suballoc2ndCount &&
9583  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9584  {
9585  ++nextAlloc2ndIndex;
9586  }
9587 
9588  // Found non-null allocation.
9589  if(nextAlloc2ndIndex < suballoc2ndCount)
9590  {
9591  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9592 
9593  // 1. Process free space before this allocation.
9594  if(lastOffset < suballoc.offset)
9595  {
9596  // There is free space from lastOffset to suballoc.offset.
9597  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9598  inoutStats.unusedSize += unusedRangeSize;
9599  ++inoutStats.unusedRangeCount;
9600  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9601  }
9602 
9603  // 2. Process this allocation.
9604  // There is allocation with suballoc.offset, suballoc.size.
9605  ++inoutStats.allocationCount;
9606 
9607  // 3. Prepare for next iteration.
9608  lastOffset = suballoc.offset + suballoc.size;
9609  ++nextAlloc2ndIndex;
9610  }
9611  // We are at the end.
9612  else
9613  {
9614  if(lastOffset < freeSpace2ndTo1stEnd)
9615  {
9616  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9617  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9618  inoutStats.unusedSize += unusedRangeSize;
9619  ++inoutStats.unusedRangeCount;
9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9621  }
9622 
9623  // End of loop.
9624  lastOffset = freeSpace2ndTo1stEnd;
9625  }
9626  }
9627  }
9628 
9629  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9630  const VkDeviceSize freeSpace1stTo2ndEnd =
9631  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9632  while(lastOffset < freeSpace1stTo2ndEnd)
9633  {
9634  // Find next non-null allocation or move nextAllocIndex to the end.
9635  while(nextAlloc1stIndex < suballoc1stCount &&
9636  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9637  {
9638  ++nextAlloc1stIndex;
9639  }
9640 
9641  // Found non-null allocation.
9642  if(nextAlloc1stIndex < suballoc1stCount)
9643  {
9644  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9645 
9646  // 1. Process free space before this allocation.
9647  if(lastOffset < suballoc.offset)
9648  {
9649  // There is free space from lastOffset to suballoc.offset.
9650  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9651  inoutStats.unusedSize += unusedRangeSize;
9652  ++inoutStats.unusedRangeCount;
9653  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9654  }
9655 
9656  // 2. Process this allocation.
9657  // There is allocation with suballoc.offset, suballoc.size.
9658  ++inoutStats.allocationCount;
9659 
9660  // 3. Prepare for next iteration.
9661  lastOffset = suballoc.offset + suballoc.size;
9662  ++nextAlloc1stIndex;
9663  }
9664  // We are at the end.
9665  else
9666  {
9667  if(lastOffset < freeSpace1stTo2ndEnd)
9668  {
9669  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9670  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9671  inoutStats.unusedSize += unusedRangeSize;
9672  ++inoutStats.unusedRangeCount;
9673  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9674  }
9675 
9676  // End of loop.
9677  lastOffset = freeSpace1stTo2ndEnd;
9678  }
9679  }
9680 
9681  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9682  {
9683  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9684  while(lastOffset < size)
9685  {
9686  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9687  while(nextAlloc2ndIndex != SIZE_MAX &&
9688  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9689  {
9690  --nextAlloc2ndIndex;
9691  }
9692 
9693  // Found non-null allocation.
9694  if(nextAlloc2ndIndex != SIZE_MAX)
9695  {
9696  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9697 
9698  // 1. Process free space before this allocation.
9699  if(lastOffset < suballoc.offset)
9700  {
9701  // There is free space from lastOffset to suballoc.offset.
9702  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9703  inoutStats.unusedSize += unusedRangeSize;
9704  ++inoutStats.unusedRangeCount;
9705  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9706  }
9707 
9708  // 2. Process this allocation.
9709  // There is allocation with suballoc.offset, suballoc.size.
9710  ++inoutStats.allocationCount;
9711 
9712  // 3. Prepare for next iteration.
9713  lastOffset = suballoc.offset + suballoc.size;
9714  --nextAlloc2ndIndex;
9715  }
9716  // We are at the end.
9717  else
9718  {
9719  if(lastOffset < size)
9720  {
9721  // There is free space from lastOffset to size.
9722  const VkDeviceSize unusedRangeSize = size - lastOffset;
9723  inoutStats.unusedSize += unusedRangeSize;
9724  ++inoutStats.unusedRangeCount;
9725  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9726  }
9727 
9728  // End of loop.
9729  lastOffset = size;
9730  }
9731  }
9732  }
9733 }
9734 
9735 #if VMA_STATS_STRING_ENABLED
9736 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9737 {
9738  const VkDeviceSize size = GetSize();
9739  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9740  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9741  const size_t suballoc1stCount = suballocations1st.size();
9742  const size_t suballoc2ndCount = suballocations2nd.size();
9743 
9744  // FIRST PASS
9745 
9746  size_t unusedRangeCount = 0;
9747  VkDeviceSize usedBytes = 0;
9748 
9749  VkDeviceSize lastOffset = 0;
9750 
9751  size_t alloc2ndCount = 0;
9752  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9753  {
9754  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9755  size_t nextAlloc2ndIndex = 0;
9756  while(lastOffset < freeSpace2ndTo1stEnd)
9757  {
9758  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9759  while(nextAlloc2ndIndex < suballoc2ndCount &&
9760  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9761  {
9762  ++nextAlloc2ndIndex;
9763  }
9764 
9765  // Found non-null allocation.
9766  if(nextAlloc2ndIndex < suballoc2ndCount)
9767  {
9768  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9769 
9770  // 1. Process free space before this allocation.
9771  if(lastOffset < suballoc.offset)
9772  {
9773  // There is free space from lastOffset to suballoc.offset.
9774  ++unusedRangeCount;
9775  }
9776 
9777  // 2. Process this allocation.
9778  // There is allocation with suballoc.offset, suballoc.size.
9779  ++alloc2ndCount;
9780  usedBytes += suballoc.size;
9781 
9782  // 3. Prepare for next iteration.
9783  lastOffset = suballoc.offset + suballoc.size;
9784  ++nextAlloc2ndIndex;
9785  }
9786  // We are at the end.
9787  else
9788  {
9789  if(lastOffset < freeSpace2ndTo1stEnd)
9790  {
9791  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9792  ++unusedRangeCount;
9793  }
9794 
9795  // End of loop.
9796  lastOffset = freeSpace2ndTo1stEnd;
9797  }
9798  }
9799  }
9800 
9801  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9802  size_t alloc1stCount = 0;
9803  const VkDeviceSize freeSpace1stTo2ndEnd =
9804  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9805  while(lastOffset < freeSpace1stTo2ndEnd)
9806  {
9807  // Find next non-null allocation or move nextAllocIndex to the end.
9808  while(nextAlloc1stIndex < suballoc1stCount &&
9809  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9810  {
9811  ++nextAlloc1stIndex;
9812  }
9813 
9814  // Found non-null allocation.
9815  if(nextAlloc1stIndex < suballoc1stCount)
9816  {
9817  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9818 
9819  // 1. Process free space before this allocation.
9820  if(lastOffset < suballoc.offset)
9821  {
9822  // There is free space from lastOffset to suballoc.offset.
9823  ++unusedRangeCount;
9824  }
9825 
9826  // 2. Process this allocation.
9827  // There is allocation with suballoc.offset, suballoc.size.
9828  ++alloc1stCount;
9829  usedBytes += suballoc.size;
9830 
9831  // 3. Prepare for next iteration.
9832  lastOffset = suballoc.offset + suballoc.size;
9833  ++nextAlloc1stIndex;
9834  }
9835  // We are at the end.
9836  else
9837  {
9838  if(lastOffset < size)
9839  {
9840  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9841  ++unusedRangeCount;
9842  }
9843 
9844  // End of loop.
9845  lastOffset = freeSpace1stTo2ndEnd;
9846  }
9847  }
9848 
9849  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9850  {
9851  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9852  while(lastOffset < size)
9853  {
9854  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9855  while(nextAlloc2ndIndex != SIZE_MAX &&
9856  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9857  {
9858  --nextAlloc2ndIndex;
9859  }
9860 
9861  // Found non-null allocation.
9862  if(nextAlloc2ndIndex != SIZE_MAX)
9863  {
9864  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9865 
9866  // 1. Process free space before this allocation.
9867  if(lastOffset < suballoc.offset)
9868  {
9869  // There is free space from lastOffset to suballoc.offset.
9870  ++unusedRangeCount;
9871  }
9872 
9873  // 2. Process this allocation.
9874  // There is allocation with suballoc.offset, suballoc.size.
9875  ++alloc2ndCount;
9876  usedBytes += suballoc.size;
9877 
9878  // 3. Prepare for next iteration.
9879  lastOffset = suballoc.offset + suballoc.size;
9880  --nextAlloc2ndIndex;
9881  }
9882  // We are at the end.
9883  else
9884  {
9885  if(lastOffset < size)
9886  {
9887  // There is free space from lastOffset to size.
9888  ++unusedRangeCount;
9889  }
9890 
9891  // End of loop.
9892  lastOffset = size;
9893  }
9894  }
9895  }
9896 
9897  const VkDeviceSize unusedBytes = size - usedBytes;
9898  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9899 
9900  // SECOND PASS
9901  lastOffset = 0;
9902 
9903  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9904  {
9905  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9906  size_t nextAlloc2ndIndex = 0;
9907  while(lastOffset < freeSpace2ndTo1stEnd)
9908  {
9909  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9910  while(nextAlloc2ndIndex < suballoc2ndCount &&
9911  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9912  {
9913  ++nextAlloc2ndIndex;
9914  }
9915 
9916  // Found non-null allocation.
9917  if(nextAlloc2ndIndex < suballoc2ndCount)
9918  {
9919  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9920 
9921  // 1. Process free space before this allocation.
9922  if(lastOffset < suballoc.offset)
9923  {
9924  // There is free space from lastOffset to suballoc.offset.
9925  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9926  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9927  }
9928 
9929  // 2. Process this allocation.
9930  // There is allocation with suballoc.offset, suballoc.size.
9931  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9932 
9933  // 3. Prepare for next iteration.
9934  lastOffset = suballoc.offset + suballoc.size;
9935  ++nextAlloc2ndIndex;
9936  }
9937  // We are at the end.
9938  else
9939  {
9940  if(lastOffset < freeSpace2ndTo1stEnd)
9941  {
9942  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9943  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9944  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9945  }
9946 
9947  // End of loop.
9948  lastOffset = freeSpace2ndTo1stEnd;
9949  }
9950  }
9951  }
9952 
9953  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9954  while(lastOffset < freeSpace1stTo2ndEnd)
9955  {
9956  // Find next non-null allocation or move nextAllocIndex to the end.
9957  while(nextAlloc1stIndex < suballoc1stCount &&
9958  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9959  {
9960  ++nextAlloc1stIndex;
9961  }
9962 
9963  // Found non-null allocation.
9964  if(nextAlloc1stIndex < suballoc1stCount)
9965  {
9966  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9967 
9968  // 1. Process free space before this allocation.
9969  if(lastOffset < suballoc.offset)
9970  {
9971  // There is free space from lastOffset to suballoc.offset.
9972  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9973  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9974  }
9975 
9976  // 2. Process this allocation.
9977  // There is allocation with suballoc.offset, suballoc.size.
9978  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9979 
9980  // 3. Prepare for next iteration.
9981  lastOffset = suballoc.offset + suballoc.size;
9982  ++nextAlloc1stIndex;
9983  }
9984  // We are at the end.
9985  else
9986  {
9987  if(lastOffset < freeSpace1stTo2ndEnd)
9988  {
9989  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9990  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9991  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9992  }
9993 
9994  // End of loop.
9995  lastOffset = freeSpace1stTo2ndEnd;
9996  }
9997  }
9998 
9999  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10000  {
10001  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10002  while(lastOffset < size)
10003  {
10004  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10005  while(nextAlloc2ndIndex != SIZE_MAX &&
10006  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10007  {
10008  --nextAlloc2ndIndex;
10009  }
10010 
10011  // Found non-null allocation.
10012  if(nextAlloc2ndIndex != SIZE_MAX)
10013  {
10014  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10015 
10016  // 1. Process free space before this allocation.
10017  if(lastOffset < suballoc.offset)
10018  {
10019  // There is free space from lastOffset to suballoc.offset.
10020  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10021  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10022  }
10023 
10024  // 2. Process this allocation.
10025  // There is allocation with suballoc.offset, suballoc.size.
10026  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10027 
10028  // 3. Prepare for next iteration.
10029  lastOffset = suballoc.offset + suballoc.size;
10030  --nextAlloc2ndIndex;
10031  }
10032  // We are at the end.
10033  else
10034  {
10035  if(lastOffset < size)
10036  {
10037  // There is free space from lastOffset to size.
10038  const VkDeviceSize unusedRangeSize = size - lastOffset;
10039  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10040  }
10041 
10042  // End of loop.
10043  lastOffset = size;
10044  }
10045  }
10046  }
10047 
10048  PrintDetailedMap_End(json);
10049 }
10050 #endif // #if VMA_STATS_STRING_ENABLED
10051 
10052 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10053  uint32_t currentFrameIndex,
10054  uint32_t frameInUseCount,
10055  VkDeviceSize bufferImageGranularity,
10056  VkDeviceSize allocSize,
10057  VkDeviceSize allocAlignment,
10058  bool upperAddress,
10059  VmaSuballocationType allocType,
10060  bool canMakeOtherLost,
10061  uint32_t strategy,
10062  VmaAllocationRequest* pAllocationRequest)
10063 {
10064  VMA_ASSERT(allocSize > 0);
10065  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10066  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10067  VMA_HEAVY_ASSERT(Validate());
10068  return upperAddress ?
10069  CreateAllocationRequest_UpperAddress(
10070  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10071  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10072  CreateAllocationRequest_LowerAddress(
10073  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10074  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10075 }
10076 
10077 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10078  uint32_t currentFrameIndex,
10079  uint32_t frameInUseCount,
10080  VkDeviceSize bufferImageGranularity,
10081  VkDeviceSize allocSize,
10082  VkDeviceSize allocAlignment,
10083  VmaSuballocationType allocType,
10084  bool canMakeOtherLost,
10085  uint32_t strategy,
10086  VmaAllocationRequest* pAllocationRequest)
10087 {
10088  const VkDeviceSize size = GetSize();
10089  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10090  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10091 
10092  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10093  {
10094  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10095  return false;
10096  }
10097 
10098  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10099  if(allocSize > size)
10100  {
10101  return false;
10102  }
10103  VkDeviceSize resultBaseOffset = size - allocSize;
10104  if(!suballocations2nd.empty())
10105  {
10106  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10107  resultBaseOffset = lastSuballoc.offset - allocSize;
10108  if(allocSize > lastSuballoc.offset)
10109  {
10110  return false;
10111  }
10112  }
10113 
10114  // Start from offset equal to end of free space.
10115  VkDeviceSize resultOffset = resultBaseOffset;
10116 
10117  // Apply VMA_DEBUG_MARGIN at the end.
10118  if(VMA_DEBUG_MARGIN > 0)
10119  {
10120  if(resultOffset < VMA_DEBUG_MARGIN)
10121  {
10122  return false;
10123  }
10124  resultOffset -= VMA_DEBUG_MARGIN;
10125  }
10126 
10127  // Apply alignment.
10128  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10129 
10130  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10131  // Make bigger alignment if necessary.
10132  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10133  {
10134  bool bufferImageGranularityConflict = false;
10135  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10136  {
10137  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10138  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10139  {
10140  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10141  {
10142  bufferImageGranularityConflict = true;
10143  break;
10144  }
10145  }
10146  else
10147  // Already on previous page.
10148  break;
10149  }
10150  if(bufferImageGranularityConflict)
10151  {
10152  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10153  }
10154  }
10155 
10156  // There is enough free space.
10157  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10158  suballocations1st.back().offset + suballocations1st.back().size :
10159  0;
10160  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10161  {
10162  // Check previous suballocations for BufferImageGranularity conflicts.
10163  // If conflict exists, allocation cannot be made here.
10164  if(bufferImageGranularity > 1)
10165  {
10166  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10167  {
10168  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10169  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10170  {
10171  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10172  {
10173  return false;
10174  }
10175  }
10176  else
10177  {
10178  // Already on next page.
10179  break;
10180  }
10181  }
10182  }
10183 
10184  // All tests passed: Success.
10185  pAllocationRequest->offset = resultOffset;
10186  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10187  pAllocationRequest->sumItemSize = 0;
10188  // pAllocationRequest->item unused.
10189  pAllocationRequest->itemsToMakeLostCount = 0;
10190  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10191  return true;
10192  }
10193 
10194  return false;
10195 }
10196 
10197 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10198  uint32_t currentFrameIndex,
10199  uint32_t frameInUseCount,
10200  VkDeviceSize bufferImageGranularity,
10201  VkDeviceSize allocSize,
10202  VkDeviceSize allocAlignment,
10203  VmaSuballocationType allocType,
10204  bool canMakeOtherLost,
10205  uint32_t strategy,
10206  VmaAllocationRequest* pAllocationRequest)
10207 {
10208  const VkDeviceSize size = GetSize();
10209  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10210  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10211 
10212  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10213  {
10214  // Try to allocate at the end of 1st vector.
10215 
10216  VkDeviceSize resultBaseOffset = 0;
10217  if(!suballocations1st.empty())
10218  {
10219  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10220  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10221  }
10222 
10223  // Start from offset equal to beginning of free space.
10224  VkDeviceSize resultOffset = resultBaseOffset;
10225 
10226  // Apply VMA_DEBUG_MARGIN at the beginning.
10227  if(VMA_DEBUG_MARGIN > 0)
10228  {
10229  resultOffset += VMA_DEBUG_MARGIN;
10230  }
10231 
10232  // Apply alignment.
10233  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10234 
10235  // Check previous suballocations for BufferImageGranularity conflicts.
10236  // Make bigger alignment if necessary.
10237  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10238  {
10239  bool bufferImageGranularityConflict = false;
10240  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10241  {
10242  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10243  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10244  {
10245  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10246  {
10247  bufferImageGranularityConflict = true;
10248  break;
10249  }
10250  }
10251  else
10252  // Already on previous page.
10253  break;
10254  }
10255  if(bufferImageGranularityConflict)
10256  {
10257  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10258  }
10259  }
10260 
10261  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10262  suballocations2nd.back().offset : size;
10263 
10264  // There is enough free space at the end after alignment.
10265  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10266  {
10267  // Check next suballocations for BufferImageGranularity conflicts.
10268  // If conflict exists, allocation cannot be made here.
10269  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10270  {
10271  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10272  {
10273  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10274  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10275  {
10276  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10277  {
10278  return false;
10279  }
10280  }
10281  else
10282  {
10283  // Already on previous page.
10284  break;
10285  }
10286  }
10287  }
10288 
10289  // All tests passed: Success.
10290  pAllocationRequest->offset = resultOffset;
10291  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10292  pAllocationRequest->sumItemSize = 0;
10293  // pAllocationRequest->item, customData unused.
10294  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10295  pAllocationRequest->itemsToMakeLostCount = 0;
10296  return true;
10297  }
10298  }
10299 
10300  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
10301  // beginning of 1st vector as the end of free space.
10302  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10303  {
10304  VMA_ASSERT(!suballocations1st.empty());
10305 
10306  VkDeviceSize resultBaseOffset = 0;
10307  if(!suballocations2nd.empty())
10308  {
10309  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10310  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10311  }
10312 
10313  // Start from offset equal to beginning of free space.
10314  VkDeviceSize resultOffset = resultBaseOffset;
10315 
10316  // Apply VMA_DEBUG_MARGIN at the beginning.
10317  if(VMA_DEBUG_MARGIN > 0)
10318  {
10319  resultOffset += VMA_DEBUG_MARGIN;
10320  }
10321 
10322  // Apply alignment.
10323  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10324 
10325  // Check previous suballocations for BufferImageGranularity conflicts.
10326  // Make bigger alignment if necessary.
10327  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10328  {
10329  bool bufferImageGranularityConflict = false;
10330  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10331  {
10332  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10333  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10334  {
10335  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10336  {
10337  bufferImageGranularityConflict = true;
10338  break;
10339  }
10340  }
10341  else
10342  // Already on previous page.
10343  break;
10344  }
10345  if(bufferImageGranularityConflict)
10346  {
10347  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10348  }
10349  }
10350 
10351  pAllocationRequest->itemsToMakeLostCount = 0;
10352  pAllocationRequest->sumItemSize = 0;
10353  size_t index1st = m_1stNullItemsBeginCount;
10354 
10355  if(canMakeOtherLost)
10356  {
10357  while(index1st < suballocations1st.size() &&
10358  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10359  {
10360  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10361  const VmaSuballocation& suballoc = suballocations1st[index1st];
10362  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10363  {
10364  // No problem.
10365  }
10366  else
10367  {
10368  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10369  if(suballoc.hAllocation->CanBecomeLost() &&
10370  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10371  {
10372  ++pAllocationRequest->itemsToMakeLostCount;
10373  pAllocationRequest->sumItemSize += suballoc.size;
10374  }
10375  else
10376  {
10377  return false;
10378  }
10379  }
10380  ++index1st;
10381  }
10382 
10383  // Check next suballocations for BufferImageGranularity conflicts.
10384  // If conflict exists, we must mark more allocations lost or fail.
10385  if(bufferImageGranularity > 1)
10386  {
10387  while(index1st < suballocations1st.size())
10388  {
10389  const VmaSuballocation& suballoc = suballocations1st[index1st];
10390  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10391  {
10392  if(suballoc.hAllocation != VK_NULL_HANDLE)
10393  {
10394  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10395  if(suballoc.hAllocation->CanBecomeLost() &&
10396  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10397  {
10398  ++pAllocationRequest->itemsToMakeLostCount;
10399  pAllocationRequest->sumItemSize += suballoc.size;
10400  }
10401  else
10402  {
10403  return false;
10404  }
10405  }
10406  }
10407  else
10408  {
10409  // Already on next page.
10410  break;
10411  }
10412  ++index1st;
10413  }
10414  }
10415 
10416  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10417  if(index1st == suballocations1st.size() &&
10418  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10419  {
10420  // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
10421  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10422  }
10423  }
10424 
10425  // There is enough free space at the end after alignment.
10426  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10427  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10428  {
10429  // Check next suballocations for BufferImageGranularity conflicts.
10430  // If conflict exists, allocation cannot be made here.
10431  if(bufferImageGranularity > 1)
10432  {
10433  for(size_t nextSuballocIndex = index1st;
10434  nextSuballocIndex < suballocations1st.size();
10435  nextSuballocIndex++)
10436  {
10437  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10438  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10439  {
10440  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10441  {
10442  return false;
10443  }
10444  }
10445  else
10446  {
10447  // Already on next page.
10448  break;
10449  }
10450  }
10451  }
10452 
10453  // All tests passed: Success.
10454  pAllocationRequest->offset = resultOffset;
10455  pAllocationRequest->sumFreeSize =
10456  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10457  - resultBaseOffset
10458  - pAllocationRequest->sumItemSize;
10459  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10460  // pAllocationRequest->item, customData unused.
10461  return true;
10462  }
10463  }
10464 
10465  return false;
10466 }
10467 
10468 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10469  uint32_t currentFrameIndex,
10470  uint32_t frameInUseCount,
10471  VmaAllocationRequest* pAllocationRequest)
10472 {
10473  if(pAllocationRequest->itemsToMakeLostCount == 0)
10474  {
10475  return true;
10476  }
10477 
10478  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10479 
10480  // We always start from 1st.
10481  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10482  size_t index = m_1stNullItemsBeginCount;
10483  size_t madeLostCount = 0;
10484  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10485  {
10486  if(index == suballocations->size())
10487  {
10488  index = 0;
10489  // If we get to the end of 1st, we wrap around to beginning of 2nd of 1st.
10490  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10491  {
10492  suballocations = &AccessSuballocations2nd();
10493  }
10494  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10495  // suballocations continues pointing at AccessSuballocations1st().
10496  VMA_ASSERT(!suballocations->empty());
10497  }
10498  VmaSuballocation& suballoc = (*suballocations)[index];
10499  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10500  {
10501  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10502  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10503  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10504  {
10505  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10506  suballoc.hAllocation = VK_NULL_HANDLE;
10507  m_SumFreeSize += suballoc.size;
10508  if(suballocations == &AccessSuballocations1st())
10509  {
10510  ++m_1stNullItemsMiddleCount;
10511  }
10512  else
10513  {
10514  ++m_2ndNullItemsCount;
10515  }
10516  ++madeLostCount;
10517  }
10518  else
10519  {
10520  return false;
10521  }
10522  }
10523  ++index;
10524  }
10525 
10526  CleanupAfterFree();
10527  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
10528 
10529  return true;
10530 }
10531 
10532 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10533 {
10534  uint32_t lostAllocationCount = 0;
10535 
10536  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10537  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10538  {
10539  VmaSuballocation& suballoc = suballocations1st[i];
10540  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10541  suballoc.hAllocation->CanBecomeLost() &&
10542  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10543  {
10544  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10545  suballoc.hAllocation = VK_NULL_HANDLE;
10546  ++m_1stNullItemsMiddleCount;
10547  m_SumFreeSize += suballoc.size;
10548  ++lostAllocationCount;
10549  }
10550  }
10551 
10552  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10553  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10554  {
10555  VmaSuballocation& suballoc = suballocations2nd[i];
10556  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10557  suballoc.hAllocation->CanBecomeLost() &&
10558  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10559  {
10560  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10561  suballoc.hAllocation = VK_NULL_HANDLE;
10562  ++m_2ndNullItemsCount;
10563  m_SumFreeSize += suballoc.size;
10564  ++lostAllocationCount;
10565  }
10566  }
10567 
10568  if(lostAllocationCount)
10569  {
10570  CleanupAfterFree();
10571  }
10572 
10573  return lostAllocationCount;
10574 }
10575 
10576 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10577 {
10578  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10579  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10580  {
10581  const VmaSuballocation& suballoc = suballocations1st[i];
10582  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10583  {
10584  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10585  {
10586  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10587  return VK_ERROR_VALIDATION_FAILED_EXT;
10588  }
10589  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10590  {
10591  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10592  return VK_ERROR_VALIDATION_FAILED_EXT;
10593  }
10594  }
10595  }
10596 
10597  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10598  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10599  {
10600  const VmaSuballocation& suballoc = suballocations2nd[i];
10601  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10602  {
10603  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10604  {
10605  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10606  return VK_ERROR_VALIDATION_FAILED_EXT;
10607  }
10608  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10609  {
10610  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10611  return VK_ERROR_VALIDATION_FAILED_EXT;
10612  }
10613  }
10614  }
10615 
10616  return VK_SUCCESS;
10617 }
10618 
10619 void VmaBlockMetadata_Linear::Alloc(
10620  const VmaAllocationRequest& request,
10621  VmaSuballocationType type,
10622  VkDeviceSize allocSize,
10623  VmaAllocation hAllocation)
10624 {
10625  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10626 
10627  switch(request.type)
10628  {
10629  case VmaAllocationRequestType::UpperAddress:
10630  {
10631  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10632  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10633  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10634  suballocations2nd.push_back(newSuballoc);
10635  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10636  }
10637  break;
10638  case VmaAllocationRequestType::EndOf1st:
10639  {
10640  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10641 
10642  VMA_ASSERT(suballocations1st.empty() ||
10643  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10644  // Check if it fits before the end of the block.
10645  VMA_ASSERT(request.offset + allocSize <= GetSize());
10646 
10647  suballocations1st.push_back(newSuballoc);
10648  }
10649  break;
10650  case VmaAllocationRequestType::EndOf2nd:
10651  {
10652  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10653  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10654  VMA_ASSERT(!suballocations1st.empty() &&
10655  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10656  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10657 
10658  switch(m_2ndVectorMode)
10659  {
10660  case SECOND_VECTOR_EMPTY:
10661  // First allocation from second part ring buffer.
10662  VMA_ASSERT(suballocations2nd.empty());
10663  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10664  break;
10665  case SECOND_VECTOR_RING_BUFFER:
10666  // 2-part ring buffer is already started.
10667  VMA_ASSERT(!suballocations2nd.empty());
10668  break;
10669  case SECOND_VECTOR_DOUBLE_STACK:
10670  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10671  break;
10672  default:
10673  VMA_ASSERT(0);
10674  }
10675 
10676  suballocations2nd.push_back(newSuballoc);
10677  }
10678  break;
10679  default:
10680  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10681  }
10682 
10683  m_SumFreeSize -= newSuballoc.size;
10684 }
10685 
10686 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10687 {
10688  FreeAtOffset(allocation->GetOffset());
10689 }
10690 
10691 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10692 {
10693  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10694  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10695 
10696  if(!suballocations1st.empty())
10697  {
10698  // First allocation: Mark it as next empty at the beginning.
10699  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10700  if(firstSuballoc.offset == offset)
10701  {
10702  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10703  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10704  m_SumFreeSize += firstSuballoc.size;
10705  ++m_1stNullItemsBeginCount;
10706  CleanupAfterFree();
10707  return;
10708  }
10709  }
10710 
10711  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10712  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10713  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10714  {
10715  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10716  if(lastSuballoc.offset == offset)
10717  {
10718  m_SumFreeSize += lastSuballoc.size;
10719  suballocations2nd.pop_back();
10720  CleanupAfterFree();
10721  return;
10722  }
10723  }
10724  // Last allocation in 1st vector.
10725  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10726  {
10727  VmaSuballocation& lastSuballoc = suballocations1st.back();
10728  if(lastSuballoc.offset == offset)
10729  {
10730  m_SumFreeSize += lastSuballoc.size;
10731  suballocations1st.pop_back();
10732  CleanupAfterFree();
10733  return;
10734  }
10735  }
10736 
10737  // Item from the middle of 1st vector.
10738  {
10739  VmaSuballocation refSuballoc;
10740  refSuballoc.offset = offset;
10741  // Rest of members stays uninitialized intentionally for better performance.
10742  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
10743  suballocations1st.begin() + m_1stNullItemsBeginCount,
10744  suballocations1st.end(),
10745  refSuballoc,
10746  VmaSuballocationOffsetLess());
10747  if(it != suballocations1st.end())
10748  {
10749  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10750  it->hAllocation = VK_NULL_HANDLE;
10751  ++m_1stNullItemsMiddleCount;
10752  m_SumFreeSize += it->size;
10753  CleanupAfterFree();
10754  return;
10755  }
10756  }
10757 
10758  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10759  {
10760  // Item from the middle of 2nd vector.
10761  VmaSuballocation refSuballoc;
10762  refSuballoc.offset = offset;
10763  // Rest of members stays uninitialized intentionally for better performance.
10764  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10765  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
10766  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
10767  if(it != suballocations2nd.end())
10768  {
10769  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10770  it->hAllocation = VK_NULL_HANDLE;
10771  ++m_2ndNullItemsCount;
10772  m_SumFreeSize += it->size;
10773  CleanupAfterFree();
10774  return;
10775  }
10776  }
10777 
10778  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10779 }
10780 
10781 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10782 {
10783  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10784  const size_t suballocCount = AccessSuballocations1st().size();
10785  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10786 }
10787 
10788 void VmaBlockMetadata_Linear::CleanupAfterFree()
10789 {
10790  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10791  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10792 
10793  if(IsEmpty())
10794  {
10795  suballocations1st.clear();
10796  suballocations2nd.clear();
10797  m_1stNullItemsBeginCount = 0;
10798  m_1stNullItemsMiddleCount = 0;
10799  m_2ndNullItemsCount = 0;
10800  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10801  }
10802  else
10803  {
10804  const size_t suballoc1stCount = suballocations1st.size();
10805  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10806  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10807 
10808  // Find more null items at the beginning of 1st vector.
10809  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10810  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10811  {
10812  ++m_1stNullItemsBeginCount;
10813  --m_1stNullItemsMiddleCount;
10814  }
10815 
10816  // Find more null items at the end of 1st vector.
10817  while(m_1stNullItemsMiddleCount > 0 &&
10818  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10819  {
10820  --m_1stNullItemsMiddleCount;
10821  suballocations1st.pop_back();
10822  }
10823 
10824  // Find more null items at the end of 2nd vector.
10825  while(m_2ndNullItemsCount > 0 &&
10826  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10827  {
10828  --m_2ndNullItemsCount;
10829  suballocations2nd.pop_back();
10830  }
10831 
10832  // Find more null items at the beginning of 2nd vector.
10833  while(m_2ndNullItemsCount > 0 &&
10834  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10835  {
10836  --m_2ndNullItemsCount;
10837  VmaVectorRemove(suballocations2nd, 0);
10838  }
10839 
10840  if(ShouldCompact1st())
10841  {
10842  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10843  size_t srcIndex = m_1stNullItemsBeginCount;
10844  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10845  {
10846  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10847  {
10848  ++srcIndex;
10849  }
10850  if(dstIndex != srcIndex)
10851  {
10852  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10853  }
10854  ++srcIndex;
10855  }
10856  suballocations1st.resize(nonNullItemCount);
10857  m_1stNullItemsBeginCount = 0;
10858  m_1stNullItemsMiddleCount = 0;
10859  }
10860 
10861  // 2nd vector became empty.
10862  if(suballocations2nd.empty())
10863  {
10864  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10865  }
10866 
10867  // 1st vector became empty.
10868  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10869  {
10870  suballocations1st.clear();
10871  m_1stNullItemsBeginCount = 0;
10872 
10873  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10874  {
10875  // Swap 1st with 2nd. Now 2nd is empty.
10876  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10877  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10878  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10879  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10880  {
10881  ++m_1stNullItemsBeginCount;
10882  --m_1stNullItemsMiddleCount;
10883  }
10884  m_2ndNullItemsCount = 0;
10885  m_1stVectorIndex ^= 1;
10886  }
10887  }
10888  }
10889 
10890  VMA_HEAVY_ASSERT(Validate());
10891 }
10892 
10893 
10895 // class VmaBlockMetadata_Buddy
10896 
10897 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10898  VmaBlockMetadata(hAllocator),
10899  m_Root(VMA_NULL),
10900  m_AllocationCount(0),
10901  m_FreeCount(1),
10902  m_SumFreeSize(0)
10903 {
10904  memset(m_FreeList, 0, sizeof(m_FreeList));
10905 }
10906 
10907 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10908 {
10909  DeleteNode(m_Root);
10910 }
10911 
10912 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10913 {
10914  VmaBlockMetadata::Init(size);
10915 
10916  m_UsableSize = VmaPrevPow2(size);
10917  m_SumFreeSize = m_UsableSize;
10918 
10919  // Calculate m_LevelCount.
10920  m_LevelCount = 1;
10921  while(m_LevelCount < MAX_LEVELS &&
10922  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10923  {
10924  ++m_LevelCount;
10925  }
10926 
10927  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10928  rootNode->offset = 0;
10929  rootNode->type = Node::TYPE_FREE;
10930  rootNode->parent = VMA_NULL;
10931  rootNode->buddy = VMA_NULL;
10932 
10933  m_Root = rootNode;
10934  AddToFreeListFront(0, rootNode);
10935 }
10936 
10937 bool VmaBlockMetadata_Buddy::Validate() const
10938 {
10939  // Validate tree.
10940  ValidationContext ctx;
10941  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10942  {
10943  VMA_VALIDATE(false && "ValidateNode failed.");
10944  }
10945  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10946  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10947 
10948  // Validate free node lists.
10949  for(uint32_t level = 0; level < m_LevelCount; ++level)
10950  {
10951  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10952  m_FreeList[level].front->free.prev == VMA_NULL);
10953 
10954  for(Node* node = m_FreeList[level].front;
10955  node != VMA_NULL;
10956  node = node->free.next)
10957  {
10958  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10959 
10960  if(node->free.next == VMA_NULL)
10961  {
10962  VMA_VALIDATE(m_FreeList[level].back == node);
10963  }
10964  else
10965  {
10966  VMA_VALIDATE(node->free.next->free.prev == node);
10967  }
10968  }
10969  }
10970 
10971  // Validate that free lists ar higher levels are empty.
10972  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10973  {
10974  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10975  }
10976 
10977  return true;
10978 }
10979 
10980 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10981 {
10982  for(uint32_t level = 0; level < m_LevelCount; ++level)
10983  {
10984  if(m_FreeList[level].front != VMA_NULL)
10985  {
10986  return LevelToNodeSize(level);
10987  }
10988  }
10989  return 0;
10990 }
10991 
10992 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10993 {
10994  const VkDeviceSize unusableSize = GetUnusableSize();
10995 
10996  outInfo.blockCount = 1;
10997 
10998  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10999  outInfo.usedBytes = outInfo.unusedBytes = 0;
11000 
11001  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11002  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11003  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11004 
11005  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11006 
11007  if(unusableSize > 0)
11008  {
11009  ++outInfo.unusedRangeCount;
11010  outInfo.unusedBytes += unusableSize;
11011  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11012  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11013  }
11014 }
11015 
11016 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11017 {
11018  const VkDeviceSize unusableSize = GetUnusableSize();
11019 
11020  inoutStats.size += GetSize();
11021  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11022  inoutStats.allocationCount += m_AllocationCount;
11023  inoutStats.unusedRangeCount += m_FreeCount;
11024  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11025 
11026  if(unusableSize > 0)
11027  {
11028  ++inoutStats.unusedRangeCount;
11029  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11030  }
11031 }
11032 
11033 #if VMA_STATS_STRING_ENABLED
11034 
11035 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11036 {
11037  // TODO optimize
11038  VmaStatInfo stat;
11039  CalcAllocationStatInfo(stat);
11040 
11041  PrintDetailedMap_Begin(
11042  json,
11043  stat.unusedBytes,
11044  stat.allocationCount,
11045  stat.unusedRangeCount);
11046 
11047  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11048 
11049  const VkDeviceSize unusableSize = GetUnusableSize();
11050  if(unusableSize > 0)
11051  {
11052  PrintDetailedMap_UnusedRange(json,
11053  m_UsableSize, // offset
11054  unusableSize); // size
11055  }
11056 
11057  PrintDetailedMap_End(json);
11058 }
11059 
11060 #endif // #if VMA_STATS_STRING_ENABLED
11061 
11062 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11063  uint32_t currentFrameIndex,
11064  uint32_t frameInUseCount,
11065  VkDeviceSize bufferImageGranularity,
11066  VkDeviceSize allocSize,
11067  VkDeviceSize allocAlignment,
11068  bool upperAddress,
11069  VmaSuballocationType allocType,
11070  bool canMakeOtherLost,
11071  uint32_t strategy,
11072  VmaAllocationRequest* pAllocationRequest)
11073 {
11074  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11075 
11076  // Simple way to respect bufferImageGranularity. May be optimized some day.
11077  // Whenever it might be an OPTIMAL image...
11078  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11079  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11080  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11081  {
11082  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11083  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11084  }
11085 
11086  if(allocSize > m_UsableSize)
11087  {
11088  return false;
11089  }
11090 
11091  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11092  for(uint32_t level = targetLevel + 1; level--; )
11093  {
11094  for(Node* freeNode = m_FreeList[level].front;
11095  freeNode != VMA_NULL;
11096  freeNode = freeNode->free.next)
11097  {
11098  if(freeNode->offset % allocAlignment == 0)
11099  {
11100  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11101  pAllocationRequest->offset = freeNode->offset;
11102  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11103  pAllocationRequest->sumItemSize = 0;
11104  pAllocationRequest->itemsToMakeLostCount = 0;
11105  pAllocationRequest->customData = (void*)(uintptr_t)level;
11106  return true;
11107  }
11108  }
11109  }
11110 
11111  return false;
11112 }
11113 
11114 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11115  uint32_t currentFrameIndex,
11116  uint32_t frameInUseCount,
11117  VmaAllocationRequest* pAllocationRequest)
11118 {
11119  /*
11120  Lost allocations are not supported in buddy allocator at the moment.
11121  Support might be added in the future.
11122  */
11123  return pAllocationRequest->itemsToMakeLostCount == 0;
11124 }
11125 
11126 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11127 {
11128  /*
11129  Lost allocations are not supported in buddy allocator at the moment.
11130  Support might be added in the future.
11131  */
11132  return 0;
11133 }
11134 
11135 void VmaBlockMetadata_Buddy::Alloc(
11136  const VmaAllocationRequest& request,
11137  VmaSuballocationType type,
11138  VkDeviceSize allocSize,
11139  VmaAllocation hAllocation)
11140 {
11141  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11142 
11143  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11144  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11145 
11146  Node* currNode = m_FreeList[currLevel].front;
11147  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11148  while(currNode->offset != request.offset)
11149  {
11150  currNode = currNode->free.next;
11151  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11152  }
11153 
11154  // Go down, splitting free nodes.
11155  while(currLevel < targetLevel)
11156  {
11157  // currNode is already first free node at currLevel.
11158  // Remove it from list of free nodes at this currLevel.
11159  RemoveFromFreeList(currLevel, currNode);
11160 
11161  const uint32_t childrenLevel = currLevel + 1;
11162 
11163  // Create two free sub-nodes.
11164  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11165  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11166 
11167  leftChild->offset = currNode->offset;
11168  leftChild->type = Node::TYPE_FREE;
11169  leftChild->parent = currNode;
11170  leftChild->buddy = rightChild;
11171 
11172  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11173  rightChild->type = Node::TYPE_FREE;
11174  rightChild->parent = currNode;
11175  rightChild->buddy = leftChild;
11176 
11177  // Convert current currNode to split type.
11178  currNode->type = Node::TYPE_SPLIT;
11179  currNode->split.leftChild = leftChild;
11180 
11181  // Add child nodes to free list. Order is important!
11182  AddToFreeListFront(childrenLevel, rightChild);
11183  AddToFreeListFront(childrenLevel, leftChild);
11184 
11185  ++m_FreeCount;
11186  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11187  ++currLevel;
11188  currNode = m_FreeList[currLevel].front;
11189 
11190  /*
11191  We can be sure that currNode, as left child of node previously split,
11192  also fullfills the alignment requirement.
11193  */
11194  }
11195 
11196  // Remove from free list.
11197  VMA_ASSERT(currLevel == targetLevel &&
11198  currNode != VMA_NULL &&
11199  currNode->type == Node::TYPE_FREE);
11200  RemoveFromFreeList(currLevel, currNode);
11201 
11202  // Convert to allocation node.
11203  currNode->type = Node::TYPE_ALLOCATION;
11204  currNode->allocation.alloc = hAllocation;
11205 
11206  ++m_AllocationCount;
11207  --m_FreeCount;
11208  m_SumFreeSize -= allocSize;
11209 }
11210 
11211 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11212 {
11213  if(node->type == Node::TYPE_SPLIT)
11214  {
11215  DeleteNode(node->split.leftChild->buddy);
11216  DeleteNode(node->split.leftChild);
11217  }
11218 
11219  vma_delete(GetAllocationCallbacks(), node);
11220 }
11221 
11222 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11223 {
11224  VMA_VALIDATE(level < m_LevelCount);
11225  VMA_VALIDATE(curr->parent == parent);
11226  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11227  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11228  switch(curr->type)
11229  {
11230  case Node::TYPE_FREE:
11231  // curr->free.prev, next are validated separately.
11232  ctx.calculatedSumFreeSize += levelNodeSize;
11233  ++ctx.calculatedFreeCount;
11234  break;
11235  case Node::TYPE_ALLOCATION:
11236  ++ctx.calculatedAllocationCount;
11237  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11238  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11239  break;
11240  case Node::TYPE_SPLIT:
11241  {
11242  const uint32_t childrenLevel = level + 1;
11243  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11244  const Node* const leftChild = curr->split.leftChild;
11245  VMA_VALIDATE(leftChild != VMA_NULL);
11246  VMA_VALIDATE(leftChild->offset == curr->offset);
11247  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11248  {
11249  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11250  }
11251  const Node* const rightChild = leftChild->buddy;
11252  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11253  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11254  {
11255  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11256  }
11257  }
11258  break;
11259  default:
11260  return false;
11261  }
11262 
11263  return true;
11264 }
11265 
11266 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11267 {
11268  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11269  uint32_t level = 0;
11270  VkDeviceSize currLevelNodeSize = m_UsableSize;
11271  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11272  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11273  {
11274  ++level;
11275  currLevelNodeSize = nextLevelNodeSize;
11276  nextLevelNodeSize = currLevelNodeSize >> 1;
11277  }
11278  return level;
11279 }
11280 
11281 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11282 {
11283  // Find node and level.
11284  Node* node = m_Root;
11285  VkDeviceSize nodeOffset = 0;
11286  uint32_t level = 0;
11287  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11288  while(node->type == Node::TYPE_SPLIT)
11289  {
11290  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11291  if(offset < nodeOffset + nextLevelSize)
11292  {
11293  node = node->split.leftChild;
11294  }
11295  else
11296  {
11297  node = node->split.leftChild->buddy;
11298  nodeOffset += nextLevelSize;
11299  }
11300  ++level;
11301  levelNodeSize = nextLevelSize;
11302  }
11303 
11304  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11305  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11306 
11307  ++m_FreeCount;
11308  --m_AllocationCount;
11309  m_SumFreeSize += alloc->GetSize();
11310 
11311  node->type = Node::TYPE_FREE;
11312 
11313  // Join free nodes if possible.
11314  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11315  {
11316  RemoveFromFreeList(level, node->buddy);
11317  Node* const parent = node->parent;
11318 
11319  vma_delete(GetAllocationCallbacks(), node->buddy);
11320  vma_delete(GetAllocationCallbacks(), node);
11321  parent->type = Node::TYPE_FREE;
11322 
11323  node = parent;
11324  --level;
11325  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11326  --m_FreeCount;
11327  }
11328 
11329  AddToFreeListFront(level, node);
11330 }
11331 
11332 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11333 {
11334  switch(node->type)
11335  {
11336  case Node::TYPE_FREE:
11337  ++outInfo.unusedRangeCount;
11338  outInfo.unusedBytes += levelNodeSize;
11339  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11340  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
11341  break;
11342  case Node::TYPE_ALLOCATION:
11343  {
11344  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11345  ++outInfo.allocationCount;
11346  outInfo.usedBytes += allocSize;
11347  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11348  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11349 
11350  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11351  if(unusedRangeSize > 0)
11352  {
11353  ++outInfo.unusedRangeCount;
11354  outInfo.unusedBytes += unusedRangeSize;
11355  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11356  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11357  }
11358  }
11359  break;
11360  case Node::TYPE_SPLIT:
11361  {
11362  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11363  const Node* const leftChild = node->split.leftChild;
11364  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11365  const Node* const rightChild = leftChild->buddy;
11366  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11367  }
11368  break;
11369  default:
11370  VMA_ASSERT(0);
11371  }
11372 }
11373 
11374 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11375 {
11376  VMA_ASSERT(node->type == Node::TYPE_FREE);
11377 
11378  // List is empty.
11379  Node* const frontNode = m_FreeList[level].front;
11380  if(frontNode == VMA_NULL)
11381  {
11382  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11383  node->free.prev = node->free.next = VMA_NULL;
11384  m_FreeList[level].front = m_FreeList[level].back = node;
11385  }
11386  else
11387  {
11388  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11389  node->free.prev = VMA_NULL;
11390  node->free.next = frontNode;
11391  frontNode->free.prev = node;
11392  m_FreeList[level].front = node;
11393  }
11394 }
11395 
11396 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11397 {
11398  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11399 
11400  // It is at the front.
11401  if(node->free.prev == VMA_NULL)
11402  {
11403  VMA_ASSERT(m_FreeList[level].front == node);
11404  m_FreeList[level].front = node->free.next;
11405  }
11406  else
11407  {
11408  Node* const prevFreeNode = node->free.prev;
11409  VMA_ASSERT(prevFreeNode->free.next == node);
11410  prevFreeNode->free.next = node->free.next;
11411  }
11412 
11413  // It is at the back.
11414  if(node->free.next == VMA_NULL)
11415  {
11416  VMA_ASSERT(m_FreeList[level].back == node);
11417  m_FreeList[level].back = node->free.prev;
11418  }
11419  else
11420  {
11421  Node* const nextFreeNode = node->free.next;
11422  VMA_ASSERT(nextFreeNode->free.prev == node);
11423  nextFreeNode->free.prev = node->free.prev;
11424  }
11425 }
11426 
11427 #if VMA_STATS_STRING_ENABLED
11428 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11429 {
11430  switch(node->type)
11431  {
11432  case Node::TYPE_FREE:
11433  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11434  break;
11435  case Node::TYPE_ALLOCATION:
11436  {
11437  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11438  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11439  if(allocSize < levelNodeSize)
11440  {
11441  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11442  }
11443  }
11444  break;
11445  case Node::TYPE_SPLIT:
11446  {
11447  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11448  const Node* const leftChild = node->split.leftChild;
11449  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11450  const Node* const rightChild = leftChild->buddy;
11451  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11452  }
11453  break;
11454  default:
11455  VMA_ASSERT(0);
11456  }
11457 }
11458 #endif // #if VMA_STATS_STRING_ENABLED
11459 
11460 
11462 // class VmaDeviceMemoryBlock
11463 
11464 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11465  m_pMetadata(VMA_NULL),
11466  m_MemoryTypeIndex(UINT32_MAX),
11467  m_Id(0),
11468  m_hMemory(VK_NULL_HANDLE),
11469  m_MapCount(0),
11470  m_pMappedData(VMA_NULL)
11471 {
11472 }
11473 
11474 void VmaDeviceMemoryBlock::Init(
11475  VmaAllocator hAllocator,
11476  VmaPool hParentPool,
11477  uint32_t newMemoryTypeIndex,
11478  VkDeviceMemory newMemory,
11479  VkDeviceSize newSize,
11480  uint32_t id,
11481  uint32_t algorithm)
11482 {
11483  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11484 
11485  m_hParentPool = hParentPool;
11486  m_MemoryTypeIndex = newMemoryTypeIndex;
11487  m_Id = id;
11488  m_hMemory = newMemory;
11489 
11490  switch(algorithm)
11491  {
11493  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11494  break;
11496  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11497  break;
11498  default:
11499  VMA_ASSERT(0);
11500  // Fall-through.
11501  case 0:
11502  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11503  }
11504  m_pMetadata->Init(newSize);
11505 }
11506 
11507 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11508 {
11509  // This is the most important assert in the entire library.
11510  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11511  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11512 
11513  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11514  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11515  m_hMemory = VK_NULL_HANDLE;
11516 
11517  vma_delete(allocator, m_pMetadata);
11518  m_pMetadata = VMA_NULL;
11519 }
11520 
11521 bool VmaDeviceMemoryBlock::Validate() const
11522 {
11523  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11524  (m_pMetadata->GetSize() != 0));
11525 
11526  return m_pMetadata->Validate();
11527 }
11528 
11529 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11530 {
11531  void* pData = nullptr;
11532  VkResult res = Map(hAllocator, 1, &pData);
11533  if(res != VK_SUCCESS)
11534  {
11535  return res;
11536  }
11537 
11538  res = m_pMetadata->CheckCorruption(pData);
11539 
11540  Unmap(hAllocator, 1);
11541 
11542  return res;
11543 }
11544 
11545 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11546 {
11547  if(count == 0)
11548  {
11549  return VK_SUCCESS;
11550  }
11551 
11552  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11553  if(m_MapCount != 0)
11554  {
11555  m_MapCount += count;
11556  VMA_ASSERT(m_pMappedData != VMA_NULL);
11557  if(ppData != VMA_NULL)
11558  {
11559  *ppData = m_pMappedData;
11560  }
11561  return VK_SUCCESS;
11562  }
11563  else
11564  {
11565  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11566  hAllocator->m_hDevice,
11567  m_hMemory,
11568  0, // offset
11569  VK_WHOLE_SIZE,
11570  0, // flags
11571  &m_pMappedData);
11572  if(result == VK_SUCCESS)
11573  {
11574  if(ppData != VMA_NULL)
11575  {
11576  *ppData = m_pMappedData;
11577  }
11578  m_MapCount = count;
11579  }
11580  return result;
11581  }
11582 }
11583 
11584 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11585 {
11586  if(count == 0)
11587  {
11588  return;
11589  }
11590 
11591  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11592  if(m_MapCount >= count)
11593  {
11594  m_MapCount -= count;
11595  if(m_MapCount == 0)
11596  {
11597  m_pMappedData = VMA_NULL;
11598  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11599  }
11600  }
11601  else
11602  {
11603  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11604  }
11605 }
11606 
11607 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11608 {
11609  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11610  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11611 
11612  void* pData;
11613  VkResult res = Map(hAllocator, 1, &pData);
11614  if(res != VK_SUCCESS)
11615  {
11616  return res;
11617  }
11618 
11619  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11620  VmaWriteMagicValue(pData, allocOffset + allocSize);
11621 
11622  Unmap(hAllocator, 1);
11623 
11624  return VK_SUCCESS;
11625 }
11626 
11627 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11628 {
11629  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11630  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11631 
11632  void* pData;
11633  VkResult res = Map(hAllocator, 1, &pData);
11634  if(res != VK_SUCCESS)
11635  {
11636  return res;
11637  }
11638 
11639  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11640  {
11641  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11642  }
11643  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11644  {
11645  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11646  }
11647 
11648  Unmap(hAllocator, 1);
11649 
11650  return VK_SUCCESS;
11651 }
11652 
11653 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11654  const VmaAllocator hAllocator,
11655  const VmaAllocation hAllocation,
11656  VkDeviceSize allocationLocalOffset,
11657  VkBuffer hBuffer,
11658  const void* pNext)
11659 {
11660  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11661  hAllocation->GetBlock() == this);
11662  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11663  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11664  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11665  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11666  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11667  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11668 }
11669 
11670 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11671  const VmaAllocator hAllocator,
11672  const VmaAllocation hAllocation,
11673  VkDeviceSize allocationLocalOffset,
11674  VkImage hImage,
11675  const void* pNext)
11676 {
11677  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11678  hAllocation->GetBlock() == this);
11679  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11680  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11681  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11682  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11683  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11684  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11685 }
11686 
11687 static void InitStatInfo(VmaStatInfo& outInfo)
11688 {
11689  memset(&outInfo, 0, sizeof(outInfo));
11690  outInfo.allocationSizeMin = UINT64_MAX;
11691  outInfo.unusedRangeSizeMin = UINT64_MAX;
11692 }
11693 
11694 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11695 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11696 {
11697  inoutInfo.blockCount += srcInfo.blockCount;
11698  inoutInfo.allocationCount += srcInfo.allocationCount;
11699  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11700  inoutInfo.usedBytes += srcInfo.usedBytes;
11701  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11702  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11703  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11704  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11705  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11706 }
11707 
11708 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11709 {
11710  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11711  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11712  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11713  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11714 }
11715 
11716 VmaPool_T::VmaPool_T(
11717  VmaAllocator hAllocator,
11718  const VmaPoolCreateInfo& createInfo,
11719  VkDeviceSize preferredBlockSize) :
11720  m_BlockVector(
11721  hAllocator,
11722  this, // hParentPool
11723  createInfo.memoryTypeIndex,
11724  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11725  createInfo.minBlockCount,
11726  createInfo.maxBlockCount,
11727  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11728  createInfo.frameInUseCount,
11729  createInfo.blockSize != 0, // explicitBlockSize
11730  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11731  m_Id(0),
11732  m_Name(VMA_NULL)
11733 {
11734 }
11735 
11736 VmaPool_T::~VmaPool_T()
11737 {
11738 }
11739 
11740 void VmaPool_T::SetName(const char* pName)
11741 {
11742  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
11743  VmaFreeString(allocs, m_Name);
11744 
11745  if(pName != VMA_NULL)
11746  {
11747  m_Name = VmaCreateStringCopy(allocs, pName);
11748  }
11749  else
11750  {
11751  m_Name = VMA_NULL;
11752  }
11753 }
11754 
11755 #if VMA_STATS_STRING_ENABLED
11756 
11757 #endif // #if VMA_STATS_STRING_ENABLED
11758 
11759 VmaBlockVector::VmaBlockVector(
11760  VmaAllocator hAllocator,
11761  VmaPool hParentPool,
11762  uint32_t memoryTypeIndex,
11763  VkDeviceSize preferredBlockSize,
11764  size_t minBlockCount,
11765  size_t maxBlockCount,
11766  VkDeviceSize bufferImageGranularity,
11767  uint32_t frameInUseCount,
11768  bool explicitBlockSize,
11769  uint32_t algorithm) :
11770  m_hAllocator(hAllocator),
11771  m_hParentPool(hParentPool),
11772  m_MemoryTypeIndex(memoryTypeIndex),
11773  m_PreferredBlockSize(preferredBlockSize),
11774  m_MinBlockCount(minBlockCount),
11775  m_MaxBlockCount(maxBlockCount),
11776  m_BufferImageGranularity(bufferImageGranularity),
11777  m_FrameInUseCount(frameInUseCount),
11778  m_ExplicitBlockSize(explicitBlockSize),
11779  m_Algorithm(algorithm),
11780  m_HasEmptyBlock(false),
11781  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11782  m_NextBlockId(0)
11783 {
11784 }
11785 
11786 VmaBlockVector::~VmaBlockVector()
11787 {
11788  for(size_t i = m_Blocks.size(); i--; )
11789  {
11790  m_Blocks[i]->Destroy(m_hAllocator);
11791  vma_delete(m_hAllocator, m_Blocks[i]);
11792  }
11793 }
11794 
11795 VkResult VmaBlockVector::CreateMinBlocks()
11796 {
11797  for(size_t i = 0; i < m_MinBlockCount; ++i)
11798  {
11799  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11800  if(res != VK_SUCCESS)
11801  {
11802  return res;
11803  }
11804  }
11805  return VK_SUCCESS;
11806 }
11807 
11808 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11809 {
11810  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11811 
11812  const size_t blockCount = m_Blocks.size();
11813 
11814  pStats->size = 0;
11815  pStats->unusedSize = 0;
11816  pStats->allocationCount = 0;
11817  pStats->unusedRangeCount = 0;
11818  pStats->unusedRangeSizeMax = 0;
11819  pStats->blockCount = blockCount;
11820 
11821  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11822  {
11823  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11824  VMA_ASSERT(pBlock);
11825  VMA_HEAVY_ASSERT(pBlock->Validate());
11826  pBlock->m_pMetadata->AddPoolStats(*pStats);
11827  }
11828 }
11829 
11830 bool VmaBlockVector::IsEmpty()
11831 {
11832  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11833  return m_Blocks.empty();
11834 }
11835 
11836 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11837 {
11838  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11839  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11840  (VMA_DEBUG_MARGIN > 0) &&
11841  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11842  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11843 }
11844 
11845 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11846 
11847 VkResult VmaBlockVector::Allocate(
11848  uint32_t currentFrameIndex,
11849  VkDeviceSize size,
11850  VkDeviceSize alignment,
11851  const VmaAllocationCreateInfo& createInfo,
11852  VmaSuballocationType suballocType,
11853  size_t allocationCount,
11854  VmaAllocation* pAllocations)
11855 {
11856  size_t allocIndex;
11857  VkResult res = VK_SUCCESS;
11858 
11859  if(IsCorruptionDetectionEnabled())
11860  {
11861  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11862  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11863  }
11864 
11865  {
11866  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11867  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11868  {
11869  res = AllocatePage(
11870  currentFrameIndex,
11871  size,
11872  alignment,
11873  createInfo,
11874  suballocType,
11875  pAllocations + allocIndex);
11876  if(res != VK_SUCCESS)
11877  {
11878  break;
11879  }
11880  }
11881  }
11882 
11883  if(res != VK_SUCCESS)
11884  {
11885  // Free all already created allocations.
11886  while(allocIndex--)
11887  {
11888  Free(pAllocations[allocIndex]);
11889  }
11890  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11891  }
11892 
11893  return res;
11894 }
11895 
11896 VkResult VmaBlockVector::AllocatePage(
11897  uint32_t currentFrameIndex,
11898  VkDeviceSize size,
11899  VkDeviceSize alignment,
11900  const VmaAllocationCreateInfo& createInfo,
11901  VmaSuballocationType suballocType,
11902  VmaAllocation* pAllocation)
11903 {
11904  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11905  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11906  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11907  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11908 
11909  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
11910  VkDeviceSize freeMemory;
11911  {
11912  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
11913  VmaBudget heapBudget = {};
11914  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
11915  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
11916  }
11917 
11918  const bool canFallbackToDedicated = !IsCustomPool();
11919  const bool canCreateNewBlock =
11920  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11921  (m_Blocks.size() < m_MaxBlockCount) &&
11922  (freeMemory >= size || !canFallbackToDedicated);
11923  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11924 
11925  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11926  // Which in turn is available only when maxBlockCount = 1.
11927  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11928  {
11929  canMakeOtherLost = false;
11930  }
11931 
11932  // Upper address can only be used with linear allocator and within single memory block.
11933  if(isUpperAddress &&
11934  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11935  {
11936  return VK_ERROR_FEATURE_NOT_PRESENT;
11937  }
11938 
11939  // Validate strategy.
11940  switch(strategy)
11941  {
11942  case 0:
11944  break;
11948  break;
11949  default:
11950  return VK_ERROR_FEATURE_NOT_PRESENT;
11951  }
11952 
11953  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11954  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11955  {
11956  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11957  }
11958 
11959  /*
11960  Under certain condition, this whole section can be skipped for optimization, so
11961  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11962  e.g. for custom pools with linear algorithm.
11963  */
11964  if(!canMakeOtherLost || canCreateNewBlock)
11965  {
11966  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11967  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11969 
11970  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11971  {
11972  // Use only last block.
11973  if(!m_Blocks.empty())
11974  {
11975  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11976  VMA_ASSERT(pCurrBlock);
11977  VkResult res = AllocateFromBlock(
11978  pCurrBlock,
11979  currentFrameIndex,
11980  size,
11981  alignment,
11982  allocFlagsCopy,
11983  createInfo.pUserData,
11984  suballocType,
11985  strategy,
11986  pAllocation);
11987  if(res == VK_SUCCESS)
11988  {
11989  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
11990  return VK_SUCCESS;
11991  }
11992  }
11993  }
11994  else
11995  {
11997  {
11998  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11999  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12000  {
12001  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12002  VMA_ASSERT(pCurrBlock);
12003  VkResult res = AllocateFromBlock(
12004  pCurrBlock,
12005  currentFrameIndex,
12006  size,
12007  alignment,
12008  allocFlagsCopy,
12009  createInfo.pUserData,
12010  suballocType,
12011  strategy,
12012  pAllocation);
12013  if(res == VK_SUCCESS)
12014  {
12015  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12016  return VK_SUCCESS;
12017  }
12018  }
12019  }
12020  else // WORST_FIT, FIRST_FIT
12021  {
12022  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12023  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12024  {
12025  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12026  VMA_ASSERT(pCurrBlock);
12027  VkResult res = AllocateFromBlock(
12028  pCurrBlock,
12029  currentFrameIndex,
12030  size,
12031  alignment,
12032  allocFlagsCopy,
12033  createInfo.pUserData,
12034  suballocType,
12035  strategy,
12036  pAllocation);
12037  if(res == VK_SUCCESS)
12038  {
12039  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12040  return VK_SUCCESS;
12041  }
12042  }
12043  }
12044  }
12045 
12046  // 2. Try to create new block.
12047  if(canCreateNewBlock)
12048  {
12049  // Calculate optimal size for new block.
12050  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12051  uint32_t newBlockSizeShift = 0;
12052  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12053 
12054  if(!m_ExplicitBlockSize)
12055  {
12056  // Allocate 1/8, 1/4, 1/2 as first blocks.
12057  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12058  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12059  {
12060  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12061  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12062  {
12063  newBlockSize = smallerNewBlockSize;
12064  ++newBlockSizeShift;
12065  }
12066  else
12067  {
12068  break;
12069  }
12070  }
12071  }
12072 
12073  size_t newBlockIndex = 0;
12074  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12075  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12076  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12077  if(!m_ExplicitBlockSize)
12078  {
12079  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12080  {
12081  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12082  if(smallerNewBlockSize >= size)
12083  {
12084  newBlockSize = smallerNewBlockSize;
12085  ++newBlockSizeShift;
12086  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12087  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12088  }
12089  else
12090  {
12091  break;
12092  }
12093  }
12094  }
12095 
12096  if(res == VK_SUCCESS)
12097  {
12098  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12099  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12100 
12101  res = AllocateFromBlock(
12102  pBlock,
12103  currentFrameIndex,
12104  size,
12105  alignment,
12106  allocFlagsCopy,
12107  createInfo.pUserData,
12108  suballocType,
12109  strategy,
12110  pAllocation);
12111  if(res == VK_SUCCESS)
12112  {
12113  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12114  return VK_SUCCESS;
12115  }
12116  else
12117  {
12118  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12119  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12120  }
12121  }
12122  }
12123  }
12124 
12125  // 3. Try to allocate from existing blocks with making other allocations lost.
12126  if(canMakeOtherLost)
12127  {
12128  uint32_t tryIndex = 0;
12129  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12130  {
12131  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12132  VmaAllocationRequest bestRequest = {};
12133  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12134 
12135  // 1. Search existing allocations.
12137  {
12138  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12139  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12140  {
12141  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12142  VMA_ASSERT(pCurrBlock);
12143  VmaAllocationRequest currRequest = {};
12144  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12145  currentFrameIndex,
12146  m_FrameInUseCount,
12147  m_BufferImageGranularity,
12148  size,
12149  alignment,
12150  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12151  suballocType,
12152  canMakeOtherLost,
12153  strategy,
12154  &currRequest))
12155  {
12156  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12157  if(pBestRequestBlock == VMA_NULL ||
12158  currRequestCost < bestRequestCost)
12159  {
12160  pBestRequestBlock = pCurrBlock;
12161  bestRequest = currRequest;
12162  bestRequestCost = currRequestCost;
12163 
12164  if(bestRequestCost == 0)
12165  {
12166  break;
12167  }
12168  }
12169  }
12170  }
12171  }
12172  else // WORST_FIT, FIRST_FIT
12173  {
12174  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12175  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12176  {
12177  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12178  VMA_ASSERT(pCurrBlock);
12179  VmaAllocationRequest currRequest = {};
12180  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12181  currentFrameIndex,
12182  m_FrameInUseCount,
12183  m_BufferImageGranularity,
12184  size,
12185  alignment,
12186  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12187  suballocType,
12188  canMakeOtherLost,
12189  strategy,
12190  &currRequest))
12191  {
12192  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12193  if(pBestRequestBlock == VMA_NULL ||
12194  currRequestCost < bestRequestCost ||
12196  {
12197  pBestRequestBlock = pCurrBlock;
12198  bestRequest = currRequest;
12199  bestRequestCost = currRequestCost;
12200 
12201  if(bestRequestCost == 0 ||
12203  {
12204  break;
12205  }
12206  }
12207  }
12208  }
12209  }
12210 
12211  if(pBestRequestBlock != VMA_NULL)
12212  {
12213  if(mapped)
12214  {
12215  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12216  if(res != VK_SUCCESS)
12217  {
12218  return res;
12219  }
12220  }
12221 
12222  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12223  currentFrameIndex,
12224  m_FrameInUseCount,
12225  &bestRequest))
12226  {
12227  // Allocate from this pBlock.
12228  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12229  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12230  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12231  UpdateHasEmptyBlock();
12232  (*pAllocation)->InitBlockAllocation(
12233  pBestRequestBlock,
12234  bestRequest.offset,
12235  alignment,
12236  size,
12237  m_MemoryTypeIndex,
12238  suballocType,
12239  mapped,
12240  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12241  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12242  VMA_DEBUG_LOG(" Returned from existing block");
12243  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12244  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12245  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12246  {
12247  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12248  }
12249  if(IsCorruptionDetectionEnabled())
12250  {
12251  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12252  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12253  }
12254  return VK_SUCCESS;
12255  }
12256  // else: Some allocations must have been touched while we are here. Next try.
12257  }
12258  else
12259  {
12260  // Could not find place in any of the blocks - break outer loop.
12261  break;
12262  }
12263  }
12264  /* Maximum number of tries exceeded - a very unlike event when many other
12265  threads are simultaneously touching allocations making it impossible to make
12266  lost at the same time as we try to allocate. */
12267  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12268  {
12269  return VK_ERROR_TOO_MANY_OBJECTS;
12270  }
12271  }
12272 
12273  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12274 }
12275 
12276 void VmaBlockVector::Free(
12277  const VmaAllocation hAllocation)
12278 {
12279  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12280 
12281  bool budgetExceeded = false;
12282  {
12283  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12284  VmaBudget heapBudget = {};
12285  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12286  budgetExceeded = heapBudget.usage >= heapBudget.budget;
12287  }
12288 
12289  // Scope for lock.
12290  {
12291  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12292 
12293  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12294 
12295  if(IsCorruptionDetectionEnabled())
12296  {
12297  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12298  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12299  }
12300 
12301  if(hAllocation->IsPersistentMap())
12302  {
12303  pBlock->Unmap(m_hAllocator, 1);
12304  }
12305 
12306  pBlock->m_pMetadata->Free(hAllocation);
12307  VMA_HEAVY_ASSERT(pBlock->Validate());
12308 
12309  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12310 
12311  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
12312  // pBlock became empty after this deallocation.
12313  if(pBlock->m_pMetadata->IsEmpty())
12314  {
12315  // Already has empty block. We don't want to have two, so delete this one.
12316  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
12317  {
12318  pBlockToDelete = pBlock;
12319  Remove(pBlock);
12320  }
12321  // else: We now have an empty block - leave it.
12322  }
12323  // pBlock didn't become empty, but we have another empty block - find and free that one.
12324  // (This is optional, heuristics.)
12325  else if(m_HasEmptyBlock && canDeleteBlock)
12326  {
12327  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12328  if(pLastBlock->m_pMetadata->IsEmpty())
12329  {
12330  pBlockToDelete = pLastBlock;
12331  m_Blocks.pop_back();
12332  }
12333  }
12334 
12335  UpdateHasEmptyBlock();
12336  IncrementallySortBlocks();
12337  }
12338 
12339  // Destruction of a free block. Deferred until this point, outside of mutex
12340  // lock, for performance reason.
12341  if(pBlockToDelete != VMA_NULL)
12342  {
12343  VMA_DEBUG_LOG(" Deleted empty block");
12344  pBlockToDelete->Destroy(m_hAllocator);
12345  vma_delete(m_hAllocator, pBlockToDelete);
12346  }
12347 }
12348 
12349 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12350 {
12351  VkDeviceSize result = 0;
12352  for(size_t i = m_Blocks.size(); i--; )
12353  {
12354  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12355  if(result >= m_PreferredBlockSize)
12356  {
12357  break;
12358  }
12359  }
12360  return result;
12361 }
12362 
12363 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12364 {
12365  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12366  {
12367  if(m_Blocks[blockIndex] == pBlock)
12368  {
12369  VmaVectorRemove(m_Blocks, blockIndex);
12370  return;
12371  }
12372  }
12373  VMA_ASSERT(0);
12374 }
12375 
12376 void VmaBlockVector::IncrementallySortBlocks()
12377 {
12378  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12379  {
12380  // Bubble sort only until first swap.
12381  for(size_t i = 1; i < m_Blocks.size(); ++i)
12382  {
12383  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12384  {
12385  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12386  return;
12387  }
12388  }
12389  }
12390 }
12391 
12392 VkResult VmaBlockVector::AllocateFromBlock(
12393  VmaDeviceMemoryBlock* pBlock,
12394  uint32_t currentFrameIndex,
12395  VkDeviceSize size,
12396  VkDeviceSize alignment,
12397  VmaAllocationCreateFlags allocFlags,
12398  void* pUserData,
12399  VmaSuballocationType suballocType,
12400  uint32_t strategy,
12401  VmaAllocation* pAllocation)
12402 {
12403  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12404  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12405  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12406  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12407 
12408  VmaAllocationRequest currRequest = {};
12409  if(pBlock->m_pMetadata->CreateAllocationRequest(
12410  currentFrameIndex,
12411  m_FrameInUseCount,
12412  m_BufferImageGranularity,
12413  size,
12414  alignment,
12415  isUpperAddress,
12416  suballocType,
12417  false, // canMakeOtherLost
12418  strategy,
12419  &currRequest))
12420  {
12421  // Allocate from pCurrBlock.
12422  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12423 
12424  if(mapped)
12425  {
12426  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12427  if(res != VK_SUCCESS)
12428  {
12429  return res;
12430  }
12431  }
12432 
12433  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12434  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12435  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12436  UpdateHasEmptyBlock();
12437  (*pAllocation)->InitBlockAllocation(
12438  pBlock,
12439  currRequest.offset,
12440  alignment,
12441  size,
12442  m_MemoryTypeIndex,
12443  suballocType,
12444  mapped,
12445  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12446  VMA_HEAVY_ASSERT(pBlock->Validate());
12447  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12448  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12449  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12450  {
12451  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12452  }
12453  if(IsCorruptionDetectionEnabled())
12454  {
12455  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12456  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12457  }
12458  return VK_SUCCESS;
12459  }
12460  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12461 }
12462 
12463 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12464 {
12465  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12466  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12467  allocInfo.allocationSize = blockSize;
12468  VkDeviceMemory mem = VK_NULL_HANDLE;
12469  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12470  if(res < 0)
12471  {
12472  return res;
12473  }
12474 
12475  // New VkDeviceMemory successfully created.
12476 
12477  // Create new Allocation for it.
12478  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12479  pBlock->Init(
12480  m_hAllocator,
12481  m_hParentPool,
12482  m_MemoryTypeIndex,
12483  mem,
12484  allocInfo.allocationSize,
12485  m_NextBlockId++,
12486  m_Algorithm);
12487 
12488  m_Blocks.push_back(pBlock);
12489  if(pNewBlockIndex != VMA_NULL)
12490  {
12491  *pNewBlockIndex = m_Blocks.size() - 1;
12492  }
12493 
12494  return VK_SUCCESS;
12495 }
12496 
12497 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12498  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12499  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12500 {
12501  const size_t blockCount = m_Blocks.size();
12502  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12503 
12504  enum BLOCK_FLAG
12505  {
12506  BLOCK_FLAG_USED = 0x00000001,
12507  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12508  };
12509 
12510  struct BlockInfo
12511  {
12512  uint32_t flags;
12513  void* pMappedData;
12514  };
12515  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12516  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12517  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12518 
12519  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12520  const size_t moveCount = moves.size();
12521  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12522  {
12523  const VmaDefragmentationMove& move = moves[moveIndex];
12524  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12525  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12526  }
12527 
12528  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12529 
12530  // Go over all blocks. Get mapped pointer or map if necessary.
12531  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12532  {
12533  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12534  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12535  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12536  {
12537  currBlockInfo.pMappedData = pBlock->GetMappedData();
12538  // It is not originally mapped - map it.
12539  if(currBlockInfo.pMappedData == VMA_NULL)
12540  {
12541  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12542  if(pDefragCtx->res == VK_SUCCESS)
12543  {
12544  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12545  }
12546  }
12547  }
12548  }
12549 
12550  // Go over all moves. Do actual data transfer.
12551  if(pDefragCtx->res == VK_SUCCESS)
12552  {
12553  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12554  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12555 
12556  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12557  {
12558  const VmaDefragmentationMove& move = moves[moveIndex];
12559 
12560  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12561  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12562 
12563  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12564 
12565  // Invalidate source.
12566  if(isNonCoherent)
12567  {
12568  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12569  memRange.memory = pSrcBlock->GetDeviceMemory();
12570  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12571  memRange.size = VMA_MIN(
12572  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12573  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12574  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12575  }
12576 
12577  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12578  memmove(
12579  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12580  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12581  static_cast<size_t>(move.size));
12582 
12583  if(IsCorruptionDetectionEnabled())
12584  {
12585  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12586  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12587  }
12588 
12589  // Flush destination.
12590  if(isNonCoherent)
12591  {
12592  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12593  memRange.memory = pDstBlock->GetDeviceMemory();
12594  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12595  memRange.size = VMA_MIN(
12596  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12597  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12598  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12599  }
12600  }
12601  }
12602 
12603  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12604  // Regardless of pCtx->res == VK_SUCCESS.
12605  for(size_t blockIndex = blockCount; blockIndex--; )
12606  {
12607  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12608  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12609  {
12610  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12611  pBlock->Unmap(m_hAllocator, 1);
12612  }
12613  }
12614 }
12615 
12616 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12617  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12618  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12619  VkCommandBuffer commandBuffer)
12620 {
12621  const size_t blockCount = m_Blocks.size();
12622 
12623  pDefragCtx->blockContexts.resize(blockCount);
12624  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12625 
12626  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12627  const size_t moveCount = moves.size();
12628  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12629  {
12630  const VmaDefragmentationMove& move = moves[moveIndex];
12631  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12632  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12633  }
12634 
12635  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12636 
12637  // Go over all blocks. Create and bind buffer for whole block if necessary.
12638  {
12639  VkBufferCreateInfo bufCreateInfo;
12640  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12641 
12642  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12643  {
12644  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12645  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12646  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12647  {
12648  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12649  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12650  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12651  if(pDefragCtx->res == VK_SUCCESS)
12652  {
12653  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12654  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12655  }
12656  }
12657  }
12658  }
12659 
12660  // Go over all moves. Post data transfer commands to command buffer.
12661  if(pDefragCtx->res == VK_SUCCESS)
12662  {
12663  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12664  {
12665  const VmaDefragmentationMove& move = moves[moveIndex];
12666 
12667  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12668  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12669 
12670  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12671 
12672  VkBufferCopy region = {
12673  move.srcOffset,
12674  move.dstOffset,
12675  move.size };
12676  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12677  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12678  }
12679  }
12680 
12681  // Save buffers to defrag context for later destruction.
12682  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12683  {
12684  pDefragCtx->res = VK_NOT_READY;
12685  }
12686 }
12687 
12688 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12689 {
12690  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12691  {
12692  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12693  if(pBlock->m_pMetadata->IsEmpty())
12694  {
12695  if(m_Blocks.size() > m_MinBlockCount)
12696  {
12697  if(pDefragmentationStats != VMA_NULL)
12698  {
12699  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12700  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12701  }
12702 
12703  VmaVectorRemove(m_Blocks, blockIndex);
12704  pBlock->Destroy(m_hAllocator);
12705  vma_delete(m_hAllocator, pBlock);
12706  }
12707  else
12708  {
12709  break;
12710  }
12711  }
12712  }
12713  UpdateHasEmptyBlock();
12714 }
12715 
12716 void VmaBlockVector::UpdateHasEmptyBlock()
12717 {
12718  m_HasEmptyBlock = false;
12719  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12720  {
12721  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12722  if(pBlock->m_pMetadata->IsEmpty())
12723  {
12724  m_HasEmptyBlock = true;
12725  break;
12726  }
12727  }
12728 }
12729 
12730 #if VMA_STATS_STRING_ENABLED
12731 
12732 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12733 {
12734  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12735 
12736  json.BeginObject();
12737 
12738  if(IsCustomPool())
12739  {
12740  const char* poolName = m_hParentPool->GetName();
12741  if(poolName != VMA_NULL && poolName[0] != '\0')
12742  {
12743  json.WriteString("Name");
12744  json.WriteString(poolName);
12745  }
12746 
12747  json.WriteString("MemoryTypeIndex");
12748  json.WriteNumber(m_MemoryTypeIndex);
12749 
12750  json.WriteString("BlockSize");
12751  json.WriteNumber(m_PreferredBlockSize);
12752 
12753  json.WriteString("BlockCount");
12754  json.BeginObject(true);
12755  if(m_MinBlockCount > 0)
12756  {
12757  json.WriteString("Min");
12758  json.WriteNumber((uint64_t)m_MinBlockCount);
12759  }
12760  if(m_MaxBlockCount < SIZE_MAX)
12761  {
12762  json.WriteString("Max");
12763  json.WriteNumber((uint64_t)m_MaxBlockCount);
12764  }
12765  json.WriteString("Cur");
12766  json.WriteNumber((uint64_t)m_Blocks.size());
12767  json.EndObject();
12768 
12769  if(m_FrameInUseCount > 0)
12770  {
12771  json.WriteString("FrameInUseCount");
12772  json.WriteNumber(m_FrameInUseCount);
12773  }
12774 
12775  if(m_Algorithm != 0)
12776  {
12777  json.WriteString("Algorithm");
12778  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12779  }
12780  }
12781  else
12782  {
12783  json.WriteString("PreferredBlockSize");
12784  json.WriteNumber(m_PreferredBlockSize);
12785  }
12786 
12787  json.WriteString("Blocks");
12788  json.BeginObject();
12789  for(size_t i = 0; i < m_Blocks.size(); ++i)
12790  {
12791  json.BeginString();
12792  json.ContinueString(m_Blocks[i]->GetId());
12793  json.EndString();
12794 
12795  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12796  }
12797  json.EndObject();
12798 
12799  json.EndObject();
12800 }
12801 
12802 #endif // #if VMA_STATS_STRING_ENABLED
12803 
12804 void VmaBlockVector::Defragment(
12805  class VmaBlockVectorDefragmentationContext* pCtx,
12806  VmaDefragmentationStats* pStats,
12807  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12808  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12809  VkCommandBuffer commandBuffer)
12810 {
12811  pCtx->res = VK_SUCCESS;
12812 
12813  const VkMemoryPropertyFlags memPropFlags =
12814  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12815  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12816 
12817  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12818  isHostVisible;
12819  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12820  !IsCorruptionDetectionEnabled() &&
12821  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12822 
12823  // There are options to defragment this memory type.
12824  if(canDefragmentOnCpu || canDefragmentOnGpu)
12825  {
12826  bool defragmentOnGpu;
12827  // There is only one option to defragment this memory type.
12828  if(canDefragmentOnGpu != canDefragmentOnCpu)
12829  {
12830  defragmentOnGpu = canDefragmentOnGpu;
12831  }
12832  // Both options are available: Heuristics to choose the best one.
12833  else
12834  {
12835  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12836  m_hAllocator->IsIntegratedGpu();
12837  }
12838 
12839  bool overlappingMoveSupported = !defragmentOnGpu;
12840 
12841  if(m_hAllocator->m_UseMutex)
12842  {
12843  m_Mutex.LockWrite();
12844  pCtx->mutexLocked = true;
12845  }
12846 
12847  pCtx->Begin(overlappingMoveSupported);
12848 
12849  // Defragment.
12850 
12851  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12852  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12853  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12854  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12855  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12856 
12857  // Accumulate statistics.
12858  if(pStats != VMA_NULL)
12859  {
12860  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12861  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12862  pStats->bytesMoved += bytesMoved;
12863  pStats->allocationsMoved += allocationsMoved;
12864  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12865  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12866  if(defragmentOnGpu)
12867  {
12868  maxGpuBytesToMove -= bytesMoved;
12869  maxGpuAllocationsToMove -= allocationsMoved;
12870  }
12871  else
12872  {
12873  maxCpuBytesToMove -= bytesMoved;
12874  maxCpuAllocationsToMove -= allocationsMoved;
12875  }
12876  }
12877 
12878  if(pCtx->res >= VK_SUCCESS)
12879  {
12880  if(defragmentOnGpu)
12881  {
12882  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12883  }
12884  else
12885  {
12886  ApplyDefragmentationMovesCpu(pCtx, moves);
12887  }
12888  }
12889  }
12890 }
12891 
12892 void VmaBlockVector::DefragmentationEnd(
12893  class VmaBlockVectorDefragmentationContext* pCtx,
12894  VmaDefragmentationStats* pStats)
12895 {
12896  // Destroy buffers.
12897  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12898  {
12899  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12900  if(blockCtx.hBuffer)
12901  {
12902  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12903  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12904  }
12905  }
12906 
12907  if(pCtx->res >= VK_SUCCESS)
12908  {
12909  FreeEmptyBlocks(pStats);
12910  }
12911 
12912  if(pCtx->mutexLocked)
12913  {
12914  VMA_ASSERT(m_hAllocator->m_UseMutex);
12915  m_Mutex.UnlockWrite();
12916  }
12917 }
12918 
12919 size_t VmaBlockVector::CalcAllocationCount() const
12920 {
12921  size_t result = 0;
12922  for(size_t i = 0; i < m_Blocks.size(); ++i)
12923  {
12924  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12925  }
12926  return result;
12927 }
12928 
12929 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12930 {
12931  if(m_BufferImageGranularity == 1)
12932  {
12933  return false;
12934  }
12935  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12936  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12937  {
12938  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12939  VMA_ASSERT(m_Algorithm == 0);
12940  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12941  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12942  {
12943  return true;
12944  }
12945  }
12946  return false;
12947 }
12948 
12949 void VmaBlockVector::MakePoolAllocationsLost(
12950  uint32_t currentFrameIndex,
12951  size_t* pLostAllocationCount)
12952 {
12953  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12954  size_t lostAllocationCount = 0;
12955  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12956  {
12957  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12958  VMA_ASSERT(pBlock);
12959  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12960  }
12961  if(pLostAllocationCount != VMA_NULL)
12962  {
12963  *pLostAllocationCount = lostAllocationCount;
12964  }
12965 }
12966 
12967 VkResult VmaBlockVector::CheckCorruption()
12968 {
12969  if(!IsCorruptionDetectionEnabled())
12970  {
12971  return VK_ERROR_FEATURE_NOT_PRESENT;
12972  }
12973 
12974  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12975  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12976  {
12977  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12978  VMA_ASSERT(pBlock);
12979  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12980  if(res != VK_SUCCESS)
12981  {
12982  return res;
12983  }
12984  }
12985  return VK_SUCCESS;
12986 }
12987 
12988 void VmaBlockVector::AddStats(VmaStats* pStats)
12989 {
12990  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12991  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12992 
12993  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12994 
12995  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12996  {
12997  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12998  VMA_ASSERT(pBlock);
12999  VMA_HEAVY_ASSERT(pBlock->Validate());
13000  VmaStatInfo allocationStatInfo;
13001  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13002  VmaAddStatInfo(pStats->total, allocationStatInfo);
13003  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13004  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13005  }
13006 }
13007 
13009 // VmaDefragmentationAlgorithm_Generic members definition
13010 
13011 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13012  VmaAllocator hAllocator,
13013  VmaBlockVector* pBlockVector,
13014  uint32_t currentFrameIndex,
13015  bool overlappingMoveSupported) :
13016  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13017  m_AllocationCount(0),
13018  m_AllAllocations(false),
13019  m_BytesMoved(0),
13020  m_AllocationsMoved(0),
13021  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13022 {
13023  // Create block info for each block.
13024  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13025  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13026  {
13027  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13028  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13029  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13030  m_Blocks.push_back(pBlockInfo);
13031  }
13032 
13033  // Sort them by m_pBlock pointer value.
13034  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13035 }
13036 
13037 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13038 {
13039  for(size_t i = m_Blocks.size(); i--; )
13040  {
13041  vma_delete(m_hAllocator, m_Blocks[i]);
13042  }
13043 }
13044 
13045 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13046 {
13047  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
13048  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13049  {
13050  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13051  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13052  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13053  {
13054  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13055  (*it)->m_Allocations.push_back(allocInfo);
13056  }
13057  else
13058  {
13059  VMA_ASSERT(0);
13060  }
13061 
13062  ++m_AllocationCount;
13063  }
13064 }
13065 
13066 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13067  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13068  VkDeviceSize maxBytesToMove,
13069  uint32_t maxAllocationsToMove)
13070 {
13071  if(m_Blocks.empty())
13072  {
13073  return VK_SUCCESS;
13074  }
13075 
13076  // This is a choice based on research.
13077  // Option 1:
13078  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13079  // Option 2:
13080  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13081  // Option 3:
13082  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13083 
13084  size_t srcBlockMinIndex = 0;
13085  // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
13086  /*
13087  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13088  {
13089  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13090  if(blocksWithNonMovableCount > 0)
13091  {
13092  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13093  }
13094  }
13095  */
13096 
13097  size_t srcBlockIndex = m_Blocks.size() - 1;
13098  size_t srcAllocIndex = SIZE_MAX;
13099  for(;;)
13100  {
13101  // 1. Find next allocation to move.
13102  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
13103  // 1.2. Then start from last to first m_Allocations.
13104  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
13105  {
13106  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
13107  {
13108  // Finished: no more allocations to process.
13109  if(srcBlockIndex == srcBlockMinIndex)
13110  {
13111  return VK_SUCCESS;
13112  }
13113  else
13114  {
13115  --srcBlockIndex;
13116  srcAllocIndex = SIZE_MAX;
13117  }
13118  }
13119  else
13120  {
13121  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
13122  }
13123  }
13124 
13125  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
13126  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
13127 
13128  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
13129  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
13130  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
13131  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
13132 
13133  // 2. Try to find new place for this allocation in preceding or current block.
13134  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
13135  {
13136  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
13137  VmaAllocationRequest dstAllocRequest;
13138  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
13139  m_CurrentFrameIndex,
13140  m_pBlockVector->GetFrameInUseCount(),
13141  m_pBlockVector->GetBufferImageGranularity(),
13142  size,
13143  alignment,
13144  false, // upperAddress
13145  suballocType,
13146  false, // canMakeOtherLost
13147  strategy,
13148  &dstAllocRequest) &&
13149  MoveMakesSense(
13150  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13151  {
13152  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13153 
13154  // Reached limit on number of allocations or bytes to move.
13155  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13156  (m_BytesMoved + size > maxBytesToMove))
13157  {
13158  return VK_SUCCESS;
13159  }
13160 
13161  VmaDefragmentationMove move;
13162  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13163  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13164  move.srcOffset = srcOffset;
13165  move.dstOffset = dstAllocRequest.offset;
13166  move.size = size;
13167  moves.push_back(move);
13168 
13169  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13170  dstAllocRequest,
13171  suballocType,
13172  size,
13173  allocInfo.m_hAllocation);
13174  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13175 
13176  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13177 
13178  if(allocInfo.m_pChanged != VMA_NULL)
13179  {
13180  *allocInfo.m_pChanged = VK_TRUE;
13181  }
13182 
13183  ++m_AllocationsMoved;
13184  m_BytesMoved += size;
13185 
13186  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13187 
13188  break;
13189  }
13190  }
13191 
13192  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13193 
13194  if(srcAllocIndex > 0)
13195  {
13196  --srcAllocIndex;
13197  }
13198  else
13199  {
13200  if(srcBlockIndex > 0)
13201  {
13202  --srcBlockIndex;
13203  srcAllocIndex = SIZE_MAX;
13204  }
13205  else
13206  {
13207  return VK_SUCCESS;
13208  }
13209  }
13210  }
13211 }
13212 
13213 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13214 {
13215  size_t result = 0;
13216  for(size_t i = 0; i < m_Blocks.size(); ++i)
13217  {
13218  if(m_Blocks[i]->m_HasNonMovableAllocations)
13219  {
13220  ++result;
13221  }
13222  }
13223  return result;
13224 }
13225 
13226 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13227  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13228  VkDeviceSize maxBytesToMove,
13229  uint32_t maxAllocationsToMove)
13230 {
13231  if(!m_AllAllocations && m_AllocationCount == 0)
13232  {
13233  return VK_SUCCESS;
13234  }
13235 
13236  const size_t blockCount = m_Blocks.size();
13237  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13238  {
13239  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13240 
13241  if(m_AllAllocations)
13242  {
13243  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13244  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13245  it != pMetadata->m_Suballocations.end();
13246  ++it)
13247  {
13248  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13249  {
13250  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13251  pBlockInfo->m_Allocations.push_back(allocInfo);
13252  }
13253  }
13254  }
13255 
13256  pBlockInfo->CalcHasNonMovableAllocations();
13257 
13258  // This is a choice based on research.
13259  // Option 1:
13260  pBlockInfo->SortAllocationsByOffsetDescending();
13261  // Option 2:
13262  //pBlockInfo->SortAllocationsBySizeDescending();
13263  }
13264 
13265  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
13266  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13267 
13268  // This is a choice based on research.
13269  const uint32_t roundCount = 2;
13270 
13271  // Execute defragmentation rounds (the main part).
13272  VkResult result = VK_SUCCESS;
13273  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13274  {
13275  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
13276  }
13277 
13278  return result;
13279 }
13280 
13281 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13282  size_t dstBlockIndex, VkDeviceSize dstOffset,
13283  size_t srcBlockIndex, VkDeviceSize srcOffset)
13284 {
13285  if(dstBlockIndex < srcBlockIndex)
13286  {
13287  return true;
13288  }
13289  if(dstBlockIndex > srcBlockIndex)
13290  {
13291  return false;
13292  }
13293  if(dstOffset < srcOffset)
13294  {
13295  return true;
13296  }
13297  return false;
13298 }
13299 
13301 // VmaDefragmentationAlgorithm_Fast
13302 
13303 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13304  VmaAllocator hAllocator,
13305  VmaBlockVector* pBlockVector,
13306  uint32_t currentFrameIndex,
13307  bool overlappingMoveSupported) :
13308  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13309  m_OverlappingMoveSupported(overlappingMoveSupported),
13310  m_AllocationCount(0),
13311  m_AllAllocations(false),
13312  m_BytesMoved(0),
13313  m_AllocationsMoved(0),
13314  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13315 {
13316  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13317 
13318 }
13319 
13320 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13321 {
13322 }
13323 
13324 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13325  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13326  VkDeviceSize maxBytesToMove,
13327  uint32_t maxAllocationsToMove)
13328 {
13329  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13330 
13331  const size_t blockCount = m_pBlockVector->GetBlockCount();
13332  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13333  {
13334  return VK_SUCCESS;
13335  }
13336 
13337  PreprocessMetadata();
13338 
13339  // Sort blocks in order from most destination.
13340 
13341  m_BlockInfos.resize(blockCount);
13342  for(size_t i = 0; i < blockCount; ++i)
13343  {
13344  m_BlockInfos[i].origBlockIndex = i;
13345  }
13346 
13347  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13348  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13349  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13350  });
13351 
13352  // THE MAIN ALGORITHM
13353 
13354  FreeSpaceDatabase freeSpaceDb;
13355 
13356  size_t dstBlockInfoIndex = 0;
13357  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13358  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13359  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13360  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13361  VkDeviceSize dstOffset = 0;
13362 
13363  bool end = false;
13364  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13365  {
13366  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13367  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13368  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13369  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13370  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13371  {
13372  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13373  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13374  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13375  if(m_AllocationsMoved == maxAllocationsToMove ||
13376  m_BytesMoved + srcAllocSize > maxBytesToMove)
13377  {
13378  end = true;
13379  break;
13380  }
13381  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13382 
13383  // Try to place it in one of free spaces from the database.
13384  size_t freeSpaceInfoIndex;
13385  VkDeviceSize dstAllocOffset;
13386  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13387  freeSpaceInfoIndex, dstAllocOffset))
13388  {
13389  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13390  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13391  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13392 
13393  // Same block
13394  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13395  {
13396  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13397 
13398  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13399 
13400  VmaSuballocation suballoc = *srcSuballocIt;
13401  suballoc.offset = dstAllocOffset;
13402  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13403  m_BytesMoved += srcAllocSize;
13404  ++m_AllocationsMoved;
13405 
13406  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13407  ++nextSuballocIt;
13408  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13409  srcSuballocIt = nextSuballocIt;
13410 
13411  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13412 
13413  VmaDefragmentationMove move = {
13414  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13415  srcAllocOffset, dstAllocOffset,
13416  srcAllocSize };
13417  moves.push_back(move);
13418  }
13419  // Different block
13420  else
13421  {
13422  // MOVE OPTION 2: Move the allocation to a different block.
13423 
13424  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13425 
13426  VmaSuballocation suballoc = *srcSuballocIt;
13427  suballoc.offset = dstAllocOffset;
13428  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13429  m_BytesMoved += srcAllocSize;
13430  ++m_AllocationsMoved;
13431 
13432  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13433  ++nextSuballocIt;
13434  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13435  srcSuballocIt = nextSuballocIt;
13436 
13437  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13438 
13439  VmaDefragmentationMove move = {
13440  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13441  srcAllocOffset, dstAllocOffset,
13442  srcAllocSize };
13443  moves.push_back(move);
13444  }
13445  }
13446  else
13447  {
13448  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13449 
13450  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
13451  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13452  dstAllocOffset + srcAllocSize > dstBlockSize)
13453  {
13454  // But before that, register remaining free space at the end of dst block.
13455  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13456 
13457  ++dstBlockInfoIndex;
13458  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13459  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13460  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13461  dstBlockSize = pDstMetadata->GetSize();
13462  dstOffset = 0;
13463  dstAllocOffset = 0;
13464  }
13465 
13466  // Same block
13467  if(dstBlockInfoIndex == srcBlockInfoIndex)
13468  {
13469  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13470 
13471  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13472 
13473  bool skipOver = overlap;
13474  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13475  {
13476  // If destination and source place overlap, skip if it would move it
13477  // by only < 1/64 of its size.
13478  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13479  }
13480 
13481  if(skipOver)
13482  {
13483  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13484 
13485  dstOffset = srcAllocOffset + srcAllocSize;
13486  ++srcSuballocIt;
13487  }
13488  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13489  else
13490  {
13491  srcSuballocIt->offset = dstAllocOffset;
13492  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13493  dstOffset = dstAllocOffset + srcAllocSize;
13494  m_BytesMoved += srcAllocSize;
13495  ++m_AllocationsMoved;
13496  ++srcSuballocIt;
13497  VmaDefragmentationMove move = {
13498  srcOrigBlockIndex, dstOrigBlockIndex,
13499  srcAllocOffset, dstAllocOffset,
13500  srcAllocSize };
13501  moves.push_back(move);
13502  }
13503  }
13504  // Different block
13505  else
13506  {
13507  // MOVE OPTION 2: Move the allocation to a different block.
13508 
13509  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13510  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13511 
13512  VmaSuballocation suballoc = *srcSuballocIt;
13513  suballoc.offset = dstAllocOffset;
13514  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13515  dstOffset = dstAllocOffset + srcAllocSize;
13516  m_BytesMoved += srcAllocSize;
13517  ++m_AllocationsMoved;
13518 
13519  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13520  ++nextSuballocIt;
13521  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13522  srcSuballocIt = nextSuballocIt;
13523 
13524  pDstMetadata->m_Suballocations.push_back(suballoc);
13525 
13526  VmaDefragmentationMove move = {
13527  srcOrigBlockIndex, dstOrigBlockIndex,
13528  srcAllocOffset, dstAllocOffset,
13529  srcAllocSize };
13530  moves.push_back(move);
13531  }
13532  }
13533  }
13534  }
13535 
13536  m_BlockInfos.clear();
13537 
13538  PostprocessMetadata();
13539 
13540  return VK_SUCCESS;
13541 }
13542 
13543 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13544 {
13545  const size_t blockCount = m_pBlockVector->GetBlockCount();
13546  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13547  {
13548  VmaBlockMetadata_Generic* const pMetadata =
13549  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13550  pMetadata->m_FreeCount = 0;
13551  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13552  pMetadata->m_FreeSuballocationsBySize.clear();
13553  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13554  it != pMetadata->m_Suballocations.end(); )
13555  {
13556  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13557  {
13558  VmaSuballocationList::iterator nextIt = it;
13559  ++nextIt;
13560  pMetadata->m_Suballocations.erase(it);
13561  it = nextIt;
13562  }
13563  else
13564  {
13565  ++it;
13566  }
13567  }
13568  }
13569 }
13570 
13571 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13572 {
13573  const size_t blockCount = m_pBlockVector->GetBlockCount();
13574  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13575  {
13576  VmaBlockMetadata_Generic* const pMetadata =
13577  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13578  const VkDeviceSize blockSize = pMetadata->GetSize();
13579 
13580  // No allocations in this block - entire area is free.
13581  if(pMetadata->m_Suballocations.empty())
13582  {
13583  pMetadata->m_FreeCount = 1;
13584  //pMetadata->m_SumFreeSize is already set to blockSize.
13585  VmaSuballocation suballoc = {
13586  0, // offset
13587  blockSize, // size
13588  VMA_NULL, // hAllocation
13589  VMA_SUBALLOCATION_TYPE_FREE };
13590  pMetadata->m_Suballocations.push_back(suballoc);
13591  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13592  }
13593  // There are some allocations in this block.
13594  else
13595  {
13596  VkDeviceSize offset = 0;
13597  VmaSuballocationList::iterator it;
13598  for(it = pMetadata->m_Suballocations.begin();
13599  it != pMetadata->m_Suballocations.end();
13600  ++it)
13601  {
13602  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13603  VMA_ASSERT(it->offset >= offset);
13604 
13605  // Need to insert preceding free space.
13606  if(it->offset > offset)
13607  {
13608  ++pMetadata->m_FreeCount;
13609  const VkDeviceSize freeSize = it->offset - offset;
13610  VmaSuballocation suballoc = {
13611  offset, // offset
13612  freeSize, // size
13613  VMA_NULL, // hAllocation
13614  VMA_SUBALLOCATION_TYPE_FREE };
13615  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13616  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13617  {
13618  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13619  }
13620  }
13621 
13622  pMetadata->m_SumFreeSize -= it->size;
13623  offset = it->offset + it->size;
13624  }
13625 
13626  // Need to insert trailing free space.
13627  if(offset < blockSize)
13628  {
13629  ++pMetadata->m_FreeCount;
13630  const VkDeviceSize freeSize = blockSize - offset;
13631  VmaSuballocation suballoc = {
13632  offset, // offset
13633  freeSize, // size
13634  VMA_NULL, // hAllocation
13635  VMA_SUBALLOCATION_TYPE_FREE };
13636  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13637  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13638  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13639  {
13640  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13641  }
13642  }
13643 
13644  VMA_SORT(
13645  pMetadata->m_FreeSuballocationsBySize.begin(),
13646  pMetadata->m_FreeSuballocationsBySize.end(),
13647  VmaSuballocationItemSizeLess());
13648  }
13649 
13650  VMA_HEAVY_ASSERT(pMetadata->Validate());
13651  }
13652 }
13653 
13654 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13655 {
13656  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13657  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13658  while(it != pMetadata->m_Suballocations.end())
13659  {
13660  if(it->offset < suballoc.offset)
13661  {
13662  ++it;
13663  }
13664  }
13665  pMetadata->m_Suballocations.insert(it, suballoc);
13666 }
13667 
13669 // VmaBlockVectorDefragmentationContext
13670 
13671 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13672  VmaAllocator hAllocator,
13673  VmaPool hCustomPool,
13674  VmaBlockVector* pBlockVector,
13675  uint32_t currFrameIndex) :
13676  res(VK_SUCCESS),
13677  mutexLocked(false),
13678  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13679  m_hAllocator(hAllocator),
13680  m_hCustomPool(hCustomPool),
13681  m_pBlockVector(pBlockVector),
13682  m_CurrFrameIndex(currFrameIndex),
13683  m_pAlgorithm(VMA_NULL),
13684  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13685  m_AllAllocations(false)
13686 {
13687 }
13688 
13689 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13690 {
13691  vma_delete(m_hAllocator, m_pAlgorithm);
13692 }
13693 
13694 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13695 {
13696  AllocInfo info = { hAlloc, pChanged };
13697  m_Allocations.push_back(info);
13698 }
13699 
13700 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13701 {
13702  const bool allAllocations = m_AllAllocations ||
13703  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13704 
13705  /********************************
13706  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13707  ********************************/
13708 
13709  /*
13710  Fast algorithm is supported only when certain criteria are met:
13711  - VMA_DEBUG_MARGIN is 0.
13712  - All allocations in this block vector are moveable.
13713  - There is no possibility of image/buffer granularity conflict.
13714  */
13715  if(VMA_DEBUG_MARGIN == 0 &&
13716  allAllocations &&
13717  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13718  {
13719  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13720  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13721  }
13722  else
13723  {
13724  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13725  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13726  }
13727 
13728  if(allAllocations)
13729  {
13730  m_pAlgorithm->AddAll();
13731  }
13732  else
13733  {
13734  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13735  {
13736  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13737  }
13738  }
13739 }
13740 
13742 // VmaDefragmentationContext
13743 
13744 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13745  VmaAllocator hAllocator,
13746  uint32_t currFrameIndex,
13747  uint32_t flags,
13748  VmaDefragmentationStats* pStats) :
13749  m_hAllocator(hAllocator),
13750  m_CurrFrameIndex(currFrameIndex),
13751  m_Flags(flags),
13752  m_pStats(pStats),
13753  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13754 {
13755  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13756 }
13757 
13758 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13759 {
13760  for(size_t i = m_CustomPoolContexts.size(); i--; )
13761  {
13762  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13763  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13764  vma_delete(m_hAllocator, pBlockVectorCtx);
13765  }
13766  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13767  {
13768  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13769  if(pBlockVectorCtx)
13770  {
13771  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13772  vma_delete(m_hAllocator, pBlockVectorCtx);
13773  }
13774  }
13775 }
13776 
13777 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13778 {
13779  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13780  {
13781  VmaPool pool = pPools[poolIndex];
13782  VMA_ASSERT(pool);
13783  // Pools with algorithm other than default are not defragmented.
13784  if(pool->m_BlockVector.GetAlgorithm() == 0)
13785  {
13786  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13787 
13788  for(size_t i = m_CustomPoolContexts.size(); i--; )
13789  {
13790  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13791  {
13792  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13793  break;
13794  }
13795  }
13796 
13797  if(!pBlockVectorDefragCtx)
13798  {
13799  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13800  m_hAllocator,
13801  pool,
13802  &pool->m_BlockVector,
13803  m_CurrFrameIndex);
13804  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13805  }
13806 
13807  pBlockVectorDefragCtx->AddAll();
13808  }
13809  }
13810 }
13811 
13812 void VmaDefragmentationContext_T::AddAllocations(
13813  uint32_t allocationCount,
13814  VmaAllocation* pAllocations,
13815  VkBool32* pAllocationsChanged)
13816 {
13817  // Dispatch pAllocations among defragmentators. Create them when necessary.
13818  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13819  {
13820  const VmaAllocation hAlloc = pAllocations[allocIndex];
13821  VMA_ASSERT(hAlloc);
13822  // DedicatedAlloc cannot be defragmented.
13823  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13824  // Lost allocation cannot be defragmented.
13825  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13826  {
13827  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13828 
13829  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13830  // This allocation belongs to custom pool.
13831  if(hAllocPool != VK_NULL_HANDLE)
13832  {
13833  // Pools with algorithm other than default are not defragmented.
13834  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13835  {
13836  for(size_t i = m_CustomPoolContexts.size(); i--; )
13837  {
13838  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13839  {
13840  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13841  break;
13842  }
13843  }
13844  if(!pBlockVectorDefragCtx)
13845  {
13846  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13847  m_hAllocator,
13848  hAllocPool,
13849  &hAllocPool->m_BlockVector,
13850  m_CurrFrameIndex);
13851  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13852  }
13853  }
13854  }
13855  // This allocation belongs to default pool.
13856  else
13857  {
13858  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13859  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13860  if(!pBlockVectorDefragCtx)
13861  {
13862  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13863  m_hAllocator,
13864  VMA_NULL, // hCustomPool
13865  m_hAllocator->m_pBlockVectors[memTypeIndex],
13866  m_CurrFrameIndex);
13867  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13868  }
13869  }
13870 
13871  if(pBlockVectorDefragCtx)
13872  {
13873  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13874  &pAllocationsChanged[allocIndex] : VMA_NULL;
13875  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13876  }
13877  }
13878  }
13879 }
13880 
13881 VkResult VmaDefragmentationContext_T::Defragment(
13882  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13883  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13884  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13885 {
13886  if(pStats)
13887  {
13888  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13889  }
13890 
13891  if(commandBuffer == VK_NULL_HANDLE)
13892  {
13893  maxGpuBytesToMove = 0;
13894  maxGpuAllocationsToMove = 0;
13895  }
13896 
13897  VkResult res = VK_SUCCESS;
13898 
13899  // Process default pools.
13900  for(uint32_t memTypeIndex = 0;
13901  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13902  ++memTypeIndex)
13903  {
13904  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13905  if(pBlockVectorCtx)
13906  {
13907  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13908  pBlockVectorCtx->GetBlockVector()->Defragment(
13909  pBlockVectorCtx,
13910  pStats,
13911  maxCpuBytesToMove, maxCpuAllocationsToMove,
13912  maxGpuBytesToMove, maxGpuAllocationsToMove,
13913  commandBuffer);
13914  if(pBlockVectorCtx->res != VK_SUCCESS)
13915  {
13916  res = pBlockVectorCtx->res;
13917  }
13918  }
13919  }
13920 
13921  // Process custom pools.
13922  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13923  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13924  ++customCtxIndex)
13925  {
13926  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13927  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13928  pBlockVectorCtx->GetBlockVector()->Defragment(
13929  pBlockVectorCtx,
13930  pStats,
13931  maxCpuBytesToMove, maxCpuAllocationsToMove,
13932  maxGpuBytesToMove, maxGpuAllocationsToMove,
13933  commandBuffer);
13934  if(pBlockVectorCtx->res != VK_SUCCESS)
13935  {
13936  res = pBlockVectorCtx->res;
13937  }
13938  }
13939 
13940  return res;
13941 }
13942 
13944 // VmaRecorder
13945 
13946 #if VMA_RECORDING_ENABLED
13947 
13948 VmaRecorder::VmaRecorder() :
13949  m_UseMutex(true),
13950  m_Flags(0),
13951  m_File(VMA_NULL),
13952  m_Freq(INT64_MAX),
13953  m_StartCounter(INT64_MAX)
13954 {
13955 }
13956 
13957 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13958 {
13959  m_UseMutex = useMutex;
13960  m_Flags = settings.flags;
13961 
13962  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13963  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13964 
13965  // Open file for writing.
13966  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13967  if(err != 0)
13968  {
13969  return VK_ERROR_INITIALIZATION_FAILED;
13970  }
13971 
13972  // Write header.
13973  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13974  fprintf(m_File, "%s\n", "1,8");
13975 
13976  return VK_SUCCESS;
13977 }
13978 
13979 VmaRecorder::~VmaRecorder()
13980 {
13981  if(m_File != VMA_NULL)
13982  {
13983  fclose(m_File);
13984  }
13985 }
13986 
13987 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13988 {
13989  CallParams callParams;
13990  GetBasicParams(callParams);
13991 
13992  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13993  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13994  Flush();
13995 }
13996 
13997 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13998 {
13999  CallParams callParams;
14000  GetBasicParams(callParams);
14001 
14002  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14003  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14004  Flush();
14005 }
14006 
14007 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14008 {
14009  CallParams callParams;
14010  GetBasicParams(callParams);
14011 
14012  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14013  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14014  createInfo.memoryTypeIndex,
14015  createInfo.flags,
14016  createInfo.blockSize,
14017  (uint64_t)createInfo.minBlockCount,
14018  (uint64_t)createInfo.maxBlockCount,
14019  createInfo.frameInUseCount,
14020  pool);
14021  Flush();
14022 }
14023 
14024 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14025 {
14026  CallParams callParams;
14027  GetBasicParams(callParams);
14028 
14029  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14030  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14031  pool);
14032  Flush();
14033 }
14034 
14035 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14036  const VkMemoryRequirements& vkMemReq,
14037  const VmaAllocationCreateInfo& createInfo,
14038  VmaAllocation allocation)
14039 {
14040  CallParams callParams;
14041  GetBasicParams(callParams);
14042 
14043  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14044  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14045  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14046  vkMemReq.size,
14047  vkMemReq.alignment,
14048  vkMemReq.memoryTypeBits,
14049  createInfo.flags,
14050  createInfo.usage,
14051  createInfo.requiredFlags,
14052  createInfo.preferredFlags,
14053  createInfo.memoryTypeBits,
14054  createInfo.pool,
14055  allocation,
14056  userDataStr.GetString());
14057  Flush();
14058 }
14059 
14060 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14061  const VkMemoryRequirements& vkMemReq,
14062  const VmaAllocationCreateInfo& createInfo,
14063  uint64_t allocationCount,
14064  const VmaAllocation* pAllocations)
14065 {
14066  CallParams callParams;
14067  GetBasicParams(callParams);
14068 
14069  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14070  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14071  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14072  vkMemReq.size,
14073  vkMemReq.alignment,
14074  vkMemReq.memoryTypeBits,
14075  createInfo.flags,
14076  createInfo.usage,
14077  createInfo.requiredFlags,
14078  createInfo.preferredFlags,
14079  createInfo.memoryTypeBits,
14080  createInfo.pool);
14081  PrintPointerList(allocationCount, pAllocations);
14082  fprintf(m_File, ",%s\n", userDataStr.GetString());
14083  Flush();
14084 }
14085 
14086 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14087  const VkMemoryRequirements& vkMemReq,
14088  bool requiresDedicatedAllocation,
14089  bool prefersDedicatedAllocation,
14090  const VmaAllocationCreateInfo& createInfo,
14091  VmaAllocation allocation)
14092 {
14093  CallParams callParams;
14094  GetBasicParams(callParams);
14095 
14096  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14097  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14098  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14099  vkMemReq.size,
14100  vkMemReq.alignment,
14101  vkMemReq.memoryTypeBits,
14102  requiresDedicatedAllocation ? 1 : 0,
14103  prefersDedicatedAllocation ? 1 : 0,
14104  createInfo.flags,
14105  createInfo.usage,
14106  createInfo.requiredFlags,
14107  createInfo.preferredFlags,
14108  createInfo.memoryTypeBits,
14109  createInfo.pool,
14110  allocation,
14111  userDataStr.GetString());
14112  Flush();
14113 }
14114 
14115 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14116  const VkMemoryRequirements& vkMemReq,
14117  bool requiresDedicatedAllocation,
14118  bool prefersDedicatedAllocation,
14119  const VmaAllocationCreateInfo& createInfo,
14120  VmaAllocation allocation)
14121 {
14122  CallParams callParams;
14123  GetBasicParams(callParams);
14124 
14125  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14126  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14127  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14128  vkMemReq.size,
14129  vkMemReq.alignment,
14130  vkMemReq.memoryTypeBits,
14131  requiresDedicatedAllocation ? 1 : 0,
14132  prefersDedicatedAllocation ? 1 : 0,
14133  createInfo.flags,
14134  createInfo.usage,
14135  createInfo.requiredFlags,
14136  createInfo.preferredFlags,
14137  createInfo.memoryTypeBits,
14138  createInfo.pool,
14139  allocation,
14140  userDataStr.GetString());
14141  Flush();
14142 }
14143 
14144 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14145  VmaAllocation allocation)
14146 {
14147  CallParams callParams;
14148  GetBasicParams(callParams);
14149 
14150  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14151  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14152  allocation);
14153  Flush();
14154 }
14155 
14156 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14157  uint64_t allocationCount,
14158  const VmaAllocation* pAllocations)
14159 {
14160  CallParams callParams;
14161  GetBasicParams(callParams);
14162 
14163  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14164  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14165  PrintPointerList(allocationCount, pAllocations);
14166  fprintf(m_File, "\n");
14167  Flush();
14168 }
14169 
14170 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14171  VmaAllocation allocation,
14172  const void* pUserData)
14173 {
14174  CallParams callParams;
14175  GetBasicParams(callParams);
14176 
14177  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14178  UserDataString userDataStr(
14179  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14180  pUserData);
14181  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14182  allocation,
14183  userDataStr.GetString());
14184  Flush();
14185 }
14186 
14187 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14188  VmaAllocation allocation)
14189 {
14190  CallParams callParams;
14191  GetBasicParams(callParams);
14192 
14193  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14194  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14195  allocation);
14196  Flush();
14197 }
14198 
14199 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14200  VmaAllocation allocation)
14201 {
14202  CallParams callParams;
14203  GetBasicParams(callParams);
14204 
14205  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14206  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14207  allocation);
14208  Flush();
14209 }
14210 
14211 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14212  VmaAllocation allocation)
14213 {
14214  CallParams callParams;
14215  GetBasicParams(callParams);
14216 
14217  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14218  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14219  allocation);
14220  Flush();
14221 }
14222 
14223 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14224  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14225 {
14226  CallParams callParams;
14227  GetBasicParams(callParams);
14228 
14229  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14230  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14231  allocation,
14232  offset,
14233  size);
14234  Flush();
14235 }
14236 
14237 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14238  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14239 {
14240  CallParams callParams;
14241  GetBasicParams(callParams);
14242 
14243  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14244  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14245  allocation,
14246  offset,
14247  size);
14248  Flush();
14249 }
14250 
14251 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14252  const VkBufferCreateInfo& bufCreateInfo,
14253  const VmaAllocationCreateInfo& allocCreateInfo,
14254  VmaAllocation allocation)
14255 {
14256  CallParams callParams;
14257  GetBasicParams(callParams);
14258 
14259  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14260  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14261  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14262  bufCreateInfo.flags,
14263  bufCreateInfo.size,
14264  bufCreateInfo.usage,
14265  bufCreateInfo.sharingMode,
14266  allocCreateInfo.flags,
14267  allocCreateInfo.usage,
14268  allocCreateInfo.requiredFlags,
14269  allocCreateInfo.preferredFlags,
14270  allocCreateInfo.memoryTypeBits,
14271  allocCreateInfo.pool,
14272  allocation,
14273  userDataStr.GetString());
14274  Flush();
14275 }
14276 
14277 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14278  const VkImageCreateInfo& imageCreateInfo,
14279  const VmaAllocationCreateInfo& allocCreateInfo,
14280  VmaAllocation allocation)
14281 {
14282  CallParams callParams;
14283  GetBasicParams(callParams);
14284 
14285  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14286  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14287  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14288  imageCreateInfo.flags,
14289  imageCreateInfo.imageType,
14290  imageCreateInfo.format,
14291  imageCreateInfo.extent.width,
14292  imageCreateInfo.extent.height,
14293  imageCreateInfo.extent.depth,
14294  imageCreateInfo.mipLevels,
14295  imageCreateInfo.arrayLayers,
14296  imageCreateInfo.samples,
14297  imageCreateInfo.tiling,
14298  imageCreateInfo.usage,
14299  imageCreateInfo.sharingMode,
14300  imageCreateInfo.initialLayout,
14301  allocCreateInfo.flags,
14302  allocCreateInfo.usage,
14303  allocCreateInfo.requiredFlags,
14304  allocCreateInfo.preferredFlags,
14305  allocCreateInfo.memoryTypeBits,
14306  allocCreateInfo.pool,
14307  allocation,
14308  userDataStr.GetString());
14309  Flush();
14310 }
14311 
14312 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14313  VmaAllocation allocation)
14314 {
14315  CallParams callParams;
14316  GetBasicParams(callParams);
14317 
14318  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14319  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14320  allocation);
14321  Flush();
14322 }
14323 
14324 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14325  VmaAllocation allocation)
14326 {
14327  CallParams callParams;
14328  GetBasicParams(callParams);
14329 
14330  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14331  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14332  allocation);
14333  Flush();
14334 }
14335 
14336 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14337  VmaAllocation allocation)
14338 {
14339  CallParams callParams;
14340  GetBasicParams(callParams);
14341 
14342  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14343  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14344  allocation);
14345  Flush();
14346 }
14347 
14348 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14349  VmaAllocation allocation)
14350 {
14351  CallParams callParams;
14352  GetBasicParams(callParams);
14353 
14354  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14355  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14356  allocation);
14357  Flush();
14358 }
14359 
14360 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14361  VmaPool pool)
14362 {
14363  CallParams callParams;
14364  GetBasicParams(callParams);
14365 
14366  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14367  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14368  pool);
14369  Flush();
14370 }
14371 
14372 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14373  const VmaDefragmentationInfo2& info,
14375 {
14376  CallParams callParams;
14377  GetBasicParams(callParams);
14378 
14379  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14380  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14381  info.flags);
14382  PrintPointerList(info.allocationCount, info.pAllocations);
14383  fprintf(m_File, ",");
14384  PrintPointerList(info.poolCount, info.pPools);
14385  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14386  info.maxCpuBytesToMove,
14388  info.maxGpuBytesToMove,
14390  info.commandBuffer,
14391  ctx);
14392  Flush();
14393 }
14394 
14395 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14397 {
14398  CallParams callParams;
14399  GetBasicParams(callParams);
14400 
14401  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14402  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14403  ctx);
14404  Flush();
14405 }
14406 
14407 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14408  VmaPool pool,
14409  const char* name)
14410 {
14411  CallParams callParams;
14412  GetBasicParams(callParams);
14413 
14414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14415  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14416  pool, name != VMA_NULL ? name : "");
14417  Flush();
14418 }
14419 
14420 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14421 {
14422  if(pUserData != VMA_NULL)
14423  {
14424  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14425  {
14426  m_Str = (const char*)pUserData;
14427  }
14428  else
14429  {
14430  sprintf_s(m_PtrStr, "%p", pUserData);
14431  m_Str = m_PtrStr;
14432  }
14433  }
14434  else
14435  {
14436  m_Str = "";
14437  }
14438 }
14439 
14440 void VmaRecorder::WriteConfiguration(
14441  const VkPhysicalDeviceProperties& devProps,
14442  const VkPhysicalDeviceMemoryProperties& memProps,
14443  uint32_t vulkanApiVersion,
14444  bool dedicatedAllocationExtensionEnabled,
14445  bool bindMemory2ExtensionEnabled,
14446  bool memoryBudgetExtensionEnabled)
14447 {
14448  fprintf(m_File, "Config,Begin\n");
14449 
14450  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
14451 
14452  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14453  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14454  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14455  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14456  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14457  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14458 
14459  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14460  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14461  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14462 
14463  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14464  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14465  {
14466  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14467  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14468  }
14469  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14470  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14471  {
14472  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14473  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14474  }
14475 
14476  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14477  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14478  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
14479 
14480  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14481  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14482  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14483  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14484  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14485  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14486  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14487  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14488  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14489 
14490  fprintf(m_File, "Config,End\n");
14491 }
14492 
14493 void VmaRecorder::GetBasicParams(CallParams& outParams)
14494 {
14495  outParams.threadId = GetCurrentThreadId();
14496 
14497  LARGE_INTEGER counter;
14498  QueryPerformanceCounter(&counter);
14499  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14500 }
14501 
14502 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14503 {
14504  if(count)
14505  {
14506  fprintf(m_File, "%p", pItems[0]);
14507  for(uint64_t i = 1; i < count; ++i)
14508  {
14509  fprintf(m_File, " %p", pItems[i]);
14510  }
14511  }
14512 }
14513 
14514 void VmaRecorder::Flush()
14515 {
14516  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14517  {
14518  fflush(m_File);
14519  }
14520 }
14521 
14522 #endif // #if VMA_RECORDING_ENABLED
14523 
14525 // VmaAllocationObjectAllocator
14526 
14527 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14528  m_Allocator(pAllocationCallbacks, 1024)
14529 {
14530 }
14531 
14532 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14533 {
14534  VmaMutexLock mutexLock(m_Mutex);
14535  return m_Allocator.Alloc();
14536 }
14537 
14538 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14539 {
14540  VmaMutexLock mutexLock(m_Mutex);
14541  m_Allocator.Free(hAlloc);
14542 }
14543 
14545 // VmaAllocator_T
14546 
14547 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14548  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14549  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
14550  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14551  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14552  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14553  m_hDevice(pCreateInfo->device),
14554  m_hInstance(pCreateInfo->instance),
14555  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14556  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14557  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14558  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14559  m_HeapSizeLimitMask(0),
14560  m_PreferredLargeHeapBlockSize(0),
14561  m_PhysicalDevice(pCreateInfo->physicalDevice),
14562  m_CurrentFrameIndex(0),
14563  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14564  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14565  m_NextPoolId(0)
14567  ,m_pRecorder(VMA_NULL)
14568 #endif
14569 {
14570  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14571  {
14572  m_UseKhrDedicatedAllocation = false;
14573  m_UseKhrBindMemory2 = false;
14574  }
14575 
14576  if(VMA_DEBUG_DETECT_CORRUPTION)
14577  {
14578  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14579  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14580  }
14581 
14582  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14583 
14584  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14585  {
14586 #if !(VMA_DEDICATED_ALLOCATION)
14588  {
14589  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14590  }
14591 #endif
14592 #if !(VMA_BIND_MEMORY2)
14593  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14594  {
14595  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14596  }
14597 #endif
14598  }
14599 #if !(VMA_MEMORY_BUDGET)
14600  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14601  {
14602  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14603  }
14604 #endif
14605 #if VMA_VULKAN_VERSION < 1001000
14606  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14607  {
14608  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
14609  }
14610 #endif
14611 
14612  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14613  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14614  memset(&m_MemProps, 0, sizeof(m_MemProps));
14615 
14616  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14617  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14618  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14619 
14620  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14621  {
14622  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14623  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14624  }
14625 
14626  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14627 
14628  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14629  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14630 
14631  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14632  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14633  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14634  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14635 
14636  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14637  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14638 
14639  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14640  {
14641  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14642  {
14643  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14644  if(limit != VK_WHOLE_SIZE)
14645  {
14646  m_HeapSizeLimitMask |= 1u << heapIndex;
14647  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14648  {
14649  m_MemProps.memoryHeaps[heapIndex].size = limit;
14650  }
14651  }
14652  }
14653  }
14654 
14655  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14656  {
14657  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14658 
14659  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14660  this,
14661  VK_NULL_HANDLE, // hParentPool
14662  memTypeIndex,
14663  preferredBlockSize,
14664  0,
14665  SIZE_MAX,
14666  GetBufferImageGranularity(),
14667  pCreateInfo->frameInUseCount,
14668  false, // explicitBlockSize
14669  false); // linearAlgorithm
14670  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14671  // becase minBlockCount is 0.
14672  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14673 
14674  }
14675 }
14676 
14677 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14678 {
14679  VkResult res = VK_SUCCESS;
14680 
14681  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14682  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14683  {
14684 #if VMA_RECORDING_ENABLED
14685  m_pRecorder = vma_new(this, VmaRecorder)();
14686  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14687  if(res != VK_SUCCESS)
14688  {
14689  return res;
14690  }
14691  m_pRecorder->WriteConfiguration(
14692  m_PhysicalDeviceProperties,
14693  m_MemProps,
14694  m_VulkanApiVersion,
14695  m_UseKhrDedicatedAllocation,
14696  m_UseKhrBindMemory2,
14697  m_UseExtMemoryBudget);
14698  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14699 #else
14700  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14701  return VK_ERROR_FEATURE_NOT_PRESENT;
14702 #endif
14703  }
14704 
14705 #if VMA_MEMORY_BUDGET
14706  if(m_UseExtMemoryBudget)
14707  {
14708  UpdateVulkanBudget();
14709  }
14710 #endif // #if VMA_MEMORY_BUDGET
14711 
14712  return res;
14713 }
14714 
14715 VmaAllocator_T::~VmaAllocator_T()
14716 {
14717 #if VMA_RECORDING_ENABLED
14718  if(m_pRecorder != VMA_NULL)
14719  {
14720  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14721  vma_delete(this, m_pRecorder);
14722  }
14723 #endif
14724 
14725  VMA_ASSERT(m_Pools.empty());
14726 
14727  for(size_t i = GetMemoryTypeCount(); i--; )
14728  {
14729  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14730  {
14731  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14732  }
14733 
14734  vma_delete(this, m_pDedicatedAllocations[i]);
14735  vma_delete(this, m_pBlockVectors[i]);
14736  }
14737 }
14738 
14739 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14740 {
14741 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14742  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14743  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14744  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14745  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14746  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14747  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14748  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14749  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14750  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14751  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14752  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14753  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14754  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14755  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14756  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14757  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14758  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14759 #if VMA_VULKAN_VERSION >= 1001000
14760  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14761  {
14762  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
14763  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14764  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
14765  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14766  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
14767  m_VulkanFunctions.vkBindBufferMemory2KHR =
14768  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
14769  m_VulkanFunctions.vkBindImageMemory2KHR =
14770  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
14771  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
14772  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
14773  }
14774 #endif
14775 #if VMA_DEDICATED_ALLOCATION
14776  if(m_UseKhrDedicatedAllocation)
14777  {
14778  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14779  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14780  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14781  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14782  }
14783 #endif
14784 #if VMA_BIND_MEMORY2
14785  if(m_UseKhrBindMemory2)
14786  {
14787  m_VulkanFunctions.vkBindBufferMemory2KHR =
14788  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
14789  m_VulkanFunctions.vkBindImageMemory2KHR =
14790  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
14791  }
14792 #endif // #if VMA_BIND_MEMORY2
14793 #if VMA_MEMORY_BUDGET
14794  if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14795  {
14796  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
14797  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
14798  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
14799  }
14800 #endif // #if VMA_MEMORY_BUDGET
14801 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14802 
14803 #define VMA_COPY_IF_NOT_NULL(funcName) \
14804  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14805 
14806  if(pVulkanFunctions != VMA_NULL)
14807  {
14808  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14809  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14810  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14811  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14812  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14813  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14814  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14815  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14816  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14817  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14818  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14819  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14820  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14821  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14822  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14823  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14824  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14825 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14826  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14827  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14828 #endif
14829 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
14830  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
14831  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
14832 #endif
14833 #if VMA_MEMORY_BUDGET
14834  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
14835 #endif
14836  }
14837 
14838 #undef VMA_COPY_IF_NOT_NULL
14839 
14840  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14841  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14842  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14843  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14844  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14845  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14846  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14847  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14848  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14849  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14850  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14851  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14852  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14853  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14854  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14855  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14856  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14857  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14858  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14859 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14860  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
14861  {
14862  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14863  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14864  }
14865 #endif
14866 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
14867  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
14868  {
14869  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
14870  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
14871  }
14872 #endif
14873 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
14874  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14875  {
14876  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
14877  }
14878 #endif
14879 }
14880 
14881 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14882 {
14883  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14884  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14885  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14886  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14887 }
14888 
14889 VkResult VmaAllocator_T::AllocateMemoryOfType(
14890  VkDeviceSize size,
14891  VkDeviceSize alignment,
14892  bool dedicatedAllocation,
14893  VkBuffer dedicatedBuffer,
14894  VkImage dedicatedImage,
14895  const VmaAllocationCreateInfo& createInfo,
14896  uint32_t memTypeIndex,
14897  VmaSuballocationType suballocType,
14898  size_t allocationCount,
14899  VmaAllocation* pAllocations)
14900 {
14901  VMA_ASSERT(pAllocations != VMA_NULL);
14902  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14903 
14904  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14905 
14906  // If memory type is not HOST_VISIBLE, disable MAPPED.
14907  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14908  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14909  {
14910  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14911  }
14912  // If memory is lazily allocated, it should be always dedicated.
14913  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
14914  {
14916  }
14917 
14918  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14919  VMA_ASSERT(blockVector);
14920 
14921  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14922  bool preferDedicatedMemory =
14923  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14924  dedicatedAllocation ||
14925  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14926  size > preferredBlockSize / 2;
14927 
14928  if(preferDedicatedMemory &&
14929  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14930  finalCreateInfo.pool == VK_NULL_HANDLE)
14931  {
14933  }
14934 
14935  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14936  {
14937  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14938  {
14939  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14940  }
14941  else
14942  {
14943  return AllocateDedicatedMemory(
14944  size,
14945  suballocType,
14946  memTypeIndex,
14947  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14948  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14949  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14950  finalCreateInfo.pUserData,
14951  dedicatedBuffer,
14952  dedicatedImage,
14953  allocationCount,
14954  pAllocations);
14955  }
14956  }
14957  else
14958  {
14959  VkResult res = blockVector->Allocate(
14960  m_CurrentFrameIndex.load(),
14961  size,
14962  alignment,
14963  finalCreateInfo,
14964  suballocType,
14965  allocationCount,
14966  pAllocations);
14967  if(res == VK_SUCCESS)
14968  {
14969  return res;
14970  }
14971 
14972  // 5. Try dedicated memory.
14973  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14974  {
14975  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14976  }
14977  else
14978  {
14979  res = AllocateDedicatedMemory(
14980  size,
14981  suballocType,
14982  memTypeIndex,
14983  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14984  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14985  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14986  finalCreateInfo.pUserData,
14987  dedicatedBuffer,
14988  dedicatedImage,
14989  allocationCount,
14990  pAllocations);
14991  if(res == VK_SUCCESS)
14992  {
14993  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14994  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14995  return VK_SUCCESS;
14996  }
14997  else
14998  {
14999  // Everything failed: Return error code.
15000  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15001  return res;
15002  }
15003  }
15004  }
15005 }
15006 
15007 VkResult VmaAllocator_T::AllocateDedicatedMemory(
15008  VkDeviceSize size,
15009  VmaSuballocationType suballocType,
15010  uint32_t memTypeIndex,
15011  bool withinBudget,
15012  bool map,
15013  bool isUserDataString,
15014  void* pUserData,
15015  VkBuffer dedicatedBuffer,
15016  VkImage dedicatedImage,
15017  size_t allocationCount,
15018  VmaAllocation* pAllocations)
15019 {
15020  VMA_ASSERT(allocationCount > 0 && pAllocations);
15021 
15022  if(withinBudget)
15023  {
15024  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15025  VmaBudget heapBudget = {};
15026  GetBudget(&heapBudget, heapIndex, 1);
15027  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
15028  {
15029  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15030  }
15031  }
15032 
15033  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
15034  allocInfo.memoryTypeIndex = memTypeIndex;
15035  allocInfo.allocationSize = size;
15036 
15037 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15038  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
15039  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15040  {
15041  if(dedicatedBuffer != VK_NULL_HANDLE)
15042  {
15043  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
15044  dedicatedAllocInfo.buffer = dedicatedBuffer;
15045  allocInfo.pNext = &dedicatedAllocInfo;
15046  }
15047  else if(dedicatedImage != VK_NULL_HANDLE)
15048  {
15049  dedicatedAllocInfo.image = dedicatedImage;
15050  allocInfo.pNext = &dedicatedAllocInfo;
15051  }
15052  }
15053 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15054 
15055  size_t allocIndex;
15056  VkResult res = VK_SUCCESS;
15057  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15058  {
15059  res = AllocateDedicatedMemoryPage(
15060  size,
15061  suballocType,
15062  memTypeIndex,
15063  allocInfo,
15064  map,
15065  isUserDataString,
15066  pUserData,
15067  pAllocations + allocIndex);
15068  if(res != VK_SUCCESS)
15069  {
15070  break;
15071  }
15072  }
15073 
15074  if(res == VK_SUCCESS)
15075  {
15076  // Register them in m_pDedicatedAllocations.
15077  {
15078  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15079  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15080  VMA_ASSERT(pDedicatedAllocations);
15081  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15082  {
15083  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
15084  }
15085  }
15086 
15087  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
15088  }
15089  else
15090  {
15091  // Free all already created allocations.
15092  while(allocIndex--)
15093  {
15094  VmaAllocation currAlloc = pAllocations[allocIndex];
15095  VkDeviceMemory hMemory = currAlloc->GetMemory();
15096 
15097  /*
15098  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
15099  before vkFreeMemory.
15100 
15101  if(currAlloc->GetMappedData() != VMA_NULL)
15102  {
15103  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15104  }
15105  */
15106 
15107  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
15108  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
15109  currAlloc->SetUserData(this, VMA_NULL);
15110  currAlloc->Dtor();
15111  m_AllocationObjectAllocator.Free(currAlloc);
15112  }
15113 
15114  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15115  }
15116 
15117  return res;
15118 }
15119 
15120 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
15121  VkDeviceSize size,
15122  VmaSuballocationType suballocType,
15123  uint32_t memTypeIndex,
15124  const VkMemoryAllocateInfo& allocInfo,
15125  bool map,
15126  bool isUserDataString,
15127  void* pUserData,
15128  VmaAllocation* pAllocation)
15129 {
15130  VkDeviceMemory hMemory = VK_NULL_HANDLE;
15131  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
15132  if(res < 0)
15133  {
15134  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15135  return res;
15136  }
15137 
15138  void* pMappedData = VMA_NULL;
15139  if(map)
15140  {
15141  res = (*m_VulkanFunctions.vkMapMemory)(
15142  m_hDevice,
15143  hMemory,
15144  0,
15145  VK_WHOLE_SIZE,
15146  0,
15147  &pMappedData);
15148  if(res < 0)
15149  {
15150  VMA_DEBUG_LOG(" vkMapMemory FAILED");
15151  FreeVulkanMemory(memTypeIndex, size, hMemory);
15152  return res;
15153  }
15154  }
15155 
15156  *pAllocation = m_AllocationObjectAllocator.Allocate();
15157  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
15158  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
15159  (*pAllocation)->SetUserData(this, pUserData);
15160  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
15161  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15162  {
15163  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
15164  }
15165 
15166  return VK_SUCCESS;
15167 }
15168 
15169 void VmaAllocator_T::GetBufferMemoryRequirements(
15170  VkBuffer hBuffer,
15171  VkMemoryRequirements& memReq,
15172  bool& requiresDedicatedAllocation,
15173  bool& prefersDedicatedAllocation) const
15174 {
15175 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15176  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15177  {
15178  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
15179  memReqInfo.buffer = hBuffer;
15180 
15181  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15182 
15183  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15184  memReq2.pNext = &memDedicatedReq;
15185 
15186  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15187 
15188  memReq = memReq2.memoryRequirements;
15189  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15190  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15191  }
15192  else
15193 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15194  {
15195  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
15196  requiresDedicatedAllocation = false;
15197  prefersDedicatedAllocation = false;
15198  }
15199 }
15200 
15201 void VmaAllocator_T::GetImageMemoryRequirements(
15202  VkImage hImage,
15203  VkMemoryRequirements& memReq,
15204  bool& requiresDedicatedAllocation,
15205  bool& prefersDedicatedAllocation) const
15206 {
15207 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15208  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15209  {
15210  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15211  memReqInfo.image = hImage;
15212 
15213  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15214 
15215  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15216  memReq2.pNext = &memDedicatedReq;
15217 
15218  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15219 
15220  memReq = memReq2.memoryRequirements;
15221  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15222  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15223  }
15224  else
15225 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15226  {
15227  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15228  requiresDedicatedAllocation = false;
15229  prefersDedicatedAllocation = false;
15230  }
15231 }
15232 
15233 VkResult VmaAllocator_T::AllocateMemory(
15234  const VkMemoryRequirements& vkMemReq,
15235  bool requiresDedicatedAllocation,
15236  bool prefersDedicatedAllocation,
15237  VkBuffer dedicatedBuffer,
15238  VkImage dedicatedImage,
15239  const VmaAllocationCreateInfo& createInfo,
15240  VmaSuballocationType suballocType,
15241  size_t allocationCount,
15242  VmaAllocation* pAllocations)
15243 {
15244  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15245 
15246  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15247 
15248  if(vkMemReq.size == 0)
15249  {
15250  return VK_ERROR_VALIDATION_FAILED_EXT;
15251  }
15252  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15253  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15254  {
15255  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15256  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15257  }
15258  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15260  {
15261  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15262  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15263  }
15264  if(requiresDedicatedAllocation)
15265  {
15266  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15267  {
15268  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15269  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15270  }
15271  if(createInfo.pool != VK_NULL_HANDLE)
15272  {
15273  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15274  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15275  }
15276  }
15277  if((createInfo.pool != VK_NULL_HANDLE) &&
15278  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15279  {
15280  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15281  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15282  }
15283 
15284  if(createInfo.pool != VK_NULL_HANDLE)
15285  {
15286  const VkDeviceSize alignmentForPool = VMA_MAX(
15287  vkMemReq.alignment,
15288  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15289 
15290  VmaAllocationCreateInfo createInfoForPool = createInfo;
15291  // If memory type is not HOST_VISIBLE, disable MAPPED.
15292  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15293  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15294  {
15295  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15296  }
15297 
15298  return createInfo.pool->m_BlockVector.Allocate(
15299  m_CurrentFrameIndex.load(),
15300  vkMemReq.size,
15301  alignmentForPool,
15302  createInfoForPool,
15303  suballocType,
15304  allocationCount,
15305  pAllocations);
15306  }
15307  else
15308  {
15309  // Bit mask of memory Vulkan types acceptable for this allocation.
15310  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15311  uint32_t memTypeIndex = UINT32_MAX;
15312  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15313  if(res == VK_SUCCESS)
15314  {
15315  VkDeviceSize alignmentForMemType = VMA_MAX(
15316  vkMemReq.alignment,
15317  GetMemoryTypeMinAlignment(memTypeIndex));
15318 
15319  res = AllocateMemoryOfType(
15320  vkMemReq.size,
15321  alignmentForMemType,
15322  requiresDedicatedAllocation || prefersDedicatedAllocation,
15323  dedicatedBuffer,
15324  dedicatedImage,
15325  createInfo,
15326  memTypeIndex,
15327  suballocType,
15328  allocationCount,
15329  pAllocations);
15330  // Succeeded on first try.
15331  if(res == VK_SUCCESS)
15332  {
15333  return res;
15334  }
15335  // Allocation from this memory type failed. Try other compatible memory types.
15336  else
15337  {
15338  for(;;)
15339  {
15340  // Remove old memTypeIndex from list of possibilities.
15341  memoryTypeBits &= ~(1u << memTypeIndex);
15342  // Find alternative memTypeIndex.
15343  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15344  if(res == VK_SUCCESS)
15345  {
15346  alignmentForMemType = VMA_MAX(
15347  vkMemReq.alignment,
15348  GetMemoryTypeMinAlignment(memTypeIndex));
15349 
15350  res = AllocateMemoryOfType(
15351  vkMemReq.size,
15352  alignmentForMemType,
15353  requiresDedicatedAllocation || prefersDedicatedAllocation,
15354  dedicatedBuffer,
15355  dedicatedImage,
15356  createInfo,
15357  memTypeIndex,
15358  suballocType,
15359  allocationCount,
15360  pAllocations);
15361  // Allocation from this alternative memory type succeeded.
15362  if(res == VK_SUCCESS)
15363  {
15364  return res;
15365  }
15366  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15367  }
15368  // No other matching memory type index could be found.
15369  else
15370  {
15371  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15372  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15373  }
15374  }
15375  }
15376  }
15377  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15378  else
15379  return res;
15380  }
15381 }
15382 
15383 void VmaAllocator_T::FreeMemory(
15384  size_t allocationCount,
15385  const VmaAllocation* pAllocations)
15386 {
15387  VMA_ASSERT(pAllocations);
15388 
15389  for(size_t allocIndex = allocationCount; allocIndex--; )
15390  {
15391  VmaAllocation allocation = pAllocations[allocIndex];
15392 
15393  if(allocation != VK_NULL_HANDLE)
15394  {
15395  if(TouchAllocation(allocation))
15396  {
15397  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15398  {
15399  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15400  }
15401 
15402  switch(allocation->GetType())
15403  {
15404  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15405  {
15406  VmaBlockVector* pBlockVector = VMA_NULL;
15407  VmaPool hPool = allocation->GetBlock()->GetParentPool();
15408  if(hPool != VK_NULL_HANDLE)
15409  {
15410  pBlockVector = &hPool->m_BlockVector;
15411  }
15412  else
15413  {
15414  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15415  pBlockVector = m_pBlockVectors[memTypeIndex];
15416  }
15417  pBlockVector->Free(allocation);
15418  }
15419  break;
15420  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15421  FreeDedicatedMemory(allocation);
15422  break;
15423  default:
15424  VMA_ASSERT(0);
15425  }
15426  }
15427 
15428  // Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes.
15429  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
15430  allocation->SetUserData(this, VMA_NULL);
15431  allocation->Dtor();
15432  m_AllocationObjectAllocator.Free(allocation);
15433  }
15434  }
15435 }
15436 
15437 VkResult VmaAllocator_T::ResizeAllocation(
15438  const VmaAllocation alloc,
15439  VkDeviceSize newSize)
15440 {
15441  // This function is deprecated and so it does nothing. It's left for backward compatibility.
15442  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
15443  {
15444  return VK_ERROR_VALIDATION_FAILED_EXT;
15445  }
15446  if(newSize == alloc->GetSize())
15447  {
15448  return VK_SUCCESS;
15449  }
15450  return VK_ERROR_OUT_OF_POOL_MEMORY;
15451 }
15452 
15453 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
15454 {
15455  // Initialize.
15456  InitStatInfo(pStats->total);
15457  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15458  InitStatInfo(pStats->memoryType[i]);
15459  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15460  InitStatInfo(pStats->memoryHeap[i]);
15461 
15462  // Process default pools.
15463  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15464  {
15465  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15466  VMA_ASSERT(pBlockVector);
15467  pBlockVector->AddStats(pStats);
15468  }
15469 
15470  // Process custom pools.
15471  {
15472  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15473  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15474  {
15475  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15476  }
15477  }
15478 
15479  // Process dedicated allocations.
15480  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15481  {
15482  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15483  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15484  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15485  VMA_ASSERT(pDedicatedAllocVector);
15486  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15487  {
15488  VmaStatInfo allocationStatInfo;
15489  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15490  VmaAddStatInfo(pStats->total, allocationStatInfo);
15491  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15492  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15493  }
15494  }
15495 
15496  // Postprocess.
15497  VmaPostprocessCalcStatInfo(pStats->total);
15498  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15499  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15500  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15501  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15502 }
15503 
15504 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
15505 {
15506 #if VMA_MEMORY_BUDGET
15507  if(m_UseExtMemoryBudget)
15508  {
15509  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
15510  {
15511  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
15512  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15513  {
15514  const uint32_t heapIndex = firstHeap + i;
15515 
15516  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15517  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15518 
15519  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
15520  {
15521  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
15522  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
15523  }
15524  else
15525  {
15526  outBudget->usage = 0;
15527  }
15528 
15529  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
15530  outBudget->budget = VMA_MIN(
15531  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
15532  }
15533  }
15534  else
15535  {
15536  UpdateVulkanBudget(); // Outside of mutex lock
15537  GetBudget(outBudget, firstHeap, heapCount); // Recursion
15538  }
15539  }
15540  else
15541 #endif
15542  {
15543  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15544  {
15545  const uint32_t heapIndex = firstHeap + i;
15546 
15547  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15548  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15549 
15550  outBudget->usage = outBudget->blockBytes;
15551  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
15552  }
15553  }
15554 }
15555 
15556 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15557 
15558 VkResult VmaAllocator_T::DefragmentationBegin(
15559  const VmaDefragmentationInfo2& info,
15560  VmaDefragmentationStats* pStats,
15561  VmaDefragmentationContext* pContext)
15562 {
15563  if(info.pAllocationsChanged != VMA_NULL)
15564  {
15565  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15566  }
15567 
15568  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15569  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15570 
15571  (*pContext)->AddPools(info.poolCount, info.pPools);
15572  (*pContext)->AddAllocations(
15574 
15575  VkResult res = (*pContext)->Defragment(
15578  info.commandBuffer, pStats);
15579 
15580  if(res != VK_NOT_READY)
15581  {
15582  vma_delete(this, *pContext);
15583  *pContext = VMA_NULL;
15584  }
15585 
15586  return res;
15587 }
15588 
15589 VkResult VmaAllocator_T::DefragmentationEnd(
15590  VmaDefragmentationContext context)
15591 {
15592  vma_delete(this, context);
15593  return VK_SUCCESS;
15594 }
15595 
15596 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15597 {
15598  if(hAllocation->CanBecomeLost())
15599  {
15600  /*
15601  Warning: This is a carefully designed algorithm.
15602  Do not modify unless you really know what you're doing :)
15603  */
15604  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15605  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15606  for(;;)
15607  {
15608  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15609  {
15610  pAllocationInfo->memoryType = UINT32_MAX;
15611  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15612  pAllocationInfo->offset = 0;
15613  pAllocationInfo->size = hAllocation->GetSize();
15614  pAllocationInfo->pMappedData = VMA_NULL;
15615  pAllocationInfo->pUserData = hAllocation->GetUserData();
15616  return;
15617  }
15618  else if(localLastUseFrameIndex == localCurrFrameIndex)
15619  {
15620  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15621  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15622  pAllocationInfo->offset = hAllocation->GetOffset();
15623  pAllocationInfo->size = hAllocation->GetSize();
15624  pAllocationInfo->pMappedData = VMA_NULL;
15625  pAllocationInfo->pUserData = hAllocation->GetUserData();
15626  return;
15627  }
15628  else // Last use time earlier than current time.
15629  {
15630  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15631  {
15632  localLastUseFrameIndex = localCurrFrameIndex;
15633  }
15634  }
15635  }
15636  }
15637  else
15638  {
15639 #if VMA_STATS_STRING_ENABLED
15640  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15641  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15642  for(;;)
15643  {
15644  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15645  if(localLastUseFrameIndex == localCurrFrameIndex)
15646  {
15647  break;
15648  }
15649  else // Last use time earlier than current time.
15650  {
15651  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15652  {
15653  localLastUseFrameIndex = localCurrFrameIndex;
15654  }
15655  }
15656  }
15657 #endif
15658 
15659  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15660  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15661  pAllocationInfo->offset = hAllocation->GetOffset();
15662  pAllocationInfo->size = hAllocation->GetSize();
15663  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15664  pAllocationInfo->pUserData = hAllocation->GetUserData();
15665  }
15666 }
15667 
15668 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15669 {
15670  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15671  if(hAllocation->CanBecomeLost())
15672  {
15673  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15674  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15675  for(;;)
15676  {
15677  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15678  {
15679  return false;
15680  }
15681  else if(localLastUseFrameIndex == localCurrFrameIndex)
15682  {
15683  return true;
15684  }
15685  else // Last use time earlier than current time.
15686  {
15687  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15688  {
15689  localLastUseFrameIndex = localCurrFrameIndex;
15690  }
15691  }
15692  }
15693  }
15694  else
15695  {
15696 #if VMA_STATS_STRING_ENABLED
15697  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15698  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15699  for(;;)
15700  {
15701  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15702  if(localLastUseFrameIndex == localCurrFrameIndex)
15703  {
15704  break;
15705  }
15706  else // Last use time earlier than current time.
15707  {
15708  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15709  {
15710  localLastUseFrameIndex = localCurrFrameIndex;
15711  }
15712  }
15713  }
15714 #endif
15715 
15716  return true;
15717  }
15718 }
15719 
15720 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15721 {
15722  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15723 
15724  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15725 
15726  if(newCreateInfo.maxBlockCount == 0)
15727  {
15728  newCreateInfo.maxBlockCount = SIZE_MAX;
15729  }
15730  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15731  {
15732  return VK_ERROR_INITIALIZATION_FAILED;
15733  }
15734 
15735  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15736 
15737  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15738 
15739  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15740  if(res != VK_SUCCESS)
15741  {
15742  vma_delete(this, *pPool);
15743  *pPool = VMA_NULL;
15744  return res;
15745  }
15746 
15747  // Add to m_Pools.
15748  {
15749  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15750  (*pPool)->SetId(m_NextPoolId++);
15751  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15752  }
15753 
15754  return VK_SUCCESS;
15755 }
15756 
15757 void VmaAllocator_T::DestroyPool(VmaPool pool)
15758 {
15759  // Remove from m_Pools.
15760  {
15761  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15762  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15763  VMA_ASSERT(success && "Pool not found in Allocator.");
15764  }
15765 
15766  vma_delete(this, pool);
15767 }
15768 
15769 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15770 {
15771  pool->m_BlockVector.GetPoolStats(pPoolStats);
15772 }
15773 
15774 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15775 {
15776  m_CurrentFrameIndex.store(frameIndex);
15777 
15778 #if VMA_MEMORY_BUDGET
15779  if(m_UseExtMemoryBudget)
15780  {
15781  UpdateVulkanBudget();
15782  }
15783 #endif // #if VMA_MEMORY_BUDGET
15784 }
15785 
15786 void VmaAllocator_T::MakePoolAllocationsLost(
15787  VmaPool hPool,
15788  size_t* pLostAllocationCount)
15789 {
15790  hPool->m_BlockVector.MakePoolAllocationsLost(
15791  m_CurrentFrameIndex.load(),
15792  pLostAllocationCount);
15793 }
15794 
15795 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15796 {
15797  return hPool->m_BlockVector.CheckCorruption();
15798 }
15799 
15800 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15801 {
15802  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15803 
15804  // Process default pools.
15805  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15806  {
15807  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15808  {
15809  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15810  VMA_ASSERT(pBlockVector);
15811  VkResult localRes = pBlockVector->CheckCorruption();
15812  switch(localRes)
15813  {
15814  case VK_ERROR_FEATURE_NOT_PRESENT:
15815  break;
15816  case VK_SUCCESS:
15817  finalRes = VK_SUCCESS;
15818  break;
15819  default:
15820  return localRes;
15821  }
15822  }
15823  }
15824 
15825  // Process custom pools.
15826  {
15827  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15828  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15829  {
15830  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15831  {
15832  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15833  switch(localRes)
15834  {
15835  case VK_ERROR_FEATURE_NOT_PRESENT:
15836  break;
15837  case VK_SUCCESS:
15838  finalRes = VK_SUCCESS;
15839  break;
15840  default:
15841  return localRes;
15842  }
15843  }
15844  }
15845  }
15846 
15847  return finalRes;
15848 }
15849 
15850 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15851 {
15852  *pAllocation = m_AllocationObjectAllocator.Allocate();
15853  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15854  (*pAllocation)->InitLost();
15855 }
15856 
15857 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15858 {
15859  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15860 
15861  // HeapSizeLimit is in effect for this heap.
15862  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15863  {
15864  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15865  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
15866  for(;;)
15867  {
15868  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15869  if(blockBytesAfterAllocation > heapSize)
15870  {
15871  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15872  }
15873  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
15874  {
15875  break;
15876  }
15877  }
15878  }
15879  else
15880  {
15881  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15882  }
15883 
15884  // VULKAN CALL vkAllocateMemory.
15885  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15886 
15887  if(res == VK_SUCCESS)
15888  {
15889 #if VMA_MEMORY_BUDGET
15890  ++m_Budget.m_OperationsSinceBudgetFetch;
15891 #endif
15892 
15893  // Informative callback.
15894  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15895  {
15896  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15897  }
15898  }
15899  else
15900  {
15901  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15902  }
15903 
15904  return res;
15905 }
15906 
15907 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15908 {
15909  // Informative callback.
15910  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15911  {
15912  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15913  }
15914 
15915  // VULKAN CALL vkFreeMemory.
15916  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15917 
15918  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
15919 }
15920 
15921 VkResult VmaAllocator_T::BindVulkanBuffer(
15922  VkDeviceMemory memory,
15923  VkDeviceSize memoryOffset,
15924  VkBuffer buffer,
15925  const void* pNext)
15926 {
15927  if(pNext != VMA_NULL)
15928  {
15929 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15930  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15931  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15932  {
15933  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15934  bindBufferMemoryInfo.pNext = pNext;
15935  bindBufferMemoryInfo.buffer = buffer;
15936  bindBufferMemoryInfo.memory = memory;
15937  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15938  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15939  }
15940  else
15941 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15942  {
15943  return VK_ERROR_EXTENSION_NOT_PRESENT;
15944  }
15945  }
15946  else
15947  {
15948  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15949  }
15950 }
15951 
15952 VkResult VmaAllocator_T::BindVulkanImage(
15953  VkDeviceMemory memory,
15954  VkDeviceSize memoryOffset,
15955  VkImage image,
15956  const void* pNext)
15957 {
15958  if(pNext != VMA_NULL)
15959  {
15960 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15961  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15962  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15963  {
15964  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15965  bindBufferMemoryInfo.pNext = pNext;
15966  bindBufferMemoryInfo.image = image;
15967  bindBufferMemoryInfo.memory = memory;
15968  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15969  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15970  }
15971  else
15972 #endif // #if VMA_BIND_MEMORY2
15973  {
15974  return VK_ERROR_EXTENSION_NOT_PRESENT;
15975  }
15976  }
15977  else
15978  {
15979  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15980  }
15981 }
15982 
15983 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15984 {
15985  if(hAllocation->CanBecomeLost())
15986  {
15987  return VK_ERROR_MEMORY_MAP_FAILED;
15988  }
15989 
15990  switch(hAllocation->GetType())
15991  {
15992  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15993  {
15994  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15995  char *pBytes = VMA_NULL;
15996  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15997  if(res == VK_SUCCESS)
15998  {
15999  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
16000  hAllocation->BlockAllocMap();
16001  }
16002  return res;
16003  }
16004  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16005  return hAllocation->DedicatedAllocMap(this, ppData);
16006  default:
16007  VMA_ASSERT(0);
16008  return VK_ERROR_MEMORY_MAP_FAILED;
16009  }
16010 }
16011 
16012 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16013 {
16014  switch(hAllocation->GetType())
16015  {
16016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16017  {
16018  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16019  hAllocation->BlockAllocUnmap();
16020  pBlock->Unmap(this, 1);
16021  }
16022  break;
16023  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16024  hAllocation->DedicatedAllocUnmap(this);
16025  break;
16026  default:
16027  VMA_ASSERT(0);
16028  }
16029 }
16030 
16031 VkResult VmaAllocator_T::BindBufferMemory(
16032  VmaAllocation hAllocation,
16033  VkDeviceSize allocationLocalOffset,
16034  VkBuffer hBuffer,
16035  const void* pNext)
16036 {
16037  VkResult res = VK_SUCCESS;
16038  switch(hAllocation->GetType())
16039  {
16040  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16041  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16042  break;
16043  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16044  {
16045  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16046  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16047  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16048  break;
16049  }
16050  default:
16051  VMA_ASSERT(0);
16052  }
16053  return res;
16054 }
16055 
16056 VkResult VmaAllocator_T::BindImageMemory(
16057  VmaAllocation hAllocation,
16058  VkDeviceSize allocationLocalOffset,
16059  VkImage hImage,
16060  const void* pNext)
16061 {
16062  VkResult res = VK_SUCCESS;
16063  switch(hAllocation->GetType())
16064  {
16065  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16066  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16067  break;
16068  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16069  {
16070  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16071  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16072  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16073  break;
16074  }
16075  default:
16076  VMA_ASSERT(0);
16077  }
16078  return res;
16079 }
16080 
16081 void VmaAllocator_T::FlushOrInvalidateAllocation(
16082  VmaAllocation hAllocation,
16083  VkDeviceSize offset, VkDeviceSize size,
16084  VMA_CACHE_OPERATION op)
16085 {
16086  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
16087  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
16088  {
16089  const VkDeviceSize allocationSize = hAllocation->GetSize();
16090  VMA_ASSERT(offset <= allocationSize);
16091 
16092  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
16093 
16094  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
16095  memRange.memory = hAllocation->GetMemory();
16096 
16097  switch(hAllocation->GetType())
16098  {
16099  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16100  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16101  if(size == VK_WHOLE_SIZE)
16102  {
16103  memRange.size = allocationSize - memRange.offset;
16104  }
16105  else
16106  {
16107  VMA_ASSERT(offset + size <= allocationSize);
16108  memRange.size = VMA_MIN(
16109  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
16110  allocationSize - memRange.offset);
16111  }
16112  break;
16113 
16114  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16115  {
16116  // 1. Still within this allocation.
16117  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
16118  if(size == VK_WHOLE_SIZE)
16119  {
16120  size = allocationSize - offset;
16121  }
16122  else
16123  {
16124  VMA_ASSERT(offset + size <= allocationSize);
16125  }
16126  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
16127 
16128  // 2. Adjust to whole block.
16129  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
16130  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
16131  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
16132  memRange.offset += allocationOffset;
16133  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
16134 
16135  break;
16136  }
16137 
16138  default:
16139  VMA_ASSERT(0);
16140  }
16141 
16142  switch(op)
16143  {
16144  case VMA_CACHE_FLUSH:
16145  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
16146  break;
16147  case VMA_CACHE_INVALIDATE:
16148  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
16149  break;
16150  default:
16151  VMA_ASSERT(0);
16152  }
16153  }
16154  // else: Just ignore this call.
16155 }
16156 
16157 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
16158 {
16159  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
16160 
16161  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16162  {
16163  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16164  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16165  VMA_ASSERT(pDedicatedAllocations);
16166  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
16167  VMA_ASSERT(success);
16168  }
16169 
16170  VkDeviceMemory hMemory = allocation->GetMemory();
16171 
16172  /*
16173  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
16174  before vkFreeMemory.
16175 
16176  if(allocation->GetMappedData() != VMA_NULL)
16177  {
16178  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16179  }
16180  */
16181 
16182  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
16183 
16184  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
16185 }
16186 
16187 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16188 {
16189  VkBufferCreateInfo dummyBufCreateInfo;
16190  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16191 
16192  uint32_t memoryTypeBits = 0;
16193 
16194  // Create buffer.
16195  VkBuffer buf = VK_NULL_HANDLE;
16196  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16197  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16198  if(res == VK_SUCCESS)
16199  {
16200  // Query for supported memory types.
16201  VkMemoryRequirements memReq;
16202  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16203  memoryTypeBits = memReq.memoryTypeBits;
16204 
16205  // Destroy buffer.
16206  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16207  }
16208 
16209  return memoryTypeBits;
16210 }
16211 
16212 #if VMA_MEMORY_BUDGET
16213 
16214 void VmaAllocator_T::UpdateVulkanBudget()
16215 {
16216  VMA_ASSERT(m_UseExtMemoryBudget);
16217 
16218  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16219 
16220  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16221  memProps.pNext = &budgetProps;
16222 
16223  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16224 
16225  {
16226  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16227 
16228  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16229  {
16230  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16231  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16232  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16233  }
16234  m_Budget.m_OperationsSinceBudgetFetch = 0;
16235  }
16236 }
16237 
16238 #endif // #if VMA_MEMORY_BUDGET
16239 
16240 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16241 {
16242  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16243  !hAllocation->CanBecomeLost() &&
16244  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16245  {
16246  void* pData = VMA_NULL;
16247  VkResult res = Map(hAllocation, &pData);
16248  if(res == VK_SUCCESS)
16249  {
16250  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16251  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16252  Unmap(hAllocation);
16253  }
16254  else
16255  {
16256  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16257  }
16258  }
16259 }
16260 
16261 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16262 {
16263  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16264  if(memoryTypeBits == UINT32_MAX)
16265  {
16266  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16267  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16268  }
16269  return memoryTypeBits;
16270 }
16271 
16272 #if VMA_STATS_STRING_ENABLED
16273 
16274 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16275 {
16276  bool dedicatedAllocationsStarted = false;
16277  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16278  {
16279  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16280  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16281  VMA_ASSERT(pDedicatedAllocVector);
16282  if(pDedicatedAllocVector->empty() == false)
16283  {
16284  if(dedicatedAllocationsStarted == false)
16285  {
16286  dedicatedAllocationsStarted = true;
16287  json.WriteString("DedicatedAllocations");
16288  json.BeginObject();
16289  }
16290 
16291  json.BeginString("Type ");
16292  json.ContinueString(memTypeIndex);
16293  json.EndString();
16294 
16295  json.BeginArray();
16296 
16297  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
16298  {
16299  json.BeginObject(true);
16300  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
16301  hAlloc->PrintParameters(json);
16302  json.EndObject();
16303  }
16304 
16305  json.EndArray();
16306  }
16307  }
16308  if(dedicatedAllocationsStarted)
16309  {
16310  json.EndObject();
16311  }
16312 
16313  {
16314  bool allocationsStarted = false;
16315  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16316  {
16317  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
16318  {
16319  if(allocationsStarted == false)
16320  {
16321  allocationsStarted = true;
16322  json.WriteString("DefaultPools");
16323  json.BeginObject();
16324  }
16325 
16326  json.BeginString("Type ");
16327  json.ContinueString(memTypeIndex);
16328  json.EndString();
16329 
16330  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
16331  }
16332  }
16333  if(allocationsStarted)
16334  {
16335  json.EndObject();
16336  }
16337  }
16338 
16339  // Custom pools
16340  {
16341  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16342  const size_t poolCount = m_Pools.size();
16343  if(poolCount > 0)
16344  {
16345  json.WriteString("Pools");
16346  json.BeginObject();
16347  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
16348  {
16349  json.BeginString();
16350  json.ContinueString(m_Pools[poolIndex]->GetId());
16351  json.EndString();
16352 
16353  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
16354  }
16355  json.EndObject();
16356  }
16357  }
16358 }
16359 
16360 #endif // #if VMA_STATS_STRING_ENABLED
16361 
16363 // Public interface
16364 
16365 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
16366  const VmaAllocatorCreateInfo* pCreateInfo,
16367  VmaAllocator* pAllocator)
16368 {
16369  VMA_ASSERT(pCreateInfo && pAllocator);
16370  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
16371  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
16372  VMA_DEBUG_LOG("vmaCreateAllocator");
16373  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
16374  return (*pAllocator)->Init(pCreateInfo);
16375 }
16376 
16377 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
16378  VmaAllocator allocator)
16379 {
16380  if(allocator != VK_NULL_HANDLE)
16381  {
16382  VMA_DEBUG_LOG("vmaDestroyAllocator");
16383  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
16384  vma_delete(&allocationCallbacks, allocator);
16385  }
16386 }
16387 
16388 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
16389  VmaAllocator allocator,
16390  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
16391 {
16392  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
16393  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
16394 }
16395 
16396 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
16397  VmaAllocator allocator,
16398  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
16399 {
16400  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
16401  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
16402 }
16403 
16404 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
16405  VmaAllocator allocator,
16406  uint32_t memoryTypeIndex,
16407  VkMemoryPropertyFlags* pFlags)
16408 {
16409  VMA_ASSERT(allocator && pFlags);
16410  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
16411  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
16412 }
16413 
16414 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
16415  VmaAllocator allocator,
16416  uint32_t frameIndex)
16417 {
16418  VMA_ASSERT(allocator);
16419  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
16420 
16421  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16422 
16423  allocator->SetCurrentFrameIndex(frameIndex);
16424 }
16425 
16426 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
16427  VmaAllocator allocator,
16428  VmaStats* pStats)
16429 {
16430  VMA_ASSERT(allocator && pStats);
16431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16432  allocator->CalculateStats(pStats);
16433 }
16434 
16435 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
16436  VmaAllocator allocator,
16437  VmaBudget* pBudget)
16438 {
16439  VMA_ASSERT(allocator && pBudget);
16440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16441  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
16442 }
16443 
16444 #if VMA_STATS_STRING_ENABLED
16445 
16446 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
16447  VmaAllocator allocator,
16448  char** ppStatsString,
16449  VkBool32 detailedMap)
16450 {
16451  VMA_ASSERT(allocator && ppStatsString);
16452  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16453 
16454  VmaStringBuilder sb(allocator);
16455  {
16456  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
16457  json.BeginObject();
16458 
16459  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
16460  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
16461 
16462  VmaStats stats;
16463  allocator->CalculateStats(&stats);
16464 
16465  json.WriteString("Total");
16466  VmaPrintStatInfo(json, stats.total);
16467 
16468  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
16469  {
16470  json.BeginString("Heap ");
16471  json.ContinueString(heapIndex);
16472  json.EndString();
16473  json.BeginObject();
16474 
16475  json.WriteString("Size");
16476  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
16477 
16478  json.WriteString("Flags");
16479  json.BeginArray(true);
16480  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
16481  {
16482  json.WriteString("DEVICE_LOCAL");
16483  }
16484  json.EndArray();
16485 
16486  json.WriteString("Budget");
16487  json.BeginObject();
16488  {
16489  json.WriteString("BlockBytes");
16490  json.WriteNumber(budget[heapIndex].blockBytes);
16491  json.WriteString("AllocationBytes");
16492  json.WriteNumber(budget[heapIndex].allocationBytes);
16493  json.WriteString("Usage");
16494  json.WriteNumber(budget[heapIndex].usage);
16495  json.WriteString("Budget");
16496  json.WriteNumber(budget[heapIndex].budget);
16497  }
16498  json.EndObject();
16499 
16500  if(stats.memoryHeap[heapIndex].blockCount > 0)
16501  {
16502  json.WriteString("Stats");
16503  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
16504  }
16505 
16506  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
16507  {
16508  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
16509  {
16510  json.BeginString("Type ");
16511  json.ContinueString(typeIndex);
16512  json.EndString();
16513 
16514  json.BeginObject();
16515 
16516  json.WriteString("Flags");
16517  json.BeginArray(true);
16518  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
16519  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
16520  {
16521  json.WriteString("DEVICE_LOCAL");
16522  }
16523  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16524  {
16525  json.WriteString("HOST_VISIBLE");
16526  }
16527  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
16528  {
16529  json.WriteString("HOST_COHERENT");
16530  }
16531  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
16532  {
16533  json.WriteString("HOST_CACHED");
16534  }
16535  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
16536  {
16537  json.WriteString("LAZILY_ALLOCATED");
16538  }
16539  json.EndArray();
16540 
16541  if(stats.memoryType[typeIndex].blockCount > 0)
16542  {
16543  json.WriteString("Stats");
16544  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
16545  }
16546 
16547  json.EndObject();
16548  }
16549  }
16550 
16551  json.EndObject();
16552  }
16553  if(detailedMap == VK_TRUE)
16554  {
16555  allocator->PrintDetailedMap(json);
16556  }
16557 
16558  json.EndObject();
16559  }
16560 
16561  const size_t len = sb.GetLength();
16562  char* const pChars = vma_new_array(allocator, char, len + 1);
16563  if(len > 0)
16564  {
16565  memcpy(pChars, sb.GetData(), len);
16566  }
16567  pChars[len] = '\0';
16568  *ppStatsString = pChars;
16569 }
16570 
16571 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
16572  VmaAllocator allocator,
16573  char* pStatsString)
16574 {
16575  if(pStatsString != VMA_NULL)
16576  {
16577  VMA_ASSERT(allocator);
16578  size_t len = strlen(pStatsString);
16579  vma_delete_array(allocator, pStatsString, len + 1);
16580  }
16581 }
16582 
16583 #endif // #if VMA_STATS_STRING_ENABLED
16584 
16585 /*
16586 This function is not protected by any mutex because it just reads immutable data.
16587 */
16588 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
16589  VmaAllocator allocator,
16590  uint32_t memoryTypeBits,
16591  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16592  uint32_t* pMemoryTypeIndex)
16593 {
16594  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16597 
16598  if(pAllocationCreateInfo->memoryTypeBits != 0)
16599  {
16600  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
16601  }
16602 
16603  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
16604  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
16605  uint32_t notPreferredFlags = 0;
16606 
16607  // Convert usage to requiredFlags and preferredFlags.
16608  switch(pAllocationCreateInfo->usage)
16609  {
16611  break;
16613  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16614  {
16615  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16616  }
16617  break;
16619  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
16620  break;
16622  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16623  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16624  {
16625  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16626  }
16627  break;
16629  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16630  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16631  break;
16633  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16634  break;
16636  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
16637  break;
16638  default:
16639  VMA_ASSERT(0);
16640  break;
16641  }
16642 
16643  *pMemoryTypeIndex = UINT32_MAX;
16644  uint32_t minCost = UINT32_MAX;
16645  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16646  memTypeIndex < allocator->GetMemoryTypeCount();
16647  ++memTypeIndex, memTypeBit <<= 1)
16648  {
16649  // This memory type is acceptable according to memoryTypeBits bitmask.
16650  if((memTypeBit & memoryTypeBits) != 0)
16651  {
16652  const VkMemoryPropertyFlags currFlags =
16653  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16654  // This memory type contains requiredFlags.
16655  if((requiredFlags & ~currFlags) == 0)
16656  {
16657  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16658  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
16659  VmaCountBitsSet(currFlags & notPreferredFlags);
16660  // Remember memory type with lowest cost.
16661  if(currCost < minCost)
16662  {
16663  *pMemoryTypeIndex = memTypeIndex;
16664  if(currCost == 0)
16665  {
16666  return VK_SUCCESS;
16667  }
16668  minCost = currCost;
16669  }
16670  }
16671  }
16672  }
16673  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16674 }
16675 
16676 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
16677  VmaAllocator allocator,
16678  const VkBufferCreateInfo* pBufferCreateInfo,
16679  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16680  uint32_t* pMemoryTypeIndex)
16681 {
16682  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16683  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16684  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16685  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16686 
16687  const VkDevice hDev = allocator->m_hDevice;
16688  VkBuffer hBuffer = VK_NULL_HANDLE;
16689  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16690  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16691  if(res == VK_SUCCESS)
16692  {
16693  VkMemoryRequirements memReq = {};
16694  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16695  hDev, hBuffer, &memReq);
16696 
16697  res = vmaFindMemoryTypeIndex(
16698  allocator,
16699  memReq.memoryTypeBits,
16700  pAllocationCreateInfo,
16701  pMemoryTypeIndex);
16702 
16703  allocator->GetVulkanFunctions().vkDestroyBuffer(
16704  hDev, hBuffer, allocator->GetAllocationCallbacks());
16705  }
16706  return res;
16707 }
16708 
16709 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
16710  VmaAllocator allocator,
16711  const VkImageCreateInfo* pImageCreateInfo,
16712  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16713  uint32_t* pMemoryTypeIndex)
16714 {
16715  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16716  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16717  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16718  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16719 
16720  const VkDevice hDev = allocator->m_hDevice;
16721  VkImage hImage = VK_NULL_HANDLE;
16722  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16723  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16724  if(res == VK_SUCCESS)
16725  {
16726  VkMemoryRequirements memReq = {};
16727  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16728  hDev, hImage, &memReq);
16729 
16730  res = vmaFindMemoryTypeIndex(
16731  allocator,
16732  memReq.memoryTypeBits,
16733  pAllocationCreateInfo,
16734  pMemoryTypeIndex);
16735 
16736  allocator->GetVulkanFunctions().vkDestroyImage(
16737  hDev, hImage, allocator->GetAllocationCallbacks());
16738  }
16739  return res;
16740 }
16741 
16742 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
16743  VmaAllocator allocator,
16744  const VmaPoolCreateInfo* pCreateInfo,
16745  VmaPool* pPool)
16746 {
16747  VMA_ASSERT(allocator && pCreateInfo && pPool);
16748 
16749  VMA_DEBUG_LOG("vmaCreatePool");
16750 
16751  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16752 
16753  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16754 
16755 #if VMA_RECORDING_ENABLED
16756  if(allocator->GetRecorder() != VMA_NULL)
16757  {
16758  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16759  }
16760 #endif
16761 
16762  return res;
16763 }
16764 
16765 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
16766  VmaAllocator allocator,
16767  VmaPool pool)
16768 {
16769  VMA_ASSERT(allocator);
16770 
16771  if(pool == VK_NULL_HANDLE)
16772  {
16773  return;
16774  }
16775 
16776  VMA_DEBUG_LOG("vmaDestroyPool");
16777 
16778  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16779 
16780 #if VMA_RECORDING_ENABLED
16781  if(allocator->GetRecorder() != VMA_NULL)
16782  {
16783  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16784  }
16785 #endif
16786 
16787  allocator->DestroyPool(pool);
16788 }
16789 
16790 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
16791  VmaAllocator allocator,
16792  VmaPool pool,
16793  VmaPoolStats* pPoolStats)
16794 {
16795  VMA_ASSERT(allocator && pool && pPoolStats);
16796 
16797  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16798 
16799  allocator->GetPoolStats(pool, pPoolStats);
16800 }
16801 
16802 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
16803  VmaAllocator allocator,
16804  VmaPool pool,
16805  size_t* pLostAllocationCount)
16806 {
16807  VMA_ASSERT(allocator && pool);
16808 
16809  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16810 
16811 #if VMA_RECORDING_ENABLED
16812  if(allocator->GetRecorder() != VMA_NULL)
16813  {
16814  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16815  }
16816 #endif
16817 
16818  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16819 }
16820 
16821 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16822 {
16823  VMA_ASSERT(allocator && pool);
16824 
16825  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16826 
16827  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16828 
16829  return allocator->CheckPoolCorruption(pool);
16830 }
16831 
16832 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
16833  VmaAllocator allocator,
16834  VmaPool pool,
16835  const char** ppName)
16836 {
16837  VMA_ASSERT(allocator && pool);
16838 
16839  VMA_DEBUG_LOG("vmaGetPoolName");
16840 
16841  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16842 
16843  *ppName = pool->GetName();
16844 }
16845 
16846 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
16847  VmaAllocator allocator,
16848  VmaPool pool,
16849  const char* pName)
16850 {
16851  VMA_ASSERT(allocator && pool);
16852 
16853  VMA_DEBUG_LOG("vmaSetPoolName");
16854 
16855  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16856 
16857  pool->SetName(pName);
16858 
16859 #if VMA_RECORDING_ENABLED
16860  if(allocator->GetRecorder() != VMA_NULL)
16861  {
16862  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
16863  }
16864 #endif
16865 }
16866 
16867 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
16868  VmaAllocator allocator,
16869  const VkMemoryRequirements* pVkMemoryRequirements,
16870  const VmaAllocationCreateInfo* pCreateInfo,
16871  VmaAllocation* pAllocation,
16872  VmaAllocationInfo* pAllocationInfo)
16873 {
16874  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16875 
16876  VMA_DEBUG_LOG("vmaAllocateMemory");
16877 
16878  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16879 
16880  VkResult result = allocator->AllocateMemory(
16881  *pVkMemoryRequirements,
16882  false, // requiresDedicatedAllocation
16883  false, // prefersDedicatedAllocation
16884  VK_NULL_HANDLE, // dedicatedBuffer
16885  VK_NULL_HANDLE, // dedicatedImage
16886  *pCreateInfo,
16887  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16888  1, // allocationCount
16889  pAllocation);
16890 
16891 #if VMA_RECORDING_ENABLED
16892  if(allocator->GetRecorder() != VMA_NULL)
16893  {
16894  allocator->GetRecorder()->RecordAllocateMemory(
16895  allocator->GetCurrentFrameIndex(),
16896  *pVkMemoryRequirements,
16897  *pCreateInfo,
16898  *pAllocation);
16899  }
16900 #endif
16901 
16902  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16903  {
16904  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16905  }
16906 
16907  return result;
16908 }
16909 
16910 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
16911  VmaAllocator allocator,
16912  const VkMemoryRequirements* pVkMemoryRequirements,
16913  const VmaAllocationCreateInfo* pCreateInfo,
16914  size_t allocationCount,
16915  VmaAllocation* pAllocations,
16916  VmaAllocationInfo* pAllocationInfo)
16917 {
16918  if(allocationCount == 0)
16919  {
16920  return VK_SUCCESS;
16921  }
16922 
16923  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16924 
16925  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16926 
16927  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16928 
16929  VkResult result = allocator->AllocateMemory(
16930  *pVkMemoryRequirements,
16931  false, // requiresDedicatedAllocation
16932  false, // prefersDedicatedAllocation
16933  VK_NULL_HANDLE, // dedicatedBuffer
16934  VK_NULL_HANDLE, // dedicatedImage
16935  *pCreateInfo,
16936  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16937  allocationCount,
16938  pAllocations);
16939 
16940 #if VMA_RECORDING_ENABLED
16941  if(allocator->GetRecorder() != VMA_NULL)
16942  {
16943  allocator->GetRecorder()->RecordAllocateMemoryPages(
16944  allocator->GetCurrentFrameIndex(),
16945  *pVkMemoryRequirements,
16946  *pCreateInfo,
16947  (uint64_t)allocationCount,
16948  pAllocations);
16949  }
16950 #endif
16951 
16952  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16953  {
16954  for(size_t i = 0; i < allocationCount; ++i)
16955  {
16956  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16957  }
16958  }
16959 
16960  return result;
16961 }
16962 
16963 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
16964  VmaAllocator allocator,
16965  VkBuffer buffer,
16966  const VmaAllocationCreateInfo* pCreateInfo,
16967  VmaAllocation* pAllocation,
16968  VmaAllocationInfo* pAllocationInfo)
16969 {
16970  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16971 
16972  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16973 
16974  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16975 
16976  VkMemoryRequirements vkMemReq = {};
16977  bool requiresDedicatedAllocation = false;
16978  bool prefersDedicatedAllocation = false;
16979  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16980  requiresDedicatedAllocation,
16981  prefersDedicatedAllocation);
16982 
16983  VkResult result = allocator->AllocateMemory(
16984  vkMemReq,
16985  requiresDedicatedAllocation,
16986  prefersDedicatedAllocation,
16987  buffer, // dedicatedBuffer
16988  VK_NULL_HANDLE, // dedicatedImage
16989  *pCreateInfo,
16990  VMA_SUBALLOCATION_TYPE_BUFFER,
16991  1, // allocationCount
16992  pAllocation);
16993 
16994 #if VMA_RECORDING_ENABLED
16995  if(allocator->GetRecorder() != VMA_NULL)
16996  {
16997  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16998  allocator->GetCurrentFrameIndex(),
16999  vkMemReq,
17000  requiresDedicatedAllocation,
17001  prefersDedicatedAllocation,
17002  *pCreateInfo,
17003  *pAllocation);
17004  }
17005 #endif
17006 
17007  if(pAllocationInfo && result == VK_SUCCESS)
17008  {
17009  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17010  }
17011 
17012  return result;
17013 }
17014 
17015 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17016  VmaAllocator allocator,
17017  VkImage image,
17018  const VmaAllocationCreateInfo* pCreateInfo,
17019  VmaAllocation* pAllocation,
17020  VmaAllocationInfo* pAllocationInfo)
17021 {
17022  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17023 
17024  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17025 
17026  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17027 
17028  VkMemoryRequirements vkMemReq = {};
17029  bool requiresDedicatedAllocation = false;
17030  bool prefersDedicatedAllocation = false;
17031  allocator->GetImageMemoryRequirements(image, vkMemReq,
17032  requiresDedicatedAllocation, prefersDedicatedAllocation);
17033 
17034  VkResult result = allocator->AllocateMemory(
17035  vkMemReq,
17036  requiresDedicatedAllocation,
17037  prefersDedicatedAllocation,
17038  VK_NULL_HANDLE, // dedicatedBuffer
17039  image, // dedicatedImage
17040  *pCreateInfo,
17041  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17042  1, // allocationCount
17043  pAllocation);
17044 
17045 #if VMA_RECORDING_ENABLED
17046  if(allocator->GetRecorder() != VMA_NULL)
17047  {
17048  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17049  allocator->GetCurrentFrameIndex(),
17050  vkMemReq,
17051  requiresDedicatedAllocation,
17052  prefersDedicatedAllocation,
17053  *pCreateInfo,
17054  *pAllocation);
17055  }
17056 #endif
17057 
17058  if(pAllocationInfo && result == VK_SUCCESS)
17059  {
17060  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17061  }
17062 
17063  return result;
17064 }
17065 
17066 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17067  VmaAllocator allocator,
17068  VmaAllocation allocation)
17069 {
17070  VMA_ASSERT(allocator);
17071 
17072  if(allocation == VK_NULL_HANDLE)
17073  {
17074  return;
17075  }
17076 
17077  VMA_DEBUG_LOG("vmaFreeMemory");
17078 
17079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17080 
17081 #if VMA_RECORDING_ENABLED
17082  if(allocator->GetRecorder() != VMA_NULL)
17083  {
17084  allocator->GetRecorder()->RecordFreeMemory(
17085  allocator->GetCurrentFrameIndex(),
17086  allocation);
17087  }
17088 #endif
17089 
17090  allocator->FreeMemory(
17091  1, // allocationCount
17092  &allocation);
17093 }
17094 
17095 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17096  VmaAllocator allocator,
17097  size_t allocationCount,
17098  VmaAllocation* pAllocations)
17099 {
17100  if(allocationCount == 0)
17101  {
17102  return;
17103  }
17104 
17105  VMA_ASSERT(allocator);
17106 
17107  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17108 
17109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17110 
17111 #if VMA_RECORDING_ENABLED
17112  if(allocator->GetRecorder() != VMA_NULL)
17113  {
17114  allocator->GetRecorder()->RecordFreeMemoryPages(
17115  allocator->GetCurrentFrameIndex(),
17116  (uint64_t)allocationCount,
17117  pAllocations);
17118  }
17119 #endif
17120 
17121  allocator->FreeMemory(allocationCount, pAllocations);
17122 }
17123 
17124 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17125  VmaAllocator allocator,
17126  VmaAllocation allocation,
17127  VkDeviceSize newSize)
17128 {
17129  VMA_ASSERT(allocator && allocation);
17130 
17131  VMA_DEBUG_LOG("vmaResizeAllocation");
17132 
17133  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17134 
17135  return allocator->ResizeAllocation(allocation, newSize);
17136 }
17137 
17138 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17139  VmaAllocator allocator,
17140  VmaAllocation allocation,
17141  VmaAllocationInfo* pAllocationInfo)
17142 {
17143  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17144 
17145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17146 
17147 #if VMA_RECORDING_ENABLED
17148  if(allocator->GetRecorder() != VMA_NULL)
17149  {
17150  allocator->GetRecorder()->RecordGetAllocationInfo(
17151  allocator->GetCurrentFrameIndex(),
17152  allocation);
17153  }
17154 #endif
17155 
17156  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17157 }
17158 
17159 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17160  VmaAllocator allocator,
17161  VmaAllocation allocation)
17162 {
17163  VMA_ASSERT(allocator && allocation);
17164 
17165  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17166 
17167 #if VMA_RECORDING_ENABLED
17168  if(allocator->GetRecorder() != VMA_NULL)
17169  {
17170  allocator->GetRecorder()->RecordTouchAllocation(
17171  allocator->GetCurrentFrameIndex(),
17172  allocation);
17173  }
17174 #endif
17175 
17176  return allocator->TouchAllocation(allocation);
17177 }
17178 
17179 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17180  VmaAllocator allocator,
17181  VmaAllocation allocation,
17182  void* pUserData)
17183 {
17184  VMA_ASSERT(allocator && allocation);
17185 
17186  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17187 
17188  allocation->SetUserData(allocator, pUserData);
17189 
17190 #if VMA_RECORDING_ENABLED
17191  if(allocator->GetRecorder() != VMA_NULL)
17192  {
17193  allocator->GetRecorder()->RecordSetAllocationUserData(
17194  allocator->GetCurrentFrameIndex(),
17195  allocation,
17196  pUserData);
17197  }
17198 #endif
17199 }
17200 
17201 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17202  VmaAllocator allocator,
17203  VmaAllocation* pAllocation)
17204 {
17205  VMA_ASSERT(allocator && pAllocation);
17206 
17207  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17208 
17209  allocator->CreateLostAllocation(pAllocation);
17210 
17211 #if VMA_RECORDING_ENABLED
17212  if(allocator->GetRecorder() != VMA_NULL)
17213  {
17214  allocator->GetRecorder()->RecordCreateLostAllocation(
17215  allocator->GetCurrentFrameIndex(),
17216  *pAllocation);
17217  }
17218 #endif
17219 }
17220 
17221 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17222  VmaAllocator allocator,
17223  VmaAllocation allocation,
17224  void** ppData)
17225 {
17226  VMA_ASSERT(allocator && allocation && ppData);
17227 
17228  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17229 
17230  VkResult res = allocator->Map(allocation, ppData);
17231 
17232 #if VMA_RECORDING_ENABLED
17233  if(allocator->GetRecorder() != VMA_NULL)
17234  {
17235  allocator->GetRecorder()->RecordMapMemory(
17236  allocator->GetCurrentFrameIndex(),
17237  allocation);
17238  }
17239 #endif
17240 
17241  return res;
17242 }
17243 
17244 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17245  VmaAllocator allocator,
17246  VmaAllocation allocation)
17247 {
17248  VMA_ASSERT(allocator && allocation);
17249 
17250  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17251 
17252 #if VMA_RECORDING_ENABLED
17253  if(allocator->GetRecorder() != VMA_NULL)
17254  {
17255  allocator->GetRecorder()->RecordUnmapMemory(
17256  allocator->GetCurrentFrameIndex(),
17257  allocation);
17258  }
17259 #endif
17260 
17261  allocator->Unmap(allocation);
17262 }
17263 
17264 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17265 {
17266  VMA_ASSERT(allocator && allocation);
17267 
17268  VMA_DEBUG_LOG("vmaFlushAllocation");
17269 
17270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17271 
17272  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17273 
17274 #if VMA_RECORDING_ENABLED
17275  if(allocator->GetRecorder() != VMA_NULL)
17276  {
17277  allocator->GetRecorder()->RecordFlushAllocation(
17278  allocator->GetCurrentFrameIndex(),
17279  allocation, offset, size);
17280  }
17281 #endif
17282 }
17283 
17284 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17285 {
17286  VMA_ASSERT(allocator && allocation);
17287 
17288  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17289 
17290  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17291 
17292  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17293 
17294 #if VMA_RECORDING_ENABLED
17295  if(allocator->GetRecorder() != VMA_NULL)
17296  {
17297  allocator->GetRecorder()->RecordInvalidateAllocation(
17298  allocator->GetCurrentFrameIndex(),
17299  allocation, offset, size);
17300  }
17301 #endif
17302 }
17303 
17304 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17305 {
17306  VMA_ASSERT(allocator);
17307 
17308  VMA_DEBUG_LOG("vmaCheckCorruption");
17309 
17310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17311 
17312  return allocator->CheckCorruption(memoryTypeBits);
17313 }
17314 
17315 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17316  VmaAllocator allocator,
17317  VmaAllocation* pAllocations,
17318  size_t allocationCount,
17319  VkBool32* pAllocationsChanged,
17320  const VmaDefragmentationInfo *pDefragmentationInfo,
17321  VmaDefragmentationStats* pDefragmentationStats)
17322 {
17323  // Deprecated interface, reimplemented using new one.
17324 
17325  VmaDefragmentationInfo2 info2 = {};
17326  info2.allocationCount = (uint32_t)allocationCount;
17327  info2.pAllocations = pAllocations;
17328  info2.pAllocationsChanged = pAllocationsChanged;
17329  if(pDefragmentationInfo != VMA_NULL)
17330  {
17331  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17332  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17333  }
17334  else
17335  {
17336  info2.maxCpuAllocationsToMove = UINT32_MAX;
17337  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
17338  }
17339  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
17340 
17342  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
17343  if(res == VK_NOT_READY)
17344  {
17345  res = vmaDefragmentationEnd( allocator, ctx);
17346  }
17347  return res;
17348 }
17349 
17350 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
17351  VmaAllocator allocator,
17352  const VmaDefragmentationInfo2* pInfo,
17353  VmaDefragmentationStats* pStats,
17354  VmaDefragmentationContext *pContext)
17355 {
17356  VMA_ASSERT(allocator && pInfo && pContext);
17357 
17358  // Degenerate case: Nothing to defragment.
17359  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
17360  {
17361  return VK_SUCCESS;
17362  }
17363 
17364  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
17365  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
17366  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
17367  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17368 
17369  VMA_DEBUG_LOG("vmaDefragmentationBegin");
17370 
17371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17372 
17373  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
17374 
17375 #if VMA_RECORDING_ENABLED
17376  if(allocator->GetRecorder() != VMA_NULL)
17377  {
17378  allocator->GetRecorder()->RecordDefragmentationBegin(
17379  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
17380  }
17381 #endif
17382 
17383  return res;
17384 }
17385 
17386 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
17387  VmaAllocator allocator,
17388  VmaDefragmentationContext context)
17389 {
17390  VMA_ASSERT(allocator);
17391 
17392  VMA_DEBUG_LOG("vmaDefragmentationEnd");
17393 
17394  if(context != VK_NULL_HANDLE)
17395  {
17396  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17397 
17398 #if VMA_RECORDING_ENABLED
17399  if(allocator->GetRecorder() != VMA_NULL)
17400  {
17401  allocator->GetRecorder()->RecordDefragmentationEnd(
17402  allocator->GetCurrentFrameIndex(), context);
17403  }
17404 #endif
17405 
17406  return allocator->DefragmentationEnd(context);
17407  }
17408  else
17409  {
17410  return VK_SUCCESS;
17411  }
17412 }
17413 
17414 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
17415  VmaAllocator allocator,
17416  VmaAllocation allocation,
17417  VkBuffer buffer)
17418 {
17419  VMA_ASSERT(allocator && allocation && buffer);
17420 
17421  VMA_DEBUG_LOG("vmaBindBufferMemory");
17422 
17423  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17424 
17425  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
17426 }
17427 
17428 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
17429  VmaAllocator allocator,
17430  VmaAllocation allocation,
17431  VkDeviceSize allocationLocalOffset,
17432  VkBuffer buffer,
17433  const void* pNext)
17434 {
17435  VMA_ASSERT(allocator && allocation && buffer);
17436 
17437  VMA_DEBUG_LOG("vmaBindBufferMemory2");
17438 
17439  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17440 
17441  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
17442 }
17443 
17444 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
17445  VmaAllocator allocator,
17446  VmaAllocation allocation,
17447  VkImage image)
17448 {
17449  VMA_ASSERT(allocator && allocation && image);
17450 
17451  VMA_DEBUG_LOG("vmaBindImageMemory");
17452 
17453  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17454 
17455  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
17456 }
17457 
17458 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
17459  VmaAllocator allocator,
17460  VmaAllocation allocation,
17461  VkDeviceSize allocationLocalOffset,
17462  VkImage image,
17463  const void* pNext)
17464 {
17465  VMA_ASSERT(allocator && allocation && image);
17466 
17467  VMA_DEBUG_LOG("vmaBindImageMemory2");
17468 
17469  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17470 
17471  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
17472 }
17473 
17474 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
17475  VmaAllocator allocator,
17476  const VkBufferCreateInfo* pBufferCreateInfo,
17477  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17478  VkBuffer* pBuffer,
17479  VmaAllocation* pAllocation,
17480  VmaAllocationInfo* pAllocationInfo)
17481 {
17482  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
17483 
17484  if(pBufferCreateInfo->size == 0)
17485  {
17486  return VK_ERROR_VALIDATION_FAILED_EXT;
17487  }
17488 
17489  VMA_DEBUG_LOG("vmaCreateBuffer");
17490 
17491  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17492 
17493  *pBuffer = VK_NULL_HANDLE;
17494  *pAllocation = VK_NULL_HANDLE;
17495 
17496  // 1. Create VkBuffer.
17497  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17498  allocator->m_hDevice,
17499  pBufferCreateInfo,
17500  allocator->GetAllocationCallbacks(),
17501  pBuffer);
17502  if(res >= 0)
17503  {
17504  // 2. vkGetBufferMemoryRequirements.
17505  VkMemoryRequirements vkMemReq = {};
17506  bool requiresDedicatedAllocation = false;
17507  bool prefersDedicatedAllocation = false;
17508  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17509  requiresDedicatedAllocation, prefersDedicatedAllocation);
17510 
17511  // Make sure alignment requirements for specific buffer usages reported
17512  // in Physical Device Properties are included in alignment reported by memory requirements.
17513  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
17514  {
17515  VMA_ASSERT(vkMemReq.alignment %
17516  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
17517  }
17518  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
17519  {
17520  VMA_ASSERT(vkMemReq.alignment %
17521  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
17522  }
17523  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
17524  {
17525  VMA_ASSERT(vkMemReq.alignment %
17526  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
17527  }
17528 
17529  // 3. Allocate memory using allocator.
17530  res = allocator->AllocateMemory(
17531  vkMemReq,
17532  requiresDedicatedAllocation,
17533  prefersDedicatedAllocation,
17534  *pBuffer, // dedicatedBuffer
17535  VK_NULL_HANDLE, // dedicatedImage
17536  *pAllocationCreateInfo,
17537  VMA_SUBALLOCATION_TYPE_BUFFER,
17538  1, // allocationCount
17539  pAllocation);
17540 
17541 #if VMA_RECORDING_ENABLED
17542  if(allocator->GetRecorder() != VMA_NULL)
17543  {
17544  allocator->GetRecorder()->RecordCreateBuffer(
17545  allocator->GetCurrentFrameIndex(),
17546  *pBufferCreateInfo,
17547  *pAllocationCreateInfo,
17548  *pAllocation);
17549  }
17550 #endif
17551 
17552  if(res >= 0)
17553  {
17554  // 3. Bind buffer with memory.
17555  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17556  {
17557  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17558  }
17559  if(res >= 0)
17560  {
17561  // All steps succeeded.
17562  #if VMA_STATS_STRING_ENABLED
17563  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17564  #endif
17565  if(pAllocationInfo != VMA_NULL)
17566  {
17567  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17568  }
17569 
17570  return VK_SUCCESS;
17571  }
17572  allocator->FreeMemory(
17573  1, // allocationCount
17574  pAllocation);
17575  *pAllocation = VK_NULL_HANDLE;
17576  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17577  *pBuffer = VK_NULL_HANDLE;
17578  return res;
17579  }
17580  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17581  *pBuffer = VK_NULL_HANDLE;
17582  return res;
17583  }
17584  return res;
17585 }
17586 
17587 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17588  VmaAllocator allocator,
17589  VkBuffer buffer,
17590  VmaAllocation allocation)
17591 {
17592  VMA_ASSERT(allocator);
17593 
17594  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17595  {
17596  return;
17597  }
17598 
17599  VMA_DEBUG_LOG("vmaDestroyBuffer");
17600 
17601  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17602 
17603 #if VMA_RECORDING_ENABLED
17604  if(allocator->GetRecorder() != VMA_NULL)
17605  {
17606  allocator->GetRecorder()->RecordDestroyBuffer(
17607  allocator->GetCurrentFrameIndex(),
17608  allocation);
17609  }
17610 #endif
17611 
17612  if(buffer != VK_NULL_HANDLE)
17613  {
17614  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17615  }
17616 
17617  if(allocation != VK_NULL_HANDLE)
17618  {
17619  allocator->FreeMemory(
17620  1, // allocationCount
17621  &allocation);
17622  }
17623 }
17624 
17625 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
17626  VmaAllocator allocator,
17627  const VkImageCreateInfo* pImageCreateInfo,
17628  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17629  VkImage* pImage,
17630  VmaAllocation* pAllocation,
17631  VmaAllocationInfo* pAllocationInfo)
17632 {
17633  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17634 
17635  if(pImageCreateInfo->extent.width == 0 ||
17636  pImageCreateInfo->extent.height == 0 ||
17637  pImageCreateInfo->extent.depth == 0 ||
17638  pImageCreateInfo->mipLevels == 0 ||
17639  pImageCreateInfo->arrayLayers == 0)
17640  {
17641  return VK_ERROR_VALIDATION_FAILED_EXT;
17642  }
17643 
17644  VMA_DEBUG_LOG("vmaCreateImage");
17645 
17646  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17647 
17648  *pImage = VK_NULL_HANDLE;
17649  *pAllocation = VK_NULL_HANDLE;
17650 
17651  // 1. Create VkImage.
17652  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17653  allocator->m_hDevice,
17654  pImageCreateInfo,
17655  allocator->GetAllocationCallbacks(),
17656  pImage);
17657  if(res >= 0)
17658  {
17659  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17660  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17661  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17662 
17663  // 2. Allocate memory using allocator.
17664  VkMemoryRequirements vkMemReq = {};
17665  bool requiresDedicatedAllocation = false;
17666  bool prefersDedicatedAllocation = false;
17667  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17668  requiresDedicatedAllocation, prefersDedicatedAllocation);
17669 
17670  res = allocator->AllocateMemory(
17671  vkMemReq,
17672  requiresDedicatedAllocation,
17673  prefersDedicatedAllocation,
17674  VK_NULL_HANDLE, // dedicatedBuffer
17675  *pImage, // dedicatedImage
17676  *pAllocationCreateInfo,
17677  suballocType,
17678  1, // allocationCount
17679  pAllocation);
17680 
17681 #if VMA_RECORDING_ENABLED
17682  if(allocator->GetRecorder() != VMA_NULL)
17683  {
17684  allocator->GetRecorder()->RecordCreateImage(
17685  allocator->GetCurrentFrameIndex(),
17686  *pImageCreateInfo,
17687  *pAllocationCreateInfo,
17688  *pAllocation);
17689  }
17690 #endif
17691 
17692  if(res >= 0)
17693  {
17694  // 3. Bind image with memory.
17695  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17696  {
17697  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17698  }
17699  if(res >= 0)
17700  {
17701  // All steps succeeded.
17702  #if VMA_STATS_STRING_ENABLED
17703  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17704  #endif
17705  if(pAllocationInfo != VMA_NULL)
17706  {
17707  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17708  }
17709 
17710  return VK_SUCCESS;
17711  }
17712  allocator->FreeMemory(
17713  1, // allocationCount
17714  pAllocation);
17715  *pAllocation = VK_NULL_HANDLE;
17716  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17717  *pImage = VK_NULL_HANDLE;
17718  return res;
17719  }
17720  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17721  *pImage = VK_NULL_HANDLE;
17722  return res;
17723  }
17724  return res;
17725 }
17726 
17727 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17728  VmaAllocator allocator,
17729  VkImage image,
17730  VmaAllocation allocation)
17731 {
17732  VMA_ASSERT(allocator);
17733 
17734  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17735  {
17736  return;
17737  }
17738 
17739  VMA_DEBUG_LOG("vmaDestroyImage");
17740 
17741  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17742 
17743 #if VMA_RECORDING_ENABLED
17744  if(allocator->GetRecorder() != VMA_NULL)
17745  {
17746  allocator->GetRecorder()->RecordDestroyImage(
17747  allocator->GetCurrentFrameIndex(),
17748  allocation);
17749  }
17750 #endif
17751 
17752  if(image != VK_NULL_HANDLE)
17753  {
17754  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17755  }
17756  if(allocation != VK_NULL_HANDLE)
17757  {
17758  allocator->FreeMemory(
17759  1, // allocationCount
17760  &allocation);
17761  }
17762 }
17763 
17764 #endif // #ifdef VMA_IMPLEMENTATION
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1979
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1937
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1863
VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1974
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1935
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1840
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2000
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2576
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3125
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2026
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:1924
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2177
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2322
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2648
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2405
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1872
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2654
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2385
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1966
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1859
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2372
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2006
VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1972
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1940
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2791
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2145
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2479
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2151
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1939
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1766
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2416
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2202
VmaAllocator
Represents main object of this library initialized.
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1951
VmaAllocatorCreateInfo
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1994
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2346
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3111
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2667
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2409
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1897
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3205
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2400
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2260
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2141
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2604
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2616
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3143
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2657
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1928
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1926
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3110
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2796
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2328
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1936
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2597
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2472
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2159
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2335
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:1912
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3122
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1944
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3174
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3209
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2651
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1938
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1976
VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2292
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3159
VmaAllocation
Represents single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2314
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2070
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1943
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1945
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3188
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1846
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2155
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2621
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2085
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2138
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3213
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2282
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2149
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2147
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2009
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1867
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2629
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2601
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1861
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaPool
Represents custom memory pool.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2308
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2379
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2607
VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2324
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2143
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1942
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3119
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3200
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2181
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2595
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2453
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2426
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2150
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2012
VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2430
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2003
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2051
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2213
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1934
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2810
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1997
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3113
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1949
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2151
VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2299
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2423
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2420
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2448
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2150
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2801
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1982
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1941
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2815
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2559
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2158
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:2063
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2670
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2437
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2157
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2076
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2265
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3179
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1948
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2643
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1950
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3164
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2782
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:2359
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1947
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2660
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2541
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3211
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2151
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2390
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2442
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1946
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2439
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2458
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3140
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3215
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2466
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3217
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1990
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2150
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2777
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2587
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2192
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2435
VmaDefragmentationContext
Represents Opaque object that represents started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3134
VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2591
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3169
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3195
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2396