Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1764 /*
1765 Define this macro to 0/1 to disable/enable support for recording functionality,
1766 available through VmaAllocatorCreateInfo::pRecordSettings.
1767 */
1768 #ifndef VMA_RECORDING_ENABLED
1769  #define VMA_RECORDING_ENABLED 0
1770 #endif
1771 
1772 #ifndef NOMINMAX
1773  #define NOMINMAX // For windows.h
1774 #endif
1775 
1776 #ifndef VULKAN_H_
1777  #include <vulkan/vulkan.h>
1778 #endif
1779 
1780 #if VMA_RECORDING_ENABLED
1781  #include <windows.h>
1782 #endif
1783 
1784 // Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
1785 // where AAA = major, BBB = minor, CCC = patch.
1786 // If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
1787 #if !defined(VMA_VULKAN_VERSION)
1788  #if defined(VK_VERSION_1_1)
1789  #define VMA_VULKAN_VERSION 1001000
1790  #else
1791  #define VMA_VULKAN_VERSION 1000000
1792  #endif
1793 #endif
1794 
1795 #if !defined(VMA_DEDICATED_ALLOCATION)
1796  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1797  #define VMA_DEDICATED_ALLOCATION 1
1798  #else
1799  #define VMA_DEDICATED_ALLOCATION 0
1800  #endif
1801 #endif
1802 
1803 #if !defined(VMA_BIND_MEMORY2)
1804  #if VK_KHR_bind_memory2
1805  #define VMA_BIND_MEMORY2 1
1806  #else
1807  #define VMA_BIND_MEMORY2 0
1808  #endif
1809 #endif
1810 
1811 #if !defined(VMA_MEMORY_BUDGET)
1812  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
1813  #define VMA_MEMORY_BUDGET 1
1814  #else
1815  #define VMA_MEMORY_BUDGET 0
1816  #endif
1817 #endif
1818 
1819 // Define these macros to decorate all public functions with additional code,
1820 // before and after returned type, appropriately. This may be useful for
1821 // exporting the functions when compiling VMA as a separate library. Example:
1822 // #define VMA_CALL_PRE __declspec(dllexport)
1823 // #define VMA_CALL_POST __cdecl
1824 #ifndef VMA_CALL_PRE
1825  #define VMA_CALL_PRE
1826 #endif
1827 #ifndef VMA_CALL_POST
1828  #define VMA_CALL_POST
1829 #endif
1830 
1840 VK_DEFINE_HANDLE(VmaAllocator)
1841 
1842 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1844  VmaAllocator allocator,
1845  uint32_t memoryType,
1846  VkDeviceMemory memory,
1847  VkDeviceSize size);
1849 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1850  VmaAllocator allocator,
1851  uint32_t memoryType,
1852  VkDeviceMemory memory,
1853  VkDeviceSize size);
1854 
1868 
1928 
1931 typedef VkFlags VmaAllocatorCreateFlags;
1932 
1937 typedef struct VmaVulkanFunctions {
1938  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1939  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1940  PFN_vkAllocateMemory vkAllocateMemory;
1941  PFN_vkFreeMemory vkFreeMemory;
1942  PFN_vkMapMemory vkMapMemory;
1943  PFN_vkUnmapMemory vkUnmapMemory;
1944  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1945  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1946  PFN_vkBindBufferMemory vkBindBufferMemory;
1947  PFN_vkBindImageMemory vkBindImageMemory;
1948  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1949  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1950  PFN_vkCreateBuffer vkCreateBuffer;
1951  PFN_vkDestroyBuffer vkDestroyBuffer;
1952  PFN_vkCreateImage vkCreateImage;
1953  PFN_vkDestroyImage vkDestroyImage;
1954  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1955 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
1956  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1957  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1958 #endif
1959 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
1960  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
1961  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
1962 #endif
1963 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
1964  PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
1965 #endif
1967 
1969 typedef enum VmaRecordFlagBits {
1976 
1979 typedef VkFlags VmaRecordFlags;
1980 
1982 typedef struct VmaRecordSettings
1983 {
1993  const char* pFilePath;
1995 
1998 {
2002 
2003  VkPhysicalDevice physicalDevice;
2005 
2006  VkDevice device;
2008 
2011 
2012  const VkAllocationCallbacks* pAllocationCallbacks;
2014 
2054  const VkDeviceSize* pHeapSizeLimit;
2079  VkInstance instance;
2090 
2092 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2093  const VmaAllocatorCreateInfo* pCreateInfo,
2094  VmaAllocator* pAllocator);
2095 
2097 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2098  VmaAllocator allocator);
2099 
2104 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2105  VmaAllocator allocator,
2106  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
2107 
2112 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2113  VmaAllocator allocator,
2114  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
2115 
2122 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2123  VmaAllocator allocator,
2124  uint32_t memoryTypeIndex,
2125  VkMemoryPropertyFlags* pFlags);
2126 
2135 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2136  VmaAllocator allocator,
2137  uint32_t frameIndex);
2138 
2141 typedef struct VmaStatInfo
2142 {
2144  uint32_t blockCount;
2150  VkDeviceSize usedBytes;
2152  VkDeviceSize unusedBytes;
2155 } VmaStatInfo;
2156 
2158 typedef struct VmaStats
2159 {
2160  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2161  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2163 } VmaStats;
2164 
2174 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2175  VmaAllocator allocator,
2176  VmaStats* pStats);
2177 
2180 typedef struct VmaBudget
2181 {
2184  VkDeviceSize blockBytes;
2185 
2195  VkDeviceSize allocationBytes;
2196 
2205  VkDeviceSize usage;
2206 
2216  VkDeviceSize budget;
2217 } VmaBudget;
2218 
2229 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2230  VmaAllocator allocator,
2231  VmaBudget* pBudget);
2232 
2233 #ifndef VMA_STATS_STRING_ENABLED
2234 #define VMA_STATS_STRING_ENABLED 1
2235 #endif
2236 
2237 #if VMA_STATS_STRING_ENABLED
2238 
2240 
2242 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2243  VmaAllocator allocator,
2244  char** ppStatsString,
2245  VkBool32 detailedMap);
2246 
2247 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2248  VmaAllocator allocator,
2249  char* pStatsString);
2250 
2251 #endif // #if VMA_STATS_STRING_ENABLED
2252 
2261 VK_DEFINE_HANDLE(VmaPool)
2262 
2263 typedef enum VmaMemoryUsage
2264 {
2326 
2328 } VmaMemoryUsage;
2329 
2339 
2404 
2420 
2430 
2437 
2441 
2443 {
2456  VkMemoryPropertyFlags requiredFlags;
2461  VkMemoryPropertyFlags preferredFlags;
2469  uint32_t memoryTypeBits;
2482  void* pUserData;
2484 
2501 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2502  VmaAllocator allocator,
2503  uint32_t memoryTypeBits,
2504  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2505  uint32_t* pMemoryTypeIndex);
2506 
2519 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2520  VmaAllocator allocator,
2521  const VkBufferCreateInfo* pBufferCreateInfo,
2522  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2523  uint32_t* pMemoryTypeIndex);
2524 
2537 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2538  VmaAllocator allocator,
2539  const VkImageCreateInfo* pImageCreateInfo,
2540  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2541  uint32_t* pMemoryTypeIndex);
2542 
2563 
2580 
2591 
2597 
2600 typedef VkFlags VmaPoolCreateFlags;
2601 
2604 typedef struct VmaPoolCreateInfo {
2619  VkDeviceSize blockSize;
2648 
2651 typedef struct VmaPoolStats {
2654  VkDeviceSize size;
2657  VkDeviceSize unusedSize;
2670  VkDeviceSize unusedRangeSizeMax;
2673  size_t blockCount;
2674 } VmaPoolStats;
2675 
2682 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
2683  VmaAllocator allocator,
2684  const VmaPoolCreateInfo* pCreateInfo,
2685  VmaPool* pPool);
2686 
2689 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
2690  VmaAllocator allocator,
2691  VmaPool pool);
2692 
2699 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
2700  VmaAllocator allocator,
2701  VmaPool pool,
2702  VmaPoolStats* pPoolStats);
2703 
2710 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
2711  VmaAllocator allocator,
2712  VmaPool pool,
2713  size_t* pLostAllocationCount);
2714 
2729 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2730 
2737 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
2738  VmaAllocator allocator,
2739  VmaPool pool,
2740  const char** ppName);
2741 
2747 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
2748  VmaAllocator allocator,
2749  VmaPool pool,
2750  const char* pName);
2751 
2776 VK_DEFINE_HANDLE(VmaAllocation)
2777 
2778 
2780 typedef struct VmaAllocationInfo {
2785  uint32_t memoryType;
2794  VkDeviceMemory deviceMemory;
2799  VkDeviceSize offset;
2804  VkDeviceSize size;
2818  void* pUserData;
2820 
2831 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
2832  VmaAllocator allocator,
2833  const VkMemoryRequirements* pVkMemoryRequirements,
2834  const VmaAllocationCreateInfo* pCreateInfo,
2835  VmaAllocation* pAllocation,
2836  VmaAllocationInfo* pAllocationInfo);
2837 
2857 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
2858  VmaAllocator allocator,
2859  const VkMemoryRequirements* pVkMemoryRequirements,
2860  const VmaAllocationCreateInfo* pCreateInfo,
2861  size_t allocationCount,
2862  VmaAllocation* pAllocations,
2863  VmaAllocationInfo* pAllocationInfo);
2864 
2871 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
2872  VmaAllocator allocator,
2873  VkBuffer buffer,
2874  const VmaAllocationCreateInfo* pCreateInfo,
2875  VmaAllocation* pAllocation,
2876  VmaAllocationInfo* pAllocationInfo);
2877 
2879 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
2880  VmaAllocator allocator,
2881  VkImage image,
2882  const VmaAllocationCreateInfo* pCreateInfo,
2883  VmaAllocation* pAllocation,
2884  VmaAllocationInfo* pAllocationInfo);
2885 
2890 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
2891  VmaAllocator allocator,
2892  VmaAllocation allocation);
2893 
2904 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
2905  VmaAllocator allocator,
2906  size_t allocationCount,
2907  VmaAllocation* pAllocations);
2908 
2915 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
2916  VmaAllocator allocator,
2917  VmaAllocation allocation,
2918  VkDeviceSize newSize);
2919 
2936 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
2937  VmaAllocator allocator,
2938  VmaAllocation allocation,
2939  VmaAllocationInfo* pAllocationInfo);
2940 
2955 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
2956  VmaAllocator allocator,
2957  VmaAllocation allocation);
2958 
2972 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
2973  VmaAllocator allocator,
2974  VmaAllocation allocation,
2975  void* pUserData);
2976 
2987 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
2988  VmaAllocator allocator,
2989  VmaAllocation* pAllocation);
2990 
3029 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3030  VmaAllocator allocator,
3031  VmaAllocation allocation,
3032  void** ppData);
3033 
3042 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3043  VmaAllocator allocator,
3044  VmaAllocation allocation);
3045 
3064 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3065 
3084 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3085 
3102 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
3103 
3110 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3111 
3112 typedef enum VmaDefragmentationFlagBits {
3116 typedef VkFlags VmaDefragmentationFlags;
3117 
3122 typedef struct VmaDefragmentationInfo2 {
3146  uint32_t poolCount;
3167  VkDeviceSize maxCpuBytesToMove;
3177  VkDeviceSize maxGpuBytesToMove;
3191  VkCommandBuffer commandBuffer;
3193 
3198 typedef struct VmaDefragmentationInfo {
3203  VkDeviceSize maxBytesToMove;
3210 
3212 typedef struct VmaDefragmentationStats {
3214  VkDeviceSize bytesMoved;
3216  VkDeviceSize bytesFreed;
3222 
3252 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3253  VmaAllocator allocator,
3254  const VmaDefragmentationInfo2* pInfo,
3255  VmaDefragmentationStats* pStats,
3256  VmaDefragmentationContext *pContext);
3257 
3263 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3264  VmaAllocator allocator,
3265  VmaDefragmentationContext context);
3266 
3307 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3308  VmaAllocator allocator,
3309  VmaAllocation* pAllocations,
3310  size_t allocationCount,
3311  VkBool32* pAllocationsChanged,
3312  const VmaDefragmentationInfo *pDefragmentationInfo,
3313  VmaDefragmentationStats* pDefragmentationStats);
3314 
3327 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3328  VmaAllocator allocator,
3329  VmaAllocation allocation,
3330  VkBuffer buffer);
3331 
3342 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3343  VmaAllocator allocator,
3344  VmaAllocation allocation,
3345  VkDeviceSize allocationLocalOffset,
3346  VkBuffer buffer,
3347  const void* pNext);
3348 
3361 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3362  VmaAllocator allocator,
3363  VmaAllocation allocation,
3364  VkImage image);
3365 
3376 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3377  VmaAllocator allocator,
3378  VmaAllocation allocation,
3379  VkDeviceSize allocationLocalOffset,
3380  VkImage image,
3381  const void* pNext);
3382 
3409 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3410  VmaAllocator allocator,
3411  const VkBufferCreateInfo* pBufferCreateInfo,
3412  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3413  VkBuffer* pBuffer,
3414  VmaAllocation* pAllocation,
3415  VmaAllocationInfo* pAllocationInfo);
3416 
3428 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3429  VmaAllocator allocator,
3430  VkBuffer buffer,
3431  VmaAllocation allocation);
3432 
3434 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3435  VmaAllocator allocator,
3436  const VkImageCreateInfo* pImageCreateInfo,
3437  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3438  VkImage* pImage,
3439  VmaAllocation* pAllocation,
3440  VmaAllocationInfo* pAllocationInfo);
3441 
3453 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3454  VmaAllocator allocator,
3455  VkImage image,
3456  VmaAllocation allocation);
3457 
3458 #ifdef __cplusplus
3459 }
3460 #endif
3461 
3462 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3463 
3464 // For Visual Studio IntelliSense.
3465 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3466 #define VMA_IMPLEMENTATION
3467 #endif
3468 
3469 #ifdef VMA_IMPLEMENTATION
3470 #undef VMA_IMPLEMENTATION
3471 
3472 #include <cstdint>
3473 #include <cstdlib>
3474 #include <cstring>
3475 
3476 /*******************************************************************************
3477 CONFIGURATION SECTION
3478 
3479 Define some of these macros before each #include of this header or change them
3480 here if you need other than the default behavior depending on your environment.
3481 */
3482 
3483 /*
3484 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3485 internally, like:
3486 
3487  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3488 
3489 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3490 VmaAllocatorCreateInfo::pVulkanFunctions.
3491 */
3492 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3493 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3494 #endif
3495 
3496 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3497 //#define VMA_USE_STL_CONTAINERS 1
3498 
3499 /* Set this macro to 1 to make the library including and using STL containers:
3500 std::pair, std::vector, std::list, std::unordered_map.
3501 
3502 Set it to 0 or undefined to make the library using its own implementation of
3503 the containers.
3504 */
3505 #if VMA_USE_STL_CONTAINERS
3506  #define VMA_USE_STL_VECTOR 1
3507  #define VMA_USE_STL_UNORDERED_MAP 1
3508  #define VMA_USE_STL_LIST 1
3509 #endif
3510 
3511 #ifndef VMA_USE_STL_SHARED_MUTEX
3512  // Compiler conforms to C++17.
3513  #if __cplusplus >= 201703L
3514  #define VMA_USE_STL_SHARED_MUTEX 1
3515  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3516  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3517  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3518  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3519  #define VMA_USE_STL_SHARED_MUTEX 1
3520  #else
3521  #define VMA_USE_STL_SHARED_MUTEX 0
3522  #endif
3523 #endif
3524 
3525 /*
3526 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3527 Library has its own container implementation.
3528 */
3529 #if VMA_USE_STL_VECTOR
3530  #include <vector>
3531 #endif
3532 
3533 #if VMA_USE_STL_UNORDERED_MAP
3534  #include <unordered_map>
3535 #endif
3536 
3537 #if VMA_USE_STL_LIST
3538  #include <list>
3539 #endif
3540 
3541 /*
3542 Following headers are used in this CONFIGURATION section only, so feel free to
3543 remove them if not needed.
3544 */
3545 #include <cassert> // for assert
3546 #include <algorithm> // for min, max
3547 #include <mutex>
3548 
3549 #ifndef VMA_NULL
3550  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3551  #define VMA_NULL nullptr
3552 #endif
3553 
3554 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3555 #include <cstdlib>
// Android below API 16 lacks aligned_alloc(); emulate it with memalign().
// NOTE(review): assumes memalign'ed memory may be released with free(),
// which holds on Android's bionic libc — confirm if porting elsewhere.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
3566 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3567 #include <cstdlib>
// Fallback aligned_alloc() built on posix_memalign() for platforms that do not
// provide the C11 function (Apple, Android, and older glibc without
// _GLIBCXX_HAVE_ALIGNED_ALLOC).
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    // posix_memalign returns 0 on success; on failure the output pointer is
    // unspecified, so report VMA_NULL instead of returning garbage.
    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
3581 #endif
3582 
3583 // If your compiler is not compatible with C++11 and definition of
3584 // aligned_alloc() function is missing, uncommenting the following line may help:
3585 
3586 //#include <malloc.h>
3587 
3588 // Normal assert to check for programmer's errors, especially in Debug configuration.
3589 #ifndef VMA_ASSERT
3590  #ifdef NDEBUG
3591  #define VMA_ASSERT(expr)
3592  #else
3593  #define VMA_ASSERT(expr) assert(expr)
3594  #endif
3595 #endif
3596 
3597 // Assert that will be called very often, like inside data structures e.g. operator[].
3598 // Making it non-empty can make program slow.
3599 #ifndef VMA_HEAVY_ASSERT
3600  #ifdef NDEBUG
3601  #define VMA_HEAVY_ASSERT(expr)
3602  #else
3603  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3604  #endif
3605 #endif
3606 
3607 #ifndef VMA_ALIGN_OF
3608  #define VMA_ALIGN_OF(type) (__alignof(type))
3609 #endif
3610 
3611 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3612  #if defined(_WIN32)
3613  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3614  #else
3615  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3616  #endif
3617 #endif
3618 
3619 #ifndef VMA_SYSTEM_FREE
3620  #if defined(_WIN32)
3621  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3622  #else
3623  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3624  #endif
3625 #endif
3626 
3627 #ifndef VMA_MIN
3628  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3629 #endif
3630 
3631 #ifndef VMA_MAX
3632  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3633 #endif
3634 
3635 #ifndef VMA_SWAP
3636  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3637 #endif
3638 
3639 #ifndef VMA_SORT
3640  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3641 #endif
3642 
3643 #ifndef VMA_DEBUG_LOG
3644  #define VMA_DEBUG_LOG(format, ...)
3645  /*
3646  #define VMA_DEBUG_LOG(format, ...) do { \
3647  printf(format, __VA_ARGS__); \
3648  printf("\n"); \
3649  } while(false)
3650  */
3651 #endif
3652 
3653 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3654 #if VMA_STATS_STRING_ENABLED
3655  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3656  {
3657  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3658  }
3659  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3660  {
3661  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3662  }
3663  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3664  {
3665  snprintf(outStr, strLen, "%p", ptr);
3666  }
3667 #endif
3668 
3669 #ifndef VMA_MUTEX
    // Minimal wrapper over std::mutex exposing the Lock/Unlock naming used by
    // VMA_MUTEX throughout the library.
    class VmaMutex
    {
    public:
        // Blocks the calling thread until exclusive ownership is acquired.
        void Lock() { m_Mutex.lock(); }
        // Releases ownership; must be called by the thread that locked it.
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
3678  #define VMA_MUTEX VmaMutex
3679 #endif
3680 
3681 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3682 #ifndef VMA_RW_MUTEX
3683  #if VMA_USE_STL_SHARED_MUTEX
3684  // Use std::shared_mutex from C++17.
3685  #include <shared_mutex>
    // Read-write lock backed by std::shared_mutex (C++17).
    class VmaRWMutex
    {
    public:
        // Shared (reader) lock: multiple readers may hold it concurrently.
        void LockRead() { m_Mutex.lock_shared(); }
        void UnlockRead() { m_Mutex.unlock_shared(); }
        // Exclusive (writer) lock: waits for all readers and writers to release.
        void LockWrite() { m_Mutex.lock(); }
        void UnlockWrite() { m_Mutex.unlock(); }
    private:
        std::shared_mutex m_Mutex;
    };
3696  #define VMA_RW_MUTEX VmaRWMutex
3697  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3698  // Use SRWLOCK from WinAPI.
3699  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    // Read-write lock backed by WinAPI SRWLOCK.
    class VmaRWMutex
    {
    public:
        // SRWLOCK needs explicit initialization but no destruction.
        VmaRWMutex() { InitializeSRWLock(&m_Lock); }
        // Shared (reader) access.
        void LockRead() { AcquireSRWLockShared(&m_Lock); }
        void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
        // Exclusive (writer) access.
        void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
        void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    private:
        SRWLOCK m_Lock;
    };
3711  #define VMA_RW_MUTEX VmaRWMutex
3712  #else
3713  // Less efficient fallback: Use normal mutex.
    // Fallback read-write lock: readers and writers all take the same exclusive
    // mutex, so concurrent reads serialize — correct but less efficient.
    class VmaRWMutex
    {
    public:
        void LockRead() { m_Mutex.Lock(); }
        void UnlockRead() { m_Mutex.Unlock(); }
        void LockWrite() { m_Mutex.Lock(); }
        void UnlockWrite() { m_Mutex.Unlock(); }
    private:
        VMA_MUTEX m_Mutex;
    };
3724  #define VMA_RW_MUTEX VmaRWMutex
3725  #endif // #if VMA_USE_STL_SHARED_MUTEX
3726 #endif // #ifndef VMA_RW_MUTEX
3727 
3728 /*
3729 If providing your own implementation, you need to implement a subset of std::atomic.
3730 */
3731 #ifndef VMA_ATOMIC_UINT32
3732  #include <atomic>
3733  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3734 #endif
3735 
3736 #ifndef VMA_ATOMIC_UINT64
3737  #include <atomic>
3738  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3739 #endif
3740 
3741 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3742 
3746  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3747 #endif
3748 
3749 #ifndef VMA_DEBUG_ALIGNMENT
3750 
3754  #define VMA_DEBUG_ALIGNMENT (1)
3755 #endif
3756 
3757 #ifndef VMA_DEBUG_MARGIN
3758 
3762  #define VMA_DEBUG_MARGIN (0)
3763 #endif
3764 
3765 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3766 
3770  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3771 #endif
3772 
3773 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3774 
3779  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3780 #endif
3781 
3782 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3783 
3787  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3788 #endif
3789 
3790 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3791 
3795  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3796 #endif
3797 
3798 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3799  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3801 #endif
3802 
3803 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3804  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3806 #endif
3807 
3808 #ifndef VMA_CLASS_NO_COPY
3809  #define VMA_CLASS_NO_COPY(className) \
3810  private: \
3811  className(const className&) = delete; \
3812  className& operator=(const className&) = delete;
3813 #endif
3814 
3815 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3816 
3817 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3818 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3819 
3820 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3821 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3822 
3823 /*******************************************************************************
3824 END OF CONFIGURATION
3825 */
3826 
3827 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3828 
3829 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3830  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3831 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop body runs exactly once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1; // Clear the lowest set bit.
        ++count;
    }
    return count;
}
3842 
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use unsigned types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T remainder = val % align;
    return remainder != 0 ? val + (align - remainder) : val;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use unsigned types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val - (val % align);
}
3857 
// Division with mathematical rounding to nearest number.
// Use unsigned types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    // Adding half the divisor before the truncating division rounds to nearest.
    const T halfY = y / (T)2;
    return (x + halfY) / y;
}
3864 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one set bit; x-1 flips that bit and all below it.
    const T masked = x & (x - (T)1);
    return masked == 0;
}
3875 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    --v;
    // Smear the highest set bit into every lower position (shifts 1,2,4,8,16).
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v + 1;
}
// Returns smallest power of 2 greater or equal to v (64-bit overload).
static inline uint64_t VmaNextPow2(uint64_t v)
{
    --v;
    // Smear the highest set bit into every lower position (shifts 1..32).
    for(uint32_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v + 1;
}
3900 
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Fill every bit below the highest set bit (shifts 1,2,4,8,16)...
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    // ...then XOR away everything except the top bit.
    return v ^ (v >> 1);
}
// Returns largest power of 2 less or equal to v (64-bit overload).
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Fill every bit below the highest set bit (shifts 1..32)...
    for(uint32_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    // ...then XOR away everything except the top bit.
    return v ^ (v >> 1);
}
3923 
3924 static inline bool VmaStrIsEmpty(const char* pStr)
3925 {
3926  return pStr == VMA_NULL || *pStr == '\0';
3927 }
3928 
3929 #if VMA_STATS_STRING_ENABLED
3930 
3931 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3932 {
3933  switch(algorithm)
3934  {
3936  return "Linear";
3938  return "Buddy";
3939  case 0:
3940  return "Default";
3941  default:
3942  VMA_ASSERT(0);
3943  return "";
3944  }
3945 }
3946 
3947 #endif // #if VMA_STATS_STRING_ENABLED
3948 
3949 #ifndef VMA_SORT
3950 
// Lomuto-style partition used by the VMA_SORT fallback: the last element is the
// pivot. On return, elements before the returned iterator satisfy cmp(elem, pivot)
// and the pivot itself sits at the returned position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element of the range.
    Iterator insertIndex = beg; // Next destination for an element ordered before the pivot.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
3973 
// Recursive quicksort over [beg, end) built on VmaQuickSortPartition.
// Serves as the VMA_SORT fallback when std::sort is not used.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp); // Elements before the pivot.
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp); // Elements after the pivot.
    }
}
3984 
3985 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3986 
3987 #endif // #ifndef VMA_SORT
3988 
3989 /*
3990 Returns true if two memory blocks occupy overlapping pages.
3991 ResourceA must be in less memory offset than ResourceB.
3992 
3993 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3994 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3995 */
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    // Precondition: A lies entirely before B and both sizes are nonzero.
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    // Last byte occupied by resource A.
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    // Masking with ~(pageSize - 1) rounds down to a page boundary; this assumes
    // pageSize is a power of 2 (bufferImageGranularity is per the Vulkan spec).
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    // Same page => the two resources may alias a granularity page.
    return resourceAEndPage == resourceBStartPage;
}
4009 
// Category of the content of a suballocation. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused region.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with tiling not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
4020 
4021 /*
4022 Returns true if given suballocation types could conflict and must respect
4023 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4024 or linear image and another one is optimal image. If type is unknown, behave
4025 conservatively.
4026 */
4027 static inline bool VmaIsBufferImageGranularityConflict(
4028  VmaSuballocationType suballocType1,
4029  VmaSuballocationType suballocType2)
4030 {
4031  if(suballocType1 > suballocType2)
4032  {
4033  VMA_SWAP(suballocType1, suballocType2);
4034  }
4035 
4036  switch(suballocType1)
4037  {
4038  case VMA_SUBALLOCATION_TYPE_FREE:
4039  return false;
4040  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4041  return true;
4042  case VMA_SUBALLOCATION_TYPE_BUFFER:
4043  return
4044  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4045  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4046  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4047  return
4048  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4049  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4050  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4051  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4052  return
4053  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4054  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4055  return false;
4056  default:
4057  VMA_ASSERT(0);
4058  return true;
4059  }
4060 }
4061 
// Fills VMA_DEBUG_MARGIN bytes at pData + offset with the corruption-detection
// magic value. Compiles to a no-op unless both VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION are enabled.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    // Assumes VMA_DEBUG_MARGIN is a multiple of sizeof(uint32_t) - any
    // remainder bytes would be left unfilled. TODO confirm at config site.
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}
4075 
// Verifies that the margin written by VmaWriteMagicValue at pData + offset is
// intact. Returns false if any uint32_t of the margin was overwritten
// (heap corruption). Always returns true when corruption detection is disabled.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}
4091 
4092 /*
4093 Fills structure with parameters of an example buffer to be used for transfers
4094 during GPU memory defragmentation.
4095 */
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    // The buffer is only ever the source/destination of transfer commands.
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}
4103 
4104 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, locking is skipped entirely (m_pMutex stays null),
    // which lets callers disable synchronization without branching at each site.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex; // Null when synchronization was disabled.
};
4117 
4118 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    // Takes the shared (read) lock, or does nothing when useMutex is false.
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex; // Null when synchronization was disabled.
};
4130 
4131 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    // Takes the exclusive (write) lock, or does nothing when useMutex is false.
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex; // Null when synchronization was disabled.
};
4143 
4144 #if VMA_DEBUG_GLOBAL_MUTEX
4145  static VMA_MUTEX gDebugGlobalMutex;
4146  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4147 #else
4148  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4149 #endif
4150 
4151 // Minimum size of a free suballocation to register it in the free suballocation collection.
4152 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4153 
4154 /*
4155 Performs binary search and returns iterator to first element that is greater or
4156 equal to (key), according to comparison (cmp).
4157 
4158 Cmp should return true if first argument is less than second argument.
4159 
4160 Returned value is the found element, if present in the collection or place where
4161 new element with value (key) should be inserted.
4162 */
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    // Classic lower_bound: shrink index window [lo, hi) until it collapses on
    // the first element for which cmp(element, key) is false.
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1; // Element is less than key - answer lies to the right.
        }
        else
        {
            hi = mid; // Element is not less - it may itself be the answer.
        }
    }
    return beg + lo;
}
4181 
4182 template<typename CmpLess, typename IterT, typename KeyT>
4183 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4184 {
4185  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4186  beg, end, value, cmp);
4187  if(it == end ||
4188  (!cmp(*it, value) && !cmp(value, *it)))
4189  {
4190  return it;
4191  }
4192  return end;
4193 }
4194 
4195 /*
4196 Returns true if all pointers in the array are not-null and unique.
4197 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4198 T must be pointer type, e.g. VmaAllocation, VmaPool.
4199 */
4200 template<typename T>
4201 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4202 {
4203  for(uint32_t i = 0; i < count; ++i)
4204  {
4205  const T iPtr = arr[i];
4206  if(iPtr == VMA_NULL)
4207  {
4208  return false;
4209  }
4210  for(uint32_t j = i + 1; j < count; ++j)
4211  {
4212  if(iPtr == arr[j])
4213  {
4214  return false;
4215  }
4216  }
4217  }
4218  return true;
4219 }
4220 
4222 // Memory allocation
4223 
4224 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4225 {
4226  if((pAllocationCallbacks != VMA_NULL) &&
4227  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4228  {
4229  return (*pAllocationCallbacks->pfnAllocation)(
4230  pAllocationCallbacks->pUserData,
4231  size,
4232  alignment,
4233  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4234  }
4235  else
4236  {
4237  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4238  }
4239 }
4240 
4241 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4242 {
4243  if((pAllocationCallbacks != VMA_NULL) &&
4244  (pAllocationCallbacks->pfnFree != VMA_NULL))
4245  {
4246  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4247  }
4248  else
4249  {
4250  VMA_SYSTEM_FREE(ptr);
4251  }
4252 }
4253 
// Allocates raw, properly aligned storage for one T. No constructor is called.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
4259 
// Allocates raw, properly aligned storage for count objects of type T.
// No constructors are called.
// NOTE(review): sizeof(T) * count is not checked for overflow - callers are
// trusted to pass sane counts.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
4265 
4266 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4267 
4268 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4269 
4270 template<typename T>
4271 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4272 {
4273  ptr->~T();
4274  VmaFree(pAllocationCallbacks, ptr);
4275 }
4276 
4277 template<typename T>
4278 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4279 {
4280  if(ptr != VMA_NULL)
4281  {
4282  for(size_t i = count; i--; )
4283  {
4284  ptr[i].~T();
4285  }
4286  VmaFree(pAllocationCallbacks, ptr);
4287  }
4288 }
4289 
4290 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4291 {
4292  if(srcStr != VMA_NULL)
4293  {
4294  const size_t len = strlen(srcStr);
4295  char* const result = vma_new_array(allocs, char, len + 1);
4296  memcpy(result, srcStr, len + 1);
4297  return result;
4298  }
4299  else
4300  {
4301  return VMA_NULL;
4302  }
4303 }
4304 
4305 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4306 {
4307  if(str != VMA_NULL)
4308  {
4309  const size_t len = strlen(str);
4310  vma_delete_array(allocs, str, len + 1);
4311  }
4312 }
4313 
4314 // STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding constructor - a VmaStlAllocator<U> shares the same callbacks.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
4341 
4342 #if VMA_USE_STL_VECTOR
4343 
4344 #define VmaVector std::vector
4345 
// Inserts item into vec at the given index (std::vector flavor).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.insert(pos, item);
}
4351 
// Removes the element at the given index from vec (std::vector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.erase(pos);
}
4357 
4358 #else // #if VMA_USE_STL_VECTOR
4359 
4360 /* Class with interface compatible with subset of std::vector.
4361 T must be POD because constructors and destructors are not called and memcpy is
4362 used for these objects. */
4363 template<typename T, typename AllocatorT>
4364 class VmaVector
4365 {
4366 public:
4367  typedef T value_type;
4368 
4369  VmaVector(const AllocatorT& allocator) :
4370  m_Allocator(allocator),
4371  m_pArray(VMA_NULL),
4372  m_Count(0),
4373  m_Capacity(0)
4374  {
4375  }
4376 
4377  VmaVector(size_t count, const AllocatorT& allocator) :
4378  m_Allocator(allocator),
4379  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4380  m_Count(count),
4381  m_Capacity(count)
4382  {
4383  }
4384 
4385  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4386  // value is unused.
4387  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4388  : VmaVector(count, allocator) {}
4389 
4390  VmaVector(const VmaVector<T, AllocatorT>& src) :
4391  m_Allocator(src.m_Allocator),
4392  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4393  m_Count(src.m_Count),
4394  m_Capacity(src.m_Count)
4395  {
4396  if(m_Count != 0)
4397  {
4398  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4399  }
4400  }
4401 
4402  ~VmaVector()
4403  {
4404  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4405  }
4406 
4407  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4408  {
4409  if(&rhs != this)
4410  {
4411  resize(rhs.m_Count);
4412  if(m_Count != 0)
4413  {
4414  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4415  }
4416  }
4417  return *this;
4418  }
4419 
4420  bool empty() const { return m_Count == 0; }
4421  size_t size() const { return m_Count; }
4422  T* data() { return m_pArray; }
4423  const T* data() const { return m_pArray; }
4424 
4425  T& operator[](size_t index)
4426  {
4427  VMA_HEAVY_ASSERT(index < m_Count);
4428  return m_pArray[index];
4429  }
4430  const T& operator[](size_t index) const
4431  {
4432  VMA_HEAVY_ASSERT(index < m_Count);
4433  return m_pArray[index];
4434  }
4435 
4436  T& front()
4437  {
4438  VMA_HEAVY_ASSERT(m_Count > 0);
4439  return m_pArray[0];
4440  }
4441  const T& front() const
4442  {
4443  VMA_HEAVY_ASSERT(m_Count > 0);
4444  return m_pArray[0];
4445  }
4446  T& back()
4447  {
4448  VMA_HEAVY_ASSERT(m_Count > 0);
4449  return m_pArray[m_Count - 1];
4450  }
4451  const T& back() const
4452  {
4453  VMA_HEAVY_ASSERT(m_Count > 0);
4454  return m_pArray[m_Count - 1];
4455  }
4456 
4457  void reserve(size_t newCapacity, bool freeMemory = false)
4458  {
4459  newCapacity = VMA_MAX(newCapacity, m_Count);
4460 
4461  if((newCapacity < m_Capacity) && !freeMemory)
4462  {
4463  newCapacity = m_Capacity;
4464  }
4465 
4466  if(newCapacity != m_Capacity)
4467  {
4468  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4469  if(m_Count != 0)
4470  {
4471  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4472  }
4473  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4474  m_Capacity = newCapacity;
4475  m_pArray = newArray;
4476  }
4477  }
4478 
4479  void resize(size_t newCount, bool freeMemory = false)
4480  {
4481  size_t newCapacity = m_Capacity;
4482  if(newCount > m_Capacity)
4483  {
4484  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4485  }
4486  else if(freeMemory)
4487  {
4488  newCapacity = newCount;
4489  }
4490 
4491  if(newCapacity != m_Capacity)
4492  {
4493  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4494  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4495  if(elementsToCopy != 0)
4496  {
4497  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4498  }
4499  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4500  m_Capacity = newCapacity;
4501  m_pArray = newArray;
4502  }
4503 
4504  m_Count = newCount;
4505  }
4506 
4507  void clear(bool freeMemory = false)
4508  {
4509  resize(0, freeMemory);
4510  }
4511 
4512  void insert(size_t index, const T& src)
4513  {
4514  VMA_HEAVY_ASSERT(index <= m_Count);
4515  const size_t oldCount = size();
4516  resize(oldCount + 1);
4517  if(index < oldCount)
4518  {
4519  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4520  }
4521  m_pArray[index] = src;
4522  }
4523 
4524  void remove(size_t index)
4525  {
4526  VMA_HEAVY_ASSERT(index < m_Count);
4527  const size_t oldCount = size();
4528  if(index < oldCount - 1)
4529  {
4530  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4531  }
4532  resize(oldCount - 1);
4533  }
4534 
4535  void push_back(const T& src)
4536  {
4537  const size_t newIndex = size();
4538  resize(newIndex + 1);
4539  m_pArray[newIndex] = src;
4540  }
4541 
4542  void pop_back()
4543  {
4544  VMA_HEAVY_ASSERT(m_Count > 0);
4545  resize(size() - 1);
4546  }
4547 
4548  void push_front(const T& src)
4549  {
4550  insert(0, src);
4551  }
4552 
4553  void pop_front()
4554  {
4555  VMA_HEAVY_ASSERT(m_Count > 0);
4556  remove(0);
4557  }
4558 
4559  typedef T* iterator;
4560 
4561  iterator begin() { return m_pArray; }
4562  iterator end() { return m_pArray + m_Count; }
4563 
4564 private:
4565  AllocatorT m_Allocator;
4566  T* m_pArray;
4567  size_t m_Count;
4568  size_t m_Capacity;
4569 };
4570 
// Inserts item into vec at the given index (VmaVector flavor); mirrors the
// std::vector overload so both containers share one call syntax.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4576 
// Removes the element at the given index from vec (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4582 
4583 #endif // #if VMA_USE_STL_VECTOR
4584 
// Inserts value into a vector kept sorted according to CmpLess.
// Returns the index at which the new element was placed.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBegin = vector.data();
    typename VectorT::value_type* const pEnd = pBegin + vector.size();
    const size_t insertIndex =
        VmaBinaryFindFirstNotLess(pBegin, pEnd, value, CmpLess()) - pBegin;
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
4596 
// Finds an element equivalent to value in a sorted vector and removes it.
// Returns true when such an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    // Lower bound found an element >= value; remove it only if equivalent.
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    return true;
}
4614 
4616 // class VmaPoolAllocator
4617 
4618 /*
4619 Allocator for objects of type T using a list of arrays (pools) to speed up
4620 allocation. Number of elements that can be allocated is not bounded because
4621 allocator can create multiple blocks.
4622 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    T* Alloc();        // Returns a default-constructed T from the pool.
    void Free(T* ptr); // Destroys *ptr and returns its slot to the pool.

private:
    // A slot either holds a live T or, while free, the index of the next free
    // slot in the same block (in-place singly-linked free list).
    union Item
    {
        uint32_t NextFreeIndex;
        alignas(T) char Value[sizeof(T)];
    };

    // One contiguous array of items plus the head of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4653 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // NOTE(review): presumably > 1 so the geometric block growth in
    // CreateNewBlock (Capacity * 3 / 2) actually grows - confirm.
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
4662 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases the raw item arrays. Item's destructor is trivial (it is a
    // union of uint32_t and a char buffer), so any T still alive in a slot is
    // not individually destroyed here - callers must Free() what they Alloc().
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}
4670 
4671 template<typename T>
4672 T* VmaPoolAllocator<T>::Alloc()
4673 {
4674  for(size_t i = m_ItemBlocks.size(); i--; )
4675  {
4676  ItemBlock& block = m_ItemBlocks[i];
4677  // This block has some free items: Use first one.
4678  if(block.FirstFreeIndex != UINT32_MAX)
4679  {
4680  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4681  block.FirstFreeIndex = pItem->NextFreeIndex;
4682  T* result = (T*)&pItem->Value;
4683  new(result)T(); // Explicit constructor call.
4684  return result;
4685  }
4686  }
4687 
4688  // No block has free item: Create new one and use it.
4689  ItemBlock& newBlock = CreateNewBlock();
4690  Item* const pItem = &newBlock.pItems[0];
4691  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4692  T* result = (T*)&pItem->Value;
4693  new(result)T(); // Explicit constructor call.
4694  return result;
4695 }
4696 
4697 template<typename T>
4698 void VmaPoolAllocator<T>::Free(T* ptr)
4699 {
4700  // Search all memory blocks to find ptr.
4701  for(size_t i = m_ItemBlocks.size(); i--; )
4702  {
4703  ItemBlock& block = m_ItemBlocks[i];
4704 
4705  // Casting to union.
4706  Item* pItemPtr;
4707  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4708 
4709  // Check if pItemPtr is in address range of this block.
4710  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4711  {
4712  ptr->~T(); // Explicit destructor call.
4713  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4714  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4715  block.FirstFreeIndex = index;
4716  return;
4717  }
4718  }
4719  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4720 }
4721 
4722 template<typename T>
4723 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4724 {
4725  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4726  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4727 
4728  const ItemBlock newBlock = {
4729  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4730  newBlockCapacity,
4731  0 };
4732 
4733  m_ItemBlocks.push_back(newBlock);
4734 
4735  // Setup singly-linked list of all free items in this block.
4736  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4737  newBlock.pItems[i].NextFreeIndex = i + 1;
4738  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4739  return m_ItemBlocks.back();
4740 }
4741 
4743 // class VmaRawList, VmaList
4744 
4745 #if VMA_USE_STL_LIST
4746 
4747 #define VmaList std::list
4748 
4749 #else // #if VMA_USE_STL_LIST
4750 
// Node of VmaRawList: doubly linked, stores the element by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the first item.
    VmaListItem* pNext; // Null for the last item.
    T Value;
};
4758 
4759 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items; the list becomes empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Append/prepend a new item without assigning its Value; returns the item.
    ItemType* PushBack();
    ItemType* PushFront();
    // Append/prepend a copy of value; returns the new item.
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks pItem and returns it to the item allocator.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool for list nodes.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;
};
4803 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // First pool block holds 128 nodes.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
4813 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's own destructor releases the node storage wholesale.
}
4820 
4821 template<typename T>
4822 void VmaRawList<T>::Clear()
4823 {
4824  if(IsEmpty() == false)
4825  {
4826  ItemType* pItem = m_pBack;
4827  while(pItem != VMA_NULL)
4828  {
4829  ItemType* const pPrevItem = pItem->pPrev;
4830  m_ItemAllocator.Free(pItem);
4831  pItem = pPrevItem;
4832  }
4833  m_pFront = VMA_NULL;
4834  m_pBack = VMA_NULL;
4835  m_Count = 0;
4836  }
4837 }
4838 
4839 template<typename T>
4840 VmaListItem<T>* VmaRawList<T>::PushBack()
4841 {
4842  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4843  pNewItem->pNext = VMA_NULL;
4844  if(IsEmpty())
4845  {
4846  pNewItem->pPrev = VMA_NULL;
4847  m_pFront = pNewItem;
4848  m_pBack = pNewItem;
4849  m_Count = 1;
4850  }
4851  else
4852  {
4853  pNewItem->pPrev = m_pBack;
4854  m_pBack->pNext = pNewItem;
4855  m_pBack = pNewItem;
4856  ++m_Count;
4857  }
4858  return pNewItem;
4859 }
4860 
4861 template<typename T>
4862 VmaListItem<T>* VmaRawList<T>::PushFront()
4863 {
4864  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4865  pNewItem->pPrev = VMA_NULL;
4866  if(IsEmpty())
4867  {
4868  pNewItem->pNext = VMA_NULL;
4869  m_pFront = pNewItem;
4870  m_pBack = pNewItem;
4871  m_Count = 1;
4872  }
4873  else
4874  {
4875  pNewItem->pNext = m_pFront;
4876  m_pFront->pPrev = pNewItem;
4877  m_pFront = pNewItem;
4878  ++m_Count;
4879  }
4880  return pNewItem;
4881 }
4882 
// Appends a copy of value at the end of the list; returns the new item.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
4890 
// Prepends a copy of value at the beginning of the list; returns the new item.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
4898 
4899 template<typename T>
4900 void VmaRawList<T>::PopBack()
4901 {
4902  VMA_HEAVY_ASSERT(m_Count > 0);
4903  ItemType* const pBackItem = m_pBack;
4904  ItemType* const pPrevItem = pBackItem->pPrev;
4905  if(pPrevItem != VMA_NULL)
4906  {
4907  pPrevItem->pNext = VMA_NULL;
4908  }
4909  m_pBack = pPrevItem;
4910  m_ItemAllocator.Free(pBackItem);
4911  --m_Count;
4912 }
4913 
4914 template<typename T>
4915 void VmaRawList<T>::PopFront()
4916 {
4917  VMA_HEAVY_ASSERT(m_Count > 0);
4918  ItemType* const pFrontItem = m_pFront;
4919  ItemType* const pNextItem = pFrontItem->pNext;
4920  if(pNextItem != VMA_NULL)
4921  {
4922  pNextItem->pPrev = VMA_NULL;
4923  }
4924  m_pFront = pNextItem;
4925  m_ItemAllocator.Free(pFrontItem);
4926  --m_Count;
4927 }
4928 
4929 template<typename T>
4930 void VmaRawList<T>::Remove(ItemType* pItem)
4931 {
4932  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4933  VMA_HEAVY_ASSERT(m_Count > 0);
4934 
4935  if(pItem->pPrev != VMA_NULL)
4936  {
4937  pItem->pPrev->pNext = pItem->pNext;
4938  }
4939  else
4940  {
4941  VMA_HEAVY_ASSERT(m_pFront == pItem);
4942  m_pFront = pItem->pNext;
4943  }
4944 
4945  if(pItem->pNext != VMA_NULL)
4946  {
4947  pItem->pNext->pPrev = pItem->pPrev;
4948  }
4949  else
4950  {
4951  VMA_HEAVY_ASSERT(m_pBack == pItem);
4952  m_pBack = pItem->pPrev;
4953  }
4954 
4955  m_ItemAllocator.Free(pItem);
4956  --m_Count;
4957 }
4958 
4959 template<typename T>
4960 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4961 {
4962  if(pItem != VMA_NULL)
4963  {
4964  ItemType* const prevItem = pItem->pPrev;
4965  ItemType* const newItem = m_ItemAllocator.Alloc();
4966  newItem->pPrev = prevItem;
4967  newItem->pNext = pItem;
4968  pItem->pPrev = newItem;
4969  if(prevItem != VMA_NULL)
4970  {
4971  prevItem->pNext = newItem;
4972  }
4973  else
4974  {
4975  VMA_HEAVY_ASSERT(m_pFront == pItem);
4976  m_pFront = newItem;
4977  }
4978  ++m_Count;
4979  return newItem;
4980  }
4981  else
4982  return PushBack();
4983 }
4984 
4985 template<typename T>
4986 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4987 {
4988  if(pItem != VMA_NULL)
4989  {
4990  ItemType* const nextItem = pItem->pNext;
4991  ItemType* const newItem = m_ItemAllocator.Alloc();
4992  newItem->pNext = nextItem;
4993  newItem->pPrev = pItem;
4994  pItem->pNext = newItem;
4995  if(nextItem != VMA_NULL)
4996  {
4997  nextItem->pPrev = newItem;
4998  }
4999  else
5000  {
5001  VMA_HEAVY_ASSERT(m_pBack == pItem);
5002  m_pBack = newItem;
5003  }
5004  ++m_Count;
5005  return newItem;
5006  }
5007  else
5008  return PushFront();
5009 }
5010 
// Inserts a copy of value before pItem (or at the end when pItem is null);
// returns the new item.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
5018 
// Inserts a copy of value after pItem (or at the beginning when pItem is null);
// returns the new item.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
5026 
5027 template<typename T, typename AllocatorT>
5028 class VmaList
5029 {
5030  VMA_CLASS_NO_COPY(VmaList)
5031 public:
5032  class iterator
5033  {
5034  public:
5035  iterator() :
5036  m_pList(VMA_NULL),
5037  m_pItem(VMA_NULL)
5038  {
5039  }
5040 
5041  T& operator*() const
5042  {
5043  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5044  return m_pItem->Value;
5045  }
5046  T* operator->() const
5047  {
5048  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5049  return &m_pItem->Value;
5050  }
5051 
5052  iterator& operator++()
5053  {
5054  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5055  m_pItem = m_pItem->pNext;
5056  return *this;
5057  }
5058  iterator& operator--()
5059  {
5060  if(m_pItem != VMA_NULL)
5061  {
5062  m_pItem = m_pItem->pPrev;
5063  }
5064  else
5065  {
5066  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5067  m_pItem = m_pList->Back();
5068  }
5069  return *this;
5070  }
5071 
5072  iterator operator++(int)
5073  {
5074  iterator result = *this;
5075  ++*this;
5076  return result;
5077  }
5078  iterator operator--(int)
5079  {
5080  iterator result = *this;
5081  --*this;
5082  return result;
5083  }
5084 
5085  bool operator==(const iterator& rhs) const
5086  {
5087  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5088  return m_pItem == rhs.m_pItem;
5089  }
5090  bool operator!=(const iterator& rhs) const
5091  {
5092  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5093  return m_pItem != rhs.m_pItem;
5094  }
5095 
5096  private:
5097  VmaRawList<T>* m_pList;
5098  VmaListItem<T>* m_pItem;
5099 
5100  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5101  m_pList(pList),
5102  m_pItem(pItem)
5103  {
5104  }
5105 
5106  friend class VmaList<T, AllocatorT>;
5107  };
5108 
5109  class const_iterator
5110  {
5111  public:
5112  const_iterator() :
5113  m_pList(VMA_NULL),
5114  m_pItem(VMA_NULL)
5115  {
5116  }
5117 
5118  const_iterator(const iterator& src) :
5119  m_pList(src.m_pList),
5120  m_pItem(src.m_pItem)
5121  {
5122  }
5123 
5124  const T& operator*() const
5125  {
5126  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5127  return m_pItem->Value;
5128  }
5129  const T* operator->() const
5130  {
5131  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5132  return &m_pItem->Value;
5133  }
5134 
5135  const_iterator& operator++()
5136  {
5137  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5138  m_pItem = m_pItem->pNext;
5139  return *this;
5140  }
5141  const_iterator& operator--()
5142  {
5143  if(m_pItem != VMA_NULL)
5144  {
5145  m_pItem = m_pItem->pPrev;
5146  }
5147  else
5148  {
5149  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5150  m_pItem = m_pList->Back();
5151  }
5152  return *this;
5153  }
5154 
5155  const_iterator operator++(int)
5156  {
5157  const_iterator result = *this;
5158  ++*this;
5159  return result;
5160  }
5161  const_iterator operator--(int)
5162  {
5163  const_iterator result = *this;
5164  --*this;
5165  return result;
5166  }
5167 
5168  bool operator==(const const_iterator& rhs) const
5169  {
5170  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5171  return m_pItem == rhs.m_pItem;
5172  }
5173  bool operator!=(const const_iterator& rhs) const
5174  {
5175  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5176  return m_pItem != rhs.m_pItem;
5177  }
5178 
5179  private:
5180  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5181  m_pList(pList),
5182  m_pItem(pItem)
5183  {
5184  }
5185 
5186  const VmaRawList<T>* m_pList;
5187  const VmaListItem<T>* m_pItem;
5188 
5189  friend class VmaList<T, AllocatorT>;
5190  };
5191 
    // Constructs an empty list; allocations go through the VkAllocationCallbacks
    // carried by the given allocator.
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    // end() is represented by a null item pointer.
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Removes the element at `it`; `it` is invalidated by this call.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts `value` before `it`; returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
5211 
5212 #endif // #if VMA_USE_STL_LIST
5213 
5215 // class VmaMap
5216 
5217 // Unused in this version.
5218 #if 0
5219 
5220 #if VMA_USE_STL_UNORDERED_MAP
5221 
5222 #define VmaPair std::pair
5223 
5224 #define VMA_MAP_TYPE(KeyT, ValueT) \
5225  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5226 
5227 #else // #if VMA_USE_STL_UNORDERED_MAP
5228 
// Minimal substitute for std::pair, used when VMA_USE_STL_UNORDERED_MAP is
// disabled. Default constructor value-initializes both members.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& srcFirst, const T2& srcSecond) : first(srcFirst), second(srcSecond) { }
};
5238 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Despite the name, the storage is a vector kept sorted by key (see insert()),
so find() is a binary search, not a hash lookup.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying vector and are
    // invalidated by insert() and erase().
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    // Returns iterator to the element with the given key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Elements ordered by pair.first (see VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
5261 
5262 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5263 
5264 template<typename FirstT, typename SecondT>
5265 struct VmaPairFirstLess
5266 {
5267  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5268  {
5269  return lhs.first < rhs.first;
5270  }
5271  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5272  {
5273  return lhs.first < rhsFirst;
5274  }
5275 };
5276 
// Inserts `pair` at the position that keeps m_Vector sorted by key.
// Duplicate keys are not rejected; the new element lands before any
// existing element with an equal key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary search for the first element whose key is not less than pair's key.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
5287 
// Binary-searches the key-sorted vector for `key`.
// Returns a pointer to the matching element, or end() if not found.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // The lower bound is a match only if it exists and its key is equal.
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
5305 
// Removes the element pointed to by `it`, shifting subsequent elements left.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
5311 
5312 #endif // #if VMA_USE_STL_UNORDERED_MAP
5313 
5314 #endif // #if 0
5315 
5317 
class VmaDeviceMemoryBlock;

// Direction of a cache maintenance operation on mapped memory.
// NOTE(review): presumably maps to vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges respectively - confirm at call sites.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5321 
// Represents a single allocation made by the library: either a region of a
// larger VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or an object with its
// own private VkDeviceMemory (ALLOCATION_TYPE_DEDICATED). The active kind is
// indicated by m_Type; the per-kind data live in the anonymous union below.
struct VmaAllocation_T
{
private:
    // Set in m_MapCount when the allocation is persistently mapped
    // (created with VMA_ALLOCATION_CREATE_MAPPED_BIT).
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a string owned by this allocation
        // (see FreeUserDataString()).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    /*
    This struct is allocated using VmaPoolAllocator, so it has no real
    constructor/destructor; Ctor()/Dtor() are invoked explicitly instead.
    */

    // Resets the object to the "none" state. One of the Init* methods must be
    // called afterwards to make it a usable allocation.
    void Ctor(uint32_t currentFrameIndex, bool userDataString)
    {
        m_Alignment = 1;
        m_Size = 0;
        m_MemoryTypeIndex = 0;
        m_pUserData = VMA_NULL;
        m_LastUseFrameIndex = currentFrameIndex;
        m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
        m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
        m_MapCount = 0;
        m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;

#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    // Verifies the allocation was fully unmapped and its owned user-data
    // string (if any) was freed before destruction.
    void Dtor()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this object into a suballocation of `block`.
    // Must currently be in the "none" state.
    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MemoryTypeIndex = memoryTypeIndex;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an allocation that is already lost: a block
    // allocation with a null block. m_LastUseFrameIndex must already be
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_MemoryTypeIndex = 0;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves the allocation to a different block/offset (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeOffset(VkDeviceSize newOffset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_MemoryTypeIndex = memoryTypeIndex;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; `expected` is updated with the
    // current value on failure (compare_exchange_weak semantics).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records buffer/image usage flags for statistics; may be set only once.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
5543 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of the region from the start of the block.
    VkDeviceSize size;   // Size of the region in bytes.
    // Allocation occupying this region (may be null for free ranges).
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
5555 
// Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
5571 
// List of suballocations making up a single device memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5576 
// Identifies where/how an allocation request will be satisfied within a block.
enum class VmaAllocationRequestType
{
    Normal,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};
5585 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Opaque data for the metadata implementation that produced the request.
    VmaAllocationRequestType type;

    // Heuristic cost of satisfying this request: bytes of allocations that
    // would be lost, plus a fixed per-allocation penalty.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
5614 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms are VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Initializes metadata for a block with the given total size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for implementing PrintDetailedMap() in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
5700 
// Asserts and returns false from the enclosing function when `cond` is false.
// Intended for Validate() implementations; do/while(false) makes it statement-safe.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
5705 
/*
General-purpose metadata implementation: all suballocations (used and free)
are stored in a single list; free suballocations above a size threshold are
additionally indexed in m_FreeSuballocationsBySize, sorted by size ascending,
to speed up allocation searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
5805 
5806 /*
5807 Allocations and their references in internal data structure look like this:
5808 
5809 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5810 
5811  0 +-------+
5812  | |
5813  | |
5814  | |
5815  +-------+
5816  | Alloc | 1st[m_1stNullItemsBeginCount]
5817  +-------+
5818  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5819  +-------+
5820  | ... |
5821  +-------+
5822  | Alloc | 1st[1st.size() - 1]
5823  +-------+
5824  | |
5825  | |
5826  | |
5827 GetSize() +-------+
5828 
5829 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5830 
5831  0 +-------+
5832  | Alloc | 2nd[0]
5833  +-------+
5834  | Alloc | 2nd[1]
5835  +-------+
5836  | ... |
5837  +-------+
5838  | Alloc | 2nd[2nd.size() - 1]
5839  +-------+
5840  | |
5841  | |
5842  | |
5843  +-------+
5844  | Alloc | 1st[m_1stNullItemsBeginCount]
5845  +-------+
5846  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5847  +-------+
5848  | ... |
5849  +-------+
5850  | Alloc | 1st[1st.size() - 1]
5851  +-------+
5852  | |
5853 GetSize() +-------+
5854 
5855 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5856 
5857  0 +-------+
5858  | |
5859  | |
5860  | |
5861  +-------+
5862  | Alloc | 1st[m_1stNullItemsBeginCount]
5863  +-------+
5864  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5865  +-------+
5866  | ... |
5867  +-------+
5868  | Alloc | 1st[1st.size() - 1]
5869  +-------+
5870  | |
5871  | |
5872  | |
5873  +-------+
5874  | Alloc | 2nd[2nd.size() - 1]
5875  +-------+
5876  | ... |
5877  +-------+
5878  | Alloc | 2nd[1]
5879  +-------+
5880  | Alloc | 2nd[0]
5881 GetSize() +-------+
5882 
5883 */
/*
Metadata for the linear allocation algorithm (ring buffer / stack /
double stack). See the diagram-comment above for the layout of the two
suballocation vectors in each SECOND_VECTOR_MODE.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    // Helpers for CreateAllocationRequest(), split by allocation direction.
    bool CreateAllocationRequest_LowerAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
6002 
/*
- GetSize() is the original size of allocated memory block.
- m_UsableSize is this size aligned down to a power of two.
  All allocations and calculations happen relative to m_UsableSize.
- GetUnusableSize() is the difference between them.
  It is reported as a separate, unused range, not available for allocations.

Node at level 0 has size = m_UsableSize.
Each next level contains nodes with size 2 times smaller than current level.
m_LevelCount is the maximum number of levels to use in the current object.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is a single free node covering the whole block.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Minimum size of a tree node, in bytes; the tree is never split below this.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled by ValidateNode() and compared against the
    // cached counters in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. Per-type payload lives in the union below.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            // TYPE_FREE: links in the per-level free list.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: right child is leftChild->buddy.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of free nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down from the root.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
6149 
6150 /*
6151 Represents a single block of device memory (`VkDeviceMemory`) with all the
6152 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6153 
6154 Thread-safety: This class must be externally synchronized.
6155 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Metadata object managing suballocations within this block.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // The block must be fully unmapped and its memory released (via
        // Destroy()) before destruction.
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps/unmaps the block's memory. `count` adjusts the mapping reference
    // count (see m_MapCount). ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/validate magic-number markers around an allocation's region,
    // for corruption detection.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;  // Nonzero while the block is mapped (see destructor assert).
    void* m_pMappedData;
};
6228 
// Comparator functor ordering raw pointers by address value.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const { return lhs < rhs; }
};
6236 
// One relocation requested by a defragmentation algorithm: copy `size` bytes
// from srcOffset in block srcBlockIndex to dstOffset in block dstBlockIndex.
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size;
};
6245 
6246 class VmaDefragmentationAlgorithm;
6247 
6248 /*
6249 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6250 Vulkan memory type.
6251 
6252 Synchronized internally with a mutex.
6253 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Creates the minimum number of blocks (m_MinBlockCount) up front.
    VkResult CreateMinBlocks();

    VmaAllocator GetAllocator() const { return m_hAllocator; }
    VmaPool GetParentPool() const { return m_hParentPool; }
    // True if this vector belongs to a custom pool rather than serving as
    // one of the allocator's default per-memory-type vectors.
    bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty();
    bool IsCorruptionDetectionEnabled() const;

    // Allocates `allocationCount` allocations, filling pAllocations.
    VkResult Allocate(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void Free(const VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    ////////////////////////////////////////////////////////////////////////
    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const VmaPool m_hParentPool;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    VMA_RW_MUTEX m_Mutex;

    /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) -
    a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // Allocates a single allocation (one element of the array filled by Allocate()).
    VkResult AllocatePage(
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);

    void UpdateHasEmptyBlock();
};
6400 
// Implementation of the VmaPool handle: a custom memory pool, which is
// essentially a wrapper around one VmaBlockVector plus an id and a name.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once (asserted as 0 beforehand).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

    const char* GetName() const { return m_Name; }
    void SetName(const char* pName);

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
    char* m_Name;
};
6427 
6428 /*
6429 Performs defragmentation:
6430 
6431 - Updates `pBlockVector->m_pMetadata`.
6432 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6433 - Does not move actual data, only returns requested moves as `moves`.
6434 */
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers a single allocation as a candidate for moving.
    // pChanged, if not null, receives whether the allocation was moved.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks all allocations of the block vector as candidates.
    virtual void AddAll() = 0;

    // Computes the moves (appended to `moves`) within the given limits.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // An allocation registered via AddAllocation, with its optional
    // out-flag to report whether it was changed.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
6485 
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Orders allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Orders allocations by offset, descending.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block bookkeeping: the block, its original index in the block
    // vector, and the allocations registered for defragmentation.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true), // Pessimistic default until calculated.
            m_Allocations(pAllocationCallbacks)
        {
        }

        // The block has non-movable allocations if not every allocation in it
        // was registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Heterogeneous comparison by block pointer, e.g. for binary search.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of the algorithm; called repeatedly by Defragment().
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
6612 
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // This algorithm only counts registered allocations; it does not track
    // them individually (unlike the Generic algorithm).
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Small fixed-size cache of free regions. Keeps at most MAX_COUNT entries,
    // preferring larger ones; regions smaller than
    // VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are ignored.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // blockInfoIndex == SIZE_MAX marks an entry as invalid/empty.
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            // Too small to be worth tracking.
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            // Overwrite the chosen slot (if any) with the new, larger region.
            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Finds a tracked free region that can hold `size` bytes at the given
        // alignment, consumes it, and returns its location via out-params.
        // Among fitting regions, picks the one leaving the most space after
        // the placed allocation. Returns false if none fits.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
6760 
// Per-block state kept during defragmentation of one block vector.
struct VmaBlockDefragmentationContext
{
    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;   // Combination of BLOCK_FLAG bits.
    VkBuffer hBuffer; // Buffer bound to the block's memory, if one was created.
};
6770 
// Defragmentation state for a single block vector: owns the chosen
// defragmentation algorithm and collects its result and per-block contexts.
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    VkResult res;     // Result of the defragmentation of this block vector.
    bool mutexLocked; // Whether the block vector's mutex is held by this context.
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    // Creates m_pAlgorithm and feeds it the allocations collected so far.
    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
6814 
// Implementation of the VmaDefragmentationContext handle: top-level
// defragmentation state spanning the default pools and any custom pools added.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    void AddPools(uint32_t poolCount, VmaPool* pPools);
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
6854 
6855 #if VMA_RECORDING_ENABLED
6856 
6857 class VmaRecorder
6858 {
6859 public:
6860  VmaRecorder();
6861  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6862  void WriteConfiguration(
6863  const VkPhysicalDeviceProperties& devProps,
6864  const VkPhysicalDeviceMemoryProperties& memProps,
6865  uint32_t vulkanApiVersion,
6866  bool dedicatedAllocationExtensionEnabled,
6867  bool bindMemory2ExtensionEnabled,
6868  bool memoryBudgetExtensionEnabled);
6869  ~VmaRecorder();
6870 
6871  void RecordCreateAllocator(uint32_t frameIndex);
6872  void RecordDestroyAllocator(uint32_t frameIndex);
6873  void RecordCreatePool(uint32_t frameIndex,
6874  const VmaPoolCreateInfo& createInfo,
6875  VmaPool pool);
6876  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6877  void RecordAllocateMemory(uint32_t frameIndex,
6878  const VkMemoryRequirements& vkMemReq,
6879  const VmaAllocationCreateInfo& createInfo,
6880  VmaAllocation allocation);
6881  void RecordAllocateMemoryPages(uint32_t frameIndex,
6882  const VkMemoryRequirements& vkMemReq,
6883  const VmaAllocationCreateInfo& createInfo,
6884  uint64_t allocationCount,
6885  const VmaAllocation* pAllocations);
6886  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6887  const VkMemoryRequirements& vkMemReq,
6888  bool requiresDedicatedAllocation,
6889  bool prefersDedicatedAllocation,
6890  const VmaAllocationCreateInfo& createInfo,
6891  VmaAllocation allocation);
6892  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6893  const VkMemoryRequirements& vkMemReq,
6894  bool requiresDedicatedAllocation,
6895  bool prefersDedicatedAllocation,
6896  const VmaAllocationCreateInfo& createInfo,
6897  VmaAllocation allocation);
6898  void RecordFreeMemory(uint32_t frameIndex,
6899  VmaAllocation allocation);
6900  void RecordFreeMemoryPages(uint32_t frameIndex,
6901  uint64_t allocationCount,
6902  const VmaAllocation* pAllocations);
6903  void RecordSetAllocationUserData(uint32_t frameIndex,
6904  VmaAllocation allocation,
6905  const void* pUserData);
6906  void RecordCreateLostAllocation(uint32_t frameIndex,
6907  VmaAllocation allocation);
6908  void RecordMapMemory(uint32_t frameIndex,
6909  VmaAllocation allocation);
6910  void RecordUnmapMemory(uint32_t frameIndex,
6911  VmaAllocation allocation);
6912  void RecordFlushAllocation(uint32_t frameIndex,
6913  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6914  void RecordInvalidateAllocation(uint32_t frameIndex,
6915  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6916  void RecordCreateBuffer(uint32_t frameIndex,
6917  const VkBufferCreateInfo& bufCreateInfo,
6918  const VmaAllocationCreateInfo& allocCreateInfo,
6919  VmaAllocation allocation);
6920  void RecordCreateImage(uint32_t frameIndex,
6921  const VkImageCreateInfo& imageCreateInfo,
6922  const VmaAllocationCreateInfo& allocCreateInfo,
6923  VmaAllocation allocation);
6924  void RecordDestroyBuffer(uint32_t frameIndex,
6925  VmaAllocation allocation);
6926  void RecordDestroyImage(uint32_t frameIndex,
6927  VmaAllocation allocation);
6928  void RecordTouchAllocation(uint32_t frameIndex,
6929  VmaAllocation allocation);
6930  void RecordGetAllocationInfo(uint32_t frameIndex,
6931  VmaAllocation allocation);
6932  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6933  VmaPool pool);
6934  void RecordDefragmentationBegin(uint32_t frameIndex,
6935  const VmaDefragmentationInfo2& info,
6937  void RecordDefragmentationEnd(uint32_t frameIndex,
6939  void RecordSetPoolName(uint32_t frameIndex,
6940  VmaPool pool,
6941  const char* name);
6942 
6943 private:
6944  struct CallParams
6945  {
6946  uint32_t threadId;
6947  double time;
6948  };
6949 
6950  class UserDataString
6951  {
6952  public:
6953  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6954  const char* GetString() const { return m_Str; }
6955 
6956  private:
6957  char m_PtrStr[17];
6958  const char* m_Str;
6959  };
6960 
6961  bool m_UseMutex;
6962  VmaRecordFlags m_Flags;
6963  FILE* m_File;
6964  VMA_MUTEX m_FileMutex;
6965  int64_t m_Freq;
6966  int64_t m_StartCounter;
6967 
6968  void GetBasicParams(CallParams& outParams);
6969 
6970  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6971  template<typename T>
6972  void PrintPointerList(uint64_t count, const T* pItems)
6973  {
6974  if(count)
6975  {
6976  fprintf(m_File, "%p", pItems[0]);
6977  for(uint64_t i = 1; i < count; ++i)
6978  {
6979  fprintf(m_File, " %p", pItems[i]);
6980  }
6981  }
6982  }
6983 
6984  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6985  void Flush();
6986 };
6987 
6988 #endif // #if VMA_RECORDING_ENABLED
6989 
6990 /*
6991 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6992 */
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);

    VmaAllocation Allocate();
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex; // Serializes access to m_Allocator (see class comment above).
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
7006 
// Tracks current memory usage per heap: bytes in VkDeviceMemory blocks and
// bytes occupied by allocations. With VK_EXT_memory_budget enabled, also
// caches the usage/budget numbers last fetched from the driver.
struct VmaCurrentBudgetData
{
    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];      // Bytes of VkDeviceMemory allocated per heap.
    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; // Bytes occupied by allocations per heap.

#if VMA_MEMORY_BUDGET
    // Number of allocation operations since the budget was last fetched;
    // used to decide when a refresh is due.
    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
    VMA_RW_MUTEX m_BudgetMutex;
    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];              // Usage reported by the driver at last fetch.
    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];             // Budget reported by the driver at last fetch.
    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];  // Snapshot of m_BlockBytes at last fetch.
#endif // #if VMA_MEMORY_BUDGET

    VmaCurrentBudgetData()
    {
        for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
        {
            m_BlockBytes[heapIndex] = 0;
            m_AllocationBytes[heapIndex] = 0;
#if VMA_MEMORY_BUDGET
            m_VulkanUsage[heapIndex] = 0;
            m_VulkanBudget[heapIndex] = 0;
            m_BlockBytesAtBudgetFetch[heapIndex] = 0;
#endif
        }

#if VMA_MEMORY_BUDGET
        m_OperationsSinceBudgetFetch = 0;
#endif
    }

    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
    {
        m_AllocationBytes[heapIndex] += allocationSize;
#if VMA_MEMORY_BUDGET
        ++m_OperationsSinceBudgetFetch;
#endif
    }

    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
    {
        // Sanity check: cannot free more bytes than are currently recorded.
        VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
        m_AllocationBytes[heapIndex] -= allocationSize;
#if VMA_MEMORY_BUDGET
        ++m_OperationsSinceBudgetFetch;
#endif
    }
};
7055 
// Main allocator object. This is the implementation behind the opaque
// VmaAllocator handle; one instance exists per vmaCreateAllocator() call.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    uint32_t m_VulkanApiVersion;
    bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
    bool m_UseExtMemoryBudget;
    VkDevice m_hDevice;
    VkInstance m_hInstance;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    // Pool-based allocator for VmaAllocation_T objects themselves.
    VmaAllocationObjectAllocator m_AllocationObjectAllocator;

    // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
    uint32_t m_HeapSizeLimitMask;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools, one per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Thread-safe usage/budget counters, see VmaCurrentBudgetData.
    VmaCurrentBudgetData m_Budget;

    // Two-phase construction: the constructor sets members, Init() performs
    // work that can fail and returns the VkResult.
    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided CPU allocation callbacks, or null to use defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective granularity: device limit, raised to the debug minimum if larger.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types additionally require nonCoherentAtomSize alignment.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Main deallocation function.
    void FreeMemory(
        size_t allocationCount,
        const VmaAllocation* pAllocations);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

    void GetBudget(
        VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
    VkResult BindVulkanBuffer(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkBuffer buffer,
        const void* pNext);
    // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
    VkResult BindVulkanImage(
        VkDeviceMemory memory,
        VkDeviceSize memoryOffset,
        VkImage image,
        const void* pNext);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

    /*
    Returns bit mask of memory types that can support defragmentation on GPU as
    they support creation of required buffer for copy operations.
    */
    uint32_t GetGpuDefragmentationMemoryTypeBits();

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    // Helper function only to be used inside AllocateDedicatedMemory.
    VkResult AllocateDedicatedMemoryPage(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        const VkMemoryAllocateInfo& allocInfo,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool withinBudget,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        size_t allocationCount,
        VmaAllocation* pAllocations);

    void FreeDedicatedMemory(const VmaAllocation allocation);

    /*
    Calculates and returns bit mask of memory types that can support defragmentation
    on GPU as they support creation of required buffer for copy operations.
    */
    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;

#if VMA_MEMORY_BUDGET
    void UpdateVulkanBudget();
#endif // #if VMA_MEMORY_BUDGET
};
7321 
7323 // Memory allocation #2 after VmaAllocator_T definition
7324 
7325 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7326 {
7327  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7328 }
7329 
7330 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7331 {
7332  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7333 }
7334 
7335 template<typename T>
7336 static T* VmaAllocate(VmaAllocator hAllocator)
7337 {
7338  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7339 }
7340 
7341 template<typename T>
7342 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7343 {
7344  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7345 }
7346 
7347 template<typename T>
7348 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7349 {
7350  if(ptr != VMA_NULL)
7351  {
7352  ptr->~T();
7353  VmaFree(hAllocator, ptr);
7354  }
7355 }
7356 
7357 template<typename T>
7358 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7359 {
7360  if(ptr != VMA_NULL)
7361  {
7362  for(size_t i = count; i--; )
7363  ptr[i].~T();
7364  VmaFree(hAllocator, ptr);
7365  }
7366 }
7367 
7369 // VmaStringBuilder
7370 
7371 #if VMA_STATS_STRING_ENABLED
7372 
// Minimal append-only string buffer built on VmaVector<char>.
// The buffer is NOT NUL-terminated; use GetLength()/GetData() together.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Appends the decimal representation of num.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends ptr formatted by VmaPtrToStr.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
7390 
7391 void VmaStringBuilder::Add(const char* pStr)
7392 {
7393  const size_t strLen = strlen(pStr);
7394  if(strLen > 0)
7395  {
7396  const size_t oldCount = m_Data.size();
7397  m_Data.resize(oldCount + strLen);
7398  memcpy(m_Data.data() + oldCount, pStr, strLen);
7399  }
7400 }
7401 
7402 void VmaStringBuilder::AddNumber(uint32_t num)
7403 {
7404  char buf[11];
7405  buf[10] = '\0';
7406  char *p = &buf[10];
7407  do
7408  {
7409  *--p = '0' + (num % 10);
7410  num /= 10;
7411  }
7412  while(num);
7413  Add(p);
7414 }
7415 
7416 void VmaStringBuilder::AddNumber(uint64_t num)
7417 {
7418  char buf[21];
7419  buf[20] = '\0';
7420  char *p = &buf[20];
7421  do
7422  {
7423  *--p = '0' + (num % 10);
7424  num /= 10;
7425  }
7426  while(num);
7427  Add(p);
7428 }
7429 
7430 void VmaStringBuilder::AddPointer(const void* ptr)
7431 {
7432  char buf[21];
7433  VmaPtrToStr(buf, sizeof(buf), ptr);
7434  Add(buf);
7435 }
7436 
7437 #endif // #if VMA_STATS_STRING_ENABLED
7438 
7440 // VmaJsonWriter
7441 
7442 #if VMA_STATS_STRING_ENABLED
7443 
// Streaming JSON writer used by the statistics-string code. Maintains a stack
// of open objects/arrays to insert commas, colons, and indentation
// automatically. Asserts (rather than returns errors) on misuse.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Collections: every Begin* must be matched by the corresponding End*.
    // singleLine suppresses newlines/indentation inside that collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Complete string value in one call.
    void WriteString(const char* pStr);
    // Incremental string building: BeginString, any number of ContinueString*,
    // then EndString. pStr arguments are optional convenience prefixes/suffixes.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // One indentation unit, repeated once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Counts written values; for objects, even counts mean a key is expected next.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Emits the separator/indent that must precede a new value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
7492 
// Indentation unit emitted per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
7494 
// Writer starts at top level: empty collection stack, not inside a string.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
7501 
// At destruction every BeginString and Begin{Object,Array} must have been closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
7507 
7508 void VmaJsonWriter::BeginObject(bool singleLine)
7509 {
7510  VMA_ASSERT(!m_InsideString);
7511 
7512  BeginValue(false);
7513  m_SB.Add('{');
7514 
7515  StackItem item;
7516  item.type = COLLECTION_TYPE_OBJECT;
7517  item.valueCount = 0;
7518  item.singleLineMode = singleLine;
7519  m_Stack.push_back(item);
7520 }
7521 
7522 void VmaJsonWriter::EndObject()
7523 {
7524  VMA_ASSERT(!m_InsideString);
7525 
7526  WriteIndent(true);
7527  m_SB.Add('}');
7528 
7529  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7530  m_Stack.pop_back();
7531 }
7532 
7533 void VmaJsonWriter::BeginArray(bool singleLine)
7534 {
7535  VMA_ASSERT(!m_InsideString);
7536 
7537  BeginValue(false);
7538  m_SB.Add('[');
7539 
7540  StackItem item;
7541  item.type = COLLECTION_TYPE_ARRAY;
7542  item.valueCount = 0;
7543  item.singleLineMode = singleLine;
7544  m_Stack.push_back(item);
7545 }
7546 
7547 void VmaJsonWriter::EndArray()
7548 {
7549  VMA_ASSERT(!m_InsideString);
7550 
7551  WriteIndent(true);
7552  m_SB.Add(']');
7553 
7554  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7555  m_Stack.pop_back();
7556 }
7557 
7558 void VmaJsonWriter::WriteString(const char* pStr)
7559 {
7560  BeginString(pStr);
7561  EndString();
7562 }
7563 
7564 void VmaJsonWriter::BeginString(const char* pStr)
7565 {
7566  VMA_ASSERT(!m_InsideString);
7567 
7568  BeginValue(true);
7569  m_SB.Add('"');
7570  m_InsideString = true;
7571  if(pStr != VMA_NULL && pStr[0] != '\0')
7572  {
7573  ContinueString(pStr);
7574  }
7575 }
7576 
7577 void VmaJsonWriter::ContinueString(const char* pStr)
7578 {
7579  VMA_ASSERT(m_InsideString);
7580 
7581  const size_t strLen = strlen(pStr);
7582  for(size_t i = 0; i < strLen; ++i)
7583  {
7584  char ch = pStr[i];
7585  if(ch == '\\')
7586  {
7587  m_SB.Add("\\\\");
7588  }
7589  else if(ch == '"')
7590  {
7591  m_SB.Add("\\\"");
7592  }
7593  else if(ch >= 32)
7594  {
7595  m_SB.Add(ch);
7596  }
7597  else switch(ch)
7598  {
7599  case '\b':
7600  m_SB.Add("\\b");
7601  break;
7602  case '\f':
7603  m_SB.Add("\\f");
7604  break;
7605  case '\n':
7606  m_SB.Add("\\n");
7607  break;
7608  case '\r':
7609  m_SB.Add("\\r");
7610  break;
7611  case '\t':
7612  m_SB.Add("\\t");
7613  break;
7614  default:
7615  VMA_ASSERT(0 && "Character not currently supported.");
7616  break;
7617  }
7618  }
7619 }
7620 
7621 void VmaJsonWriter::ContinueString(uint32_t n)
7622 {
7623  VMA_ASSERT(m_InsideString);
7624  m_SB.AddNumber(n);
7625 }
7626 
7627 void VmaJsonWriter::ContinueString(uint64_t n)
7628 {
7629  VMA_ASSERT(m_InsideString);
7630  m_SB.AddNumber(n);
7631 }
7632 
7633 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7634 {
7635  VMA_ASSERT(m_InsideString);
7636  m_SB.AddPointer(ptr);
7637 }
7638 
7639 void VmaJsonWriter::EndString(const char* pStr)
7640 {
7641  VMA_ASSERT(m_InsideString);
7642  if(pStr != VMA_NULL && pStr[0] != '\0')
7643  {
7644  ContinueString(pStr);
7645  }
7646  m_SB.Add('"');
7647  m_InsideString = false;
7648 }
7649 
7650 void VmaJsonWriter::WriteNumber(uint32_t n)
7651 {
7652  VMA_ASSERT(!m_InsideString);
7653  BeginValue(false);
7654  m_SB.AddNumber(n);
7655 }
7656 
7657 void VmaJsonWriter::WriteNumber(uint64_t n)
7658 {
7659  VMA_ASSERT(!m_InsideString);
7660  BeginValue(false);
7661  m_SB.AddNumber(n);
7662 }
7663 
7664 void VmaJsonWriter::WriteBool(bool b)
7665 {
7666  VMA_ASSERT(!m_InsideString);
7667  BeginValue(false);
7668  m_SB.Add(b ? "true" : "false");
7669 }
7670 
7671 void VmaJsonWriter::WriteNull()
7672 {
7673  VMA_ASSERT(!m_InsideString);
7674  BeginValue(false);
7675  m_SB.Add("null");
7676 }
7677 
7678 void VmaJsonWriter::BeginValue(bool isString)
7679 {
7680  if(!m_Stack.empty())
7681  {
7682  StackItem& currItem = m_Stack.back();
7683  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7684  currItem.valueCount % 2 == 0)
7685  {
7686  VMA_ASSERT(isString);
7687  }
7688 
7689  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7690  currItem.valueCount % 2 != 0)
7691  {
7692  m_SB.Add(": ");
7693  }
7694  else if(currItem.valueCount > 0)
7695  {
7696  m_SB.Add(", ");
7697  WriteIndent();
7698  }
7699  else
7700  {
7701  WriteIndent();
7702  }
7703  ++currItem.valueCount;
7704  }
7705 }
7706 
7707 void VmaJsonWriter::WriteIndent(bool oneLess)
7708 {
7709  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7710  {
7711  m_SB.AddNewLine();
7712 
7713  size_t count = m_Stack.size();
7714  if(count > 0 && oneLess)
7715  {
7716  --count;
7717  }
7718  for(size_t i = 0; i < count; ++i)
7719  {
7720  m_SB.Add(INDENT);
7721  }
7722  }
7723 }
7724 
7725 #endif // #if VMA_STATS_STRING_ENABLED
7726 
7728 
7729 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7730 {
7731  if(IsUserDataString())
7732  {
7733  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7734 
7735  FreeUserDataString(hAllocator);
7736 
7737  if(pUserData != VMA_NULL)
7738  {
7739  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
7740  }
7741  }
7742  else
7743  {
7744  m_pUserData = pUserData;
7745  }
7746 }
7747 
// Re-points this block allocation at a new (block, offset) pair, e.g. when the
// allocation is moved. If the block actually changes, the allocation's share
// of the mapping reference count is transferred from the old block to the new.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // This allocation's contribution: its explicit map count, plus one if
        // it is persistently mapped.
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        // Order matters: release references on the old block before acquiring
        // them on the new one.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
7769 
7770 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7771 {
7772  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7773  m_BlockAllocation.m_Offset = newOffset;
7774 }
7775 
7776 VkDeviceSize VmaAllocation_T::GetOffset() const
7777 {
7778  switch(m_Type)
7779  {
7780  case ALLOCATION_TYPE_BLOCK:
7781  return m_BlockAllocation.m_Offset;
7782  case ALLOCATION_TYPE_DEDICATED:
7783  return 0;
7784  default:
7785  VMA_ASSERT(0);
7786  return 0;
7787  }
7788 }
7789 
7790 VkDeviceMemory VmaAllocation_T::GetMemory() const
7791 {
7792  switch(m_Type)
7793  {
7794  case ALLOCATION_TYPE_BLOCK:
7795  return m_BlockAllocation.m_Block->GetDeviceMemory();
7796  case ALLOCATION_TYPE_DEDICATED:
7797  return m_DedicatedAllocation.m_hMemory;
7798  default:
7799  VMA_ASSERT(0);
7800  return VK_NULL_HANDLE;
7801  }
7802 }
7803 
7804 void* VmaAllocation_T::GetMappedData() const
7805 {
7806  switch(m_Type)
7807  {
7808  case ALLOCATION_TYPE_BLOCK:
7809  if(m_MapCount != 0)
7810  {
7811  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7812  VMA_ASSERT(pBlockData != VMA_NULL);
7813  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7814  }
7815  else
7816  {
7817  return VMA_NULL;
7818  }
7819  break;
7820  case ALLOCATION_TYPE_DEDICATED:
7821  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7822  return m_DedicatedAllocation.m_pMappedData;
7823  default:
7824  VMA_ASSERT(0);
7825  return VMA_NULL;
7826  }
7827 }
7828 
7829 bool VmaAllocation_T::CanBecomeLost() const
7830 {
7831  switch(m_Type)
7832  {
7833  case ALLOCATION_TYPE_BLOCK:
7834  return m_BlockAllocation.m_CanBecomeLost;
7835  case ALLOCATION_TYPE_DEDICATED:
7836  return false;
7837  default:
7838  VMA_ASSERT(0);
7839  return false;
7840  }
7841 }
7842 
// Attempts to atomically mark this allocation as lost. Returns true on
// success; false if the allocation was used too recently (within
// frameInUseCount frames of currentFrameIndex) or is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU; cannot be sacrificed.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // NOTE(review): the loop only terminates on CAS failure if
            // CompareExchangeLastUseFrameIndex refreshes localLastUseFrameIndex
            // with the observed value (standard compare-exchange semantics) —
            // confirm against its definition elsewhere in this file.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
7874 
7875 #if VMA_STATS_STRING_ENABLED
7876 
// Correspond to values of enum VmaSuballocationType.
// Order must match the enum exactly — entries are indexed by enum value
// in PrintParameters() and PrintDetailedMap_UnusedRange().
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
7886 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object. Output order is part of the stats-string format — do not reorder.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy: emit the text itself.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: emit its address as a string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are only recorded for buffers/images; 0 means "not recorded".
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
7922 
7923 #endif
7924 
7925 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7926 {
7927  VMA_ASSERT(IsUserDataString());
7928  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
7929  m_pUserData = VMA_NULL;
7930 }
7931 
7932 void VmaAllocation_T::BlockAllocMap()
7933 {
7934  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7935 
7936  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7937  {
7938  ++m_MapCount;
7939  }
7940  else
7941  {
7942  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7943  }
7944 }
7945 
7946 void VmaAllocation_T::BlockAllocUnmap()
7947 {
7948  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7949 
7950  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7951  {
7952  --m_MapCount;
7953  }
7954  else
7955  {
7956  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7957  }
7958 }
7959 
7960 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7961 {
7962  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7963 
7964  if(m_MapCount != 0)
7965  {
7966  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7967  {
7968  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7969  *ppData = m_DedicatedAllocation.m_pMappedData;
7970  ++m_MapCount;
7971  return VK_SUCCESS;
7972  }
7973  else
7974  {
7975  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7976  return VK_ERROR_MEMORY_MAP_FAILED;
7977  }
7978  }
7979  else
7980  {
7981  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7982  hAllocator->m_hDevice,
7983  m_DedicatedAllocation.m_hMemory,
7984  0, // offset
7985  VK_WHOLE_SIZE,
7986  0, // flags
7987  ppData);
7988  if(result == VK_SUCCESS)
7989  {
7990  m_DedicatedAllocation.m_pMappedData = *ppData;
7991  m_MapCount = 1;
7992  }
7993  return result;
7994  }
7995 }
7996 
7997 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7998 {
7999  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8000 
8001  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8002  {
8003  --m_MapCount;
8004  if(m_MapCount == 0)
8005  {
8006  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8007  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8008  hAllocator->m_hDevice,
8009  m_DedicatedAllocation.m_hMemory);
8010  }
8011  }
8012  else
8013  {
8014  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8015  }
8016 }
8017 
8018 #if VMA_STATS_STRING_ENABLED
8019 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one item, since they carry no extra
// information for a single allocation/range.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
8067 
8068 #endif // #if VMA_STATS_STRING_ENABLED
8069 
8070 struct VmaSuballocationItemSizeLess
8071 {
8072  bool operator()(
8073  const VmaSuballocationList::iterator lhs,
8074  const VmaSuballocationList::iterator rhs) const
8075  {
8076  return lhs->size < rhs->size;
8077  }
8078  bool operator()(
8079  const VmaSuballocationList::iterator lhs,
8080  VkDeviceSize rhsSize) const
8081  {
8082  return lhs->size < rhsSize;
8083  }
8084 };
8085 
8086 
8088 // class VmaBlockMetadata
8089 
// Base metadata starts with zero size; the real size is set later via Init().
// The allocation callbacks are cached for use by derived classes.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
8095 
8096 #if VMA_STATS_STRING_ENABLED
8097 
// Opens the JSON object for one memory block's detailed map: summary fields
// followed by the opening of the "Suballocations" array. Must be paired with
// PrintDetailedMap_End(), with *_Allocation/*_UnusedRange calls in between.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
8120 
// Writes one occupied suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
8134 
// Writes one free range as a single-line JSON object with type "FREE",
// mirroring the shape of the allocation entries.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
8152 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
8158 
8159 #endif // #if VMA_STATS_STRING_ENABLED
8160 
8162 // class VmaBlockMetadata_Generic
8163 
// Starts empty; Init() populates the block with its initial free range.
// Both containers use the allocator's CPU allocation callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
8172 
// Nothing to release explicitly; member containers clean up themselves.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
8176 
8177 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8178 {
8179  VmaBlockMetadata::Init(size);
8180 
8181  m_FreeCount = 1;
8182  m_SumFreeSize = size;
8183 
8184  VmaSuballocation suballoc = {};
8185  suballoc.offset = 0;
8186  suballoc.size = size;
8187  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8188  suballoc.hAllocation = VK_NULL_HANDLE;
8189 
8190  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8191  m_Suballocations.push_back(suballoc);
8192  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8193  --suballocItem;
8194  m_FreeSuballocationsBySize.push_back(suballocItem);
8195 }
8196 
// Exhaustively checks the internal invariants of this block's metadata.
// Returns true when everything is consistent; each VMA_VALIDATE is expected
// to bail out with false on failure (macro defined elsewhere — behavior
// assumed, verify against its definition).
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A free range must not carry an allocation handle, and vice versa.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free ranges at least this large are tracked in
            // m_FreeSuballocationsBySize.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the metadata about its placement.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The registry must reference only free ranges, sorted by size ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
8278 
8279 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8280 {
8281  if(!m_FreeSuballocationsBySize.empty())
8282  {
8283  return m_FreeSuballocationsBySize.back()->size;
8284  }
8285  else
8286  {
8287  return 0;
8288  }
8289 }
8290 
8291 bool VmaBlockMetadata_Generic::IsEmpty() const
8292 {
8293  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8294 }
8295 
8296 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8297 {
8298  outInfo.blockCount = 1;
8299 
8300  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8301  outInfo.allocationCount = rangeCount - m_FreeCount;
8302  outInfo.unusedRangeCount = m_FreeCount;
8303 
8304  outInfo.unusedBytes = m_SumFreeSize;
8305  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8306 
8307  outInfo.allocationSizeMin = UINT64_MAX;
8308  outInfo.allocationSizeMax = 0;
8309  outInfo.unusedRangeSizeMin = UINT64_MAX;
8310  outInfo.unusedRangeSizeMax = 0;
8311 
8312  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8313  suballocItem != m_Suballocations.cend();
8314  ++suballocItem)
8315  {
8316  const VmaSuballocation& suballoc = *suballocItem;
8317  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8318  {
8319  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8320  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8321  }
8322  else
8323  {
8324  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8325  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8326  }
8327  }
8328 }
8329 
8330 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8331 {
8332  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8333 
8334  inoutStats.size += GetSize();
8335  inoutStats.unusedSize += m_SumFreeSize;
8336  inoutStats.allocationCount += rangeCount - m_FreeCount;
8337  inoutStats.unusedRangeCount += m_FreeCount;
8338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8339 }
8340 
8341 #if VMA_STATS_STRING_ENABLED
8342 
8343 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8344 {
8345  PrintDetailedMap_Begin(json,
8346  m_SumFreeSize, // unusedBytes
8347  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8348  m_FreeCount); // unusedRangeCount
8349 
8350  size_t i = 0;
8351  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8352  suballocItem != m_Suballocations.cend();
8353  ++suballocItem, ++i)
8354  {
8355  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8356  {
8357  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8358  }
8359  else
8360  {
8361  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8362  }
8363  }
8364 
8365  PrintDetailedMap_End(json);
8366 }
8367 
8368 #endif // #if VMA_STATS_STRING_ENABLED
8369 
// Tries to find a place for an allocation of allocSize bytes with the given
// alignment, type, and strategy. On success fills *pAllocationRequest and
// returns true; the actual placement happens later in Alloc(). When
// canMakeOtherLost is true, a brute-force pass additionally considers
// regions occupied by allocations that can become lost.
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    // Upper-address (linear algorithm) placement is not supported by this metadata class.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    pAllocationRequest->type = VmaAllocationRequestType::Normal;

    // There is not enough total free space in this block to fullfill the request: Early return.
    if(canMakeOtherLost == false &&
        m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        // NOTE(review): the condition for this branch appears to have been lost
        // in extraction — upstream this is the best-fit strategy check
        // (strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT). Verify
        // against the original source before relying on this listing.
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Scan upward from the first large-enough candidate (best fit).
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
        {
            // Walk suballocations in address order and take the first free
            // range that fits — yields the lowest possible offset.
            for(VmaSuballocationList::iterator it = m_Suballocations.begin();
                it != m_Suballocations.end();
                ++it)
            {
                if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    it,
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = it;
                    return true;
                }
            }
        }
        else // WORST_FIT, FIRST_FIT
        {
            // Search staring from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        bool found = false;
        VmaAllocationRequest tmpAllocRequest = {};
        tmpAllocRequest.type = VmaAllocationRequestType::Normal;
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Candidate start points: free ranges and allocations that may become lost.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    // NOTE(review): the condition for this early-accept branch
                    // appears to have been lost in extraction — upstream it is
                    // the first-fit strategy check
                    // (strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT).
                    // Verify against the original source.
                    {
                        *pAllocationRequest = tmpAllocRequest;
                        pAllocationRequest->item = suballocIt;
                        break;
                    }
                    // Otherwise keep the candidate with the lowest cost.
                    if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                        pAllocationRequest->item = suballocIt;
                        found = true;
                    }
                }
            }
        }

        return found;
    }

    return false;
}
8531 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at (and advancing) pAllocationRequest->item, so the previously
// created request can be satisfied. Returns false if any allocation refuses
// to become lost; on success pAllocationRequest->item points at a free
// suballocation ready for Alloc().
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);

    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next allocation to be lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge adjacent free ranges; continue from
            // the iterator it returns.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
8565 
8566 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8567 {
8568  uint32_t lostAllocationCount = 0;
8569  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8570  it != m_Suballocations.end();
8571  ++it)
8572  {
8573  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8574  it->hAllocation->CanBecomeLost() &&
8575  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8576  {
8577  it = FreeSuballocation(it);
8578  ++lostAllocationCount;
8579  }
8580  }
8581  return lostAllocationCount;
8582 }
8583 
8584 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8585 {
8586  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8587  it != m_Suballocations.end();
8588  ++it)
8589  {
8590  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8591  {
8592  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8593  {
8594  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8595  return VK_ERROR_VALIDATION_FAILED_EXT;
8596  }
8597  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8598  {
8599  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8600  return VK_ERROR_VALIDATION_FAILED_EXT;
8601  }
8602  }
8603  }
8604 
8605  return VK_SUCCESS;
8606 }
8607 
// Commits a previously created allocation request: converts the free
// suballocation at request.item into a used one of allocSize bytes at
// request.offset, inserting new free suballocations for any leftover space
// before and/or after it, and updates m_FreeCount / m_SumFreeSize.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before suballoc.size is overwritten below,
    // because the registry is searched by the item's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each inserted padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // The padding bytes remain free, so only the allocated bytes leave the free total.
    m_SumFreeSize -= allocSize;
}
8672 
8673 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8674 {
8675  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8676  suballocItem != m_Suballocations.end();
8677  ++suballocItem)
8678  {
8679  VmaSuballocation& suballoc = *suballocItem;
8680  if(suballoc.hAllocation == allocation)
8681  {
8682  FreeSuballocation(suballocItem);
8683  VMA_HEAVY_ASSERT(Validate());
8684  return;
8685  }
8686  }
8687  VMA_ASSERT(0 && "Not found!");
8688 }
8689 
8690 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8691 {
8692  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8693  suballocItem != m_Suballocations.end();
8694  ++suballocItem)
8695  {
8696  VmaSuballocation& suballoc = *suballocItem;
8697  if(suballoc.offset == offset)
8698  {
8699  FreeSuballocation(suballocItem);
8700  return;
8701  }
8702  }
8703  VMA_ASSERT(0 && "Not found!");
8704 }
8705 
8706 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8707 {
8708  VkDeviceSize lastSize = 0;
8709  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8710  {
8711  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8712 
8713  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8714  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8715  VMA_VALIDATE(it->size >= lastSize);
8716  lastSize = it->size;
8717  }
8718  return true;
8719 }
8720 
// Checks whether an allocation of allocSize/allocAlignment/allocType can
// start within the suballocation at suballocItem. On success fills *pOffset
// with the final (margin- and alignment-adjusted) offset and returns true.
// Out-params describe the candidate region:
// - *itemsToMakeLostCount: number of existing allocations that would have to
//   become lost (always 0 unless canMakeOtherLost).
// - *pSumFreeSize: free bytes inside the candidate region.
// - *pSumItemSize: bytes of would-be-lost allocations inside the region.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The candidate region may span multiple suballocations; the starting
        // one may itself be a lost-able allocation rather than a free range.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Only allocations unused for more than frameInUseCount frames
            // are eligible to become lost.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Pushing to the next granularity page resolves the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: the allocation must fit entirely inside this one free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
8994 
8995 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8996 {
8997  VMA_ASSERT(item != m_Suballocations.end());
8998  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8999 
9000  VmaSuballocationList::iterator nextItem = item;
9001  ++nextItem;
9002  VMA_ASSERT(nextItem != m_Suballocations.end());
9003  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9004 
9005  item->size += nextItem->size;
9006  --m_FreeCount;
9007  m_Suballocations.erase(nextItem);
9008 }
9009 
// Marks the given suballocation as free, merges it with adjacent free
// neighbors, updates the by-size registry, and returns an iterator to the
// resulting (possibly merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered BEFORE merging, because the registry is
    // searched by each item's current size, and merging changes sizes.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        // prevItem absorbs suballocItem (which was never registered as free),
        // then is re-registered with its new, larger size.
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
9061 
9062 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9063 {
9064  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9065  VMA_ASSERT(item->size > 0);
9066 
9067  // You may want to enable this validation at the beginning or at the end of
9068  // this function, depending on what do you want to check.
9069  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9070 
9071  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9072  {
9073  if(m_FreeSuballocationsBySize.empty())
9074  {
9075  m_FreeSuballocationsBySize.push_back(item);
9076  }
9077  else
9078  {
9079  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9080  }
9081  }
9082 
9083  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9084 }
9085 
9086 
// Removes a free suballocation from the size-sorted registry. Uses binary
// search to find the first entry of equal size, then scans linearly through
// the run of equal-size entries to find the exact iterator. Must be called
// while item->size still matches the size it was registered with.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    // Only ranges at least this large were ever registered.
    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Linear scan among entries of equal size to locate this exact item.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once sizes stop matching, the item cannot be further along.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
9119 
9120 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9121  VkDeviceSize bufferImageGranularity,
9122  VmaSuballocationType& inOutPrevSuballocType) const
9123 {
9124  if(bufferImageGranularity == 1 || IsEmpty())
9125  {
9126  return false;
9127  }
9128 
9129  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9130  bool typeConflictFound = false;
9131  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9132  it != m_Suballocations.cend();
9133  ++it)
9134  {
9135  const VmaSuballocationType suballocType = it->type;
9136  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9137  {
9138  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9139  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9140  {
9141  typeConflictFound = true;
9142  }
9143  inOutPrevSuballocType = suballocType;
9144  }
9145  }
9146 
9147  return typeConflictFound || minAlignment >= bufferImageGranularity;
9148 }
9149 
9151 // class VmaBlockMetadata_Linear
9152 
// Constructs empty metadata for the linear (ring-buffer / stack) algorithm.
// The two suballocation vectors alternate roles (1st/2nd) as indicated by
// m_1stVectorIndex; both use the allocator's allocation callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
9165 
// Trivial destructor: vectors release their storage via their allocators.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
9169 
// Initializes linear metadata for a block of the given size; initially the
// whole block is free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
9175 
// Checks internal consistency of this linear metadata block.
// Returns true when all invariants hold; each VMA_VALIDATE evaluates a single
// invariant and reports/returns failure otherwise. Verified invariants include:
// - the 2nd vector is empty exactly when m_2ndVectorMode == SECOND_VECTOR_EMPTY;
// - null (freed) items sit only where the null-item counters say they do;
// - suballocation offsets are increasing and separated by VMA_DEBUG_MARGIN;
// - every live suballocation agrees with its VmaAllocation's offset and size;
// - m_SumFreeSize == GetSize() - (sum of live suballocation sizes).
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector non-empty iff some second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot have items in 2nd while 1st is empty.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters can never exceed the vectors they describe.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` tracks the minimum admissible offset for the next suballocation,
    // including the debug margin between neighbors.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies the lowest offsets,
        // so walk it first, forward.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // A free item must have a null allocation handle and vice versa.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Metadata must agree with the allocation object itself.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The first m_1stNullItemsBeginCount items of 1st must all be null/free.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    // Walk the remainder of 1st, counting null items in the middle.
    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): the left operand is always true because the loop
        // starts at m_1stNullItemsBeginCount, so this check is trivially
        // satisfied as written.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows downward from the end of
        // the block, so iterate it in reverse to keep offsets increasing.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
9302 
9303 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9304 {
9305  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9306  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9307 }
9308 
// Returns the size of the largest contiguous free region that a new
// allocation could actually use, depending on the current second-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            // Non-empty here (IsEmpty() returned above), so indexing is safe.
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            // 2nd is non-empty in this mode (see Validate), so back() is safe.
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            // back() of 2nd is the lowest-offset item of the upper stack.
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
9372 
9373 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9374 {
9375  const VkDeviceSize size = GetSize();
9376  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9377  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9378  const size_t suballoc1stCount = suballocations1st.size();
9379  const size_t suballoc2ndCount = suballocations2nd.size();
9380 
9381  outInfo.blockCount = 1;
9382  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9383  outInfo.unusedRangeCount = 0;
9384  outInfo.usedBytes = 0;
9385  outInfo.allocationSizeMin = UINT64_MAX;
9386  outInfo.allocationSizeMax = 0;
9387  outInfo.unusedRangeSizeMin = UINT64_MAX;
9388  outInfo.unusedRangeSizeMax = 0;
9389 
9390  VkDeviceSize lastOffset = 0;
9391 
9392  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9393  {
9394  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9395  size_t nextAlloc2ndIndex = 0;
9396  while(lastOffset < freeSpace2ndTo1stEnd)
9397  {
9398  // Find next non-null allocation or move nextAllocIndex to the end.
9399  while(nextAlloc2ndIndex < suballoc2ndCount &&
9400  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9401  {
9402  ++nextAlloc2ndIndex;
9403  }
9404 
9405  // Found non-null allocation.
9406  if(nextAlloc2ndIndex < suballoc2ndCount)
9407  {
9408  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9409 
9410  // 1. Process free space before this allocation.
9411  if(lastOffset < suballoc.offset)
9412  {
9413  // There is free space from lastOffset to suballoc.offset.
9414  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9415  ++outInfo.unusedRangeCount;
9416  outInfo.unusedBytes += unusedRangeSize;
9417  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9418  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9419  }
9420 
9421  // 2. Process this allocation.
9422  // There is allocation with suballoc.offset, suballoc.size.
9423  outInfo.usedBytes += suballoc.size;
9424  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9425  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9426 
9427  // 3. Prepare for next iteration.
9428  lastOffset = suballoc.offset + suballoc.size;
9429  ++nextAlloc2ndIndex;
9430  }
9431  // We are at the end.
9432  else
9433  {
9434  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9435  if(lastOffset < freeSpace2ndTo1stEnd)
9436  {
9437  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9438  ++outInfo.unusedRangeCount;
9439  outInfo.unusedBytes += unusedRangeSize;
9440  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9441  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9442  }
9443 
9444  // End of loop.
9445  lastOffset = freeSpace2ndTo1stEnd;
9446  }
9447  }
9448  }
9449 
9450  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9451  const VkDeviceSize freeSpace1stTo2ndEnd =
9452  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9453  while(lastOffset < freeSpace1stTo2ndEnd)
9454  {
9455  // Find next non-null allocation or move nextAllocIndex to the end.
9456  while(nextAlloc1stIndex < suballoc1stCount &&
9457  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9458  {
9459  ++nextAlloc1stIndex;
9460  }
9461 
9462  // Found non-null allocation.
9463  if(nextAlloc1stIndex < suballoc1stCount)
9464  {
9465  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9466 
9467  // 1. Process free space before this allocation.
9468  if(lastOffset < suballoc.offset)
9469  {
9470  // There is free space from lastOffset to suballoc.offset.
9471  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9472  ++outInfo.unusedRangeCount;
9473  outInfo.unusedBytes += unusedRangeSize;
9474  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9475  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9476  }
9477 
9478  // 2. Process this allocation.
9479  // There is allocation with suballoc.offset, suballoc.size.
9480  outInfo.usedBytes += suballoc.size;
9481  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9482  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9483 
9484  // 3. Prepare for next iteration.
9485  lastOffset = suballoc.offset + suballoc.size;
9486  ++nextAlloc1stIndex;
9487  }
9488  // We are at the end.
9489  else
9490  {
9491  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9492  if(lastOffset < freeSpace1stTo2ndEnd)
9493  {
9494  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9495  ++outInfo.unusedRangeCount;
9496  outInfo.unusedBytes += unusedRangeSize;
9497  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9498  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9499  }
9500 
9501  // End of loop.
9502  lastOffset = freeSpace1stTo2ndEnd;
9503  }
9504  }
9505 
9506  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9507  {
9508  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9509  while(lastOffset < size)
9510  {
9511  // Find next non-null allocation or move nextAllocIndex to the end.
9512  while(nextAlloc2ndIndex != SIZE_MAX &&
9513  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9514  {
9515  --nextAlloc2ndIndex;
9516  }
9517 
9518  // Found non-null allocation.
9519  if(nextAlloc2ndIndex != SIZE_MAX)
9520  {
9521  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9522 
9523  // 1. Process free space before this allocation.
9524  if(lastOffset < suballoc.offset)
9525  {
9526  // There is free space from lastOffset to suballoc.offset.
9527  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9528  ++outInfo.unusedRangeCount;
9529  outInfo.unusedBytes += unusedRangeSize;
9530  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9531  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9532  }
9533 
9534  // 2. Process this allocation.
9535  // There is allocation with suballoc.offset, suballoc.size.
9536  outInfo.usedBytes += suballoc.size;
9537  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9538  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9539 
9540  // 3. Prepare for next iteration.
9541  lastOffset = suballoc.offset + suballoc.size;
9542  --nextAlloc2ndIndex;
9543  }
9544  // We are at the end.
9545  else
9546  {
9547  // There is free space from lastOffset to size.
9548  if(lastOffset < size)
9549  {
9550  const VkDeviceSize unusedRangeSize = size - lastOffset;
9551  ++outInfo.unusedRangeCount;
9552  outInfo.unusedBytes += unusedRangeSize;
9553  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9554  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9555  }
9556 
9557  // End of loop.
9558  lastOffset = size;
9559  }
9560  }
9561  }
9562 
9563  outInfo.unusedBytes = size - outInfo.usedBytes;
9564 }
9565 
9566 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9567 {
9568  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9569  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9570  const VkDeviceSize size = GetSize();
9571  const size_t suballoc1stCount = suballocations1st.size();
9572  const size_t suballoc2ndCount = suballocations2nd.size();
9573 
9574  inoutStats.size += size;
9575 
9576  VkDeviceSize lastOffset = 0;
9577 
9578  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9579  {
9580  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9581  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9582  while(lastOffset < freeSpace2ndTo1stEnd)
9583  {
9584  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9585  while(nextAlloc2ndIndex < suballoc2ndCount &&
9586  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9587  {
9588  ++nextAlloc2ndIndex;
9589  }
9590 
9591  // Found non-null allocation.
9592  if(nextAlloc2ndIndex < suballoc2ndCount)
9593  {
9594  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9595 
9596  // 1. Process free space before this allocation.
9597  if(lastOffset < suballoc.offset)
9598  {
9599  // There is free space from lastOffset to suballoc.offset.
9600  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9601  inoutStats.unusedSize += unusedRangeSize;
9602  ++inoutStats.unusedRangeCount;
9603  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9604  }
9605 
9606  // 2. Process this allocation.
9607  // There is allocation with suballoc.offset, suballoc.size.
9608  ++inoutStats.allocationCount;
9609 
9610  // 3. Prepare for next iteration.
9611  lastOffset = suballoc.offset + suballoc.size;
9612  ++nextAlloc2ndIndex;
9613  }
9614  // We are at the end.
9615  else
9616  {
9617  if(lastOffset < freeSpace2ndTo1stEnd)
9618  {
9619  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9620  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9621  inoutStats.unusedSize += unusedRangeSize;
9622  ++inoutStats.unusedRangeCount;
9623  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9624  }
9625 
9626  // End of loop.
9627  lastOffset = freeSpace2ndTo1stEnd;
9628  }
9629  }
9630  }
9631 
9632  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9633  const VkDeviceSize freeSpace1stTo2ndEnd =
9634  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9635  while(lastOffset < freeSpace1stTo2ndEnd)
9636  {
9637  // Find next non-null allocation or move nextAllocIndex to the end.
9638  while(nextAlloc1stIndex < suballoc1stCount &&
9639  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9640  {
9641  ++nextAlloc1stIndex;
9642  }
9643 
9644  // Found non-null allocation.
9645  if(nextAlloc1stIndex < suballoc1stCount)
9646  {
9647  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9648 
9649  // 1. Process free space before this allocation.
9650  if(lastOffset < suballoc.offset)
9651  {
9652  // There is free space from lastOffset to suballoc.offset.
9653  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9654  inoutStats.unusedSize += unusedRangeSize;
9655  ++inoutStats.unusedRangeCount;
9656  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9657  }
9658 
9659  // 2. Process this allocation.
9660  // There is allocation with suballoc.offset, suballoc.size.
9661  ++inoutStats.allocationCount;
9662 
9663  // 3. Prepare for next iteration.
9664  lastOffset = suballoc.offset + suballoc.size;
9665  ++nextAlloc1stIndex;
9666  }
9667  // We are at the end.
9668  else
9669  {
9670  if(lastOffset < freeSpace1stTo2ndEnd)
9671  {
9672  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9673  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9674  inoutStats.unusedSize += unusedRangeSize;
9675  ++inoutStats.unusedRangeCount;
9676  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9677  }
9678 
9679  // End of loop.
9680  lastOffset = freeSpace1stTo2ndEnd;
9681  }
9682  }
9683 
9684  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9685  {
9686  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9687  while(lastOffset < size)
9688  {
9689  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9690  while(nextAlloc2ndIndex != SIZE_MAX &&
9691  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9692  {
9693  --nextAlloc2ndIndex;
9694  }
9695 
9696  // Found non-null allocation.
9697  if(nextAlloc2ndIndex != SIZE_MAX)
9698  {
9699  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9700 
9701  // 1. Process free space before this allocation.
9702  if(lastOffset < suballoc.offset)
9703  {
9704  // There is free space from lastOffset to suballoc.offset.
9705  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9706  inoutStats.unusedSize += unusedRangeSize;
9707  ++inoutStats.unusedRangeCount;
9708  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9709  }
9710 
9711  // 2. Process this allocation.
9712  // There is allocation with suballoc.offset, suballoc.size.
9713  ++inoutStats.allocationCount;
9714 
9715  // 3. Prepare for next iteration.
9716  lastOffset = suballoc.offset + suballoc.size;
9717  --nextAlloc2ndIndex;
9718  }
9719  // We are at the end.
9720  else
9721  {
9722  if(lastOffset < size)
9723  {
9724  // There is free space from lastOffset to size.
9725  const VkDeviceSize unusedRangeSize = size - lastOffset;
9726  inoutStats.unusedSize += unusedRangeSize;
9727  ++inoutStats.unusedRangeCount;
9728  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9729  }
9730 
9731  // End of loop.
9732  lastOffset = size;
9733  }
9734  }
9735  }
9736 }
9737 
9738 #if VMA_STATS_STRING_ENABLED
9739 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9740 {
9741  const VkDeviceSize size = GetSize();
9742  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9743  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9744  const size_t suballoc1stCount = suballocations1st.size();
9745  const size_t suballoc2ndCount = suballocations2nd.size();
9746 
9747  // FIRST PASS
9748 
9749  size_t unusedRangeCount = 0;
9750  VkDeviceSize usedBytes = 0;
9751 
9752  VkDeviceSize lastOffset = 0;
9753 
9754  size_t alloc2ndCount = 0;
9755  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9756  {
9757  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9758  size_t nextAlloc2ndIndex = 0;
9759  while(lastOffset < freeSpace2ndTo1stEnd)
9760  {
9761  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9762  while(nextAlloc2ndIndex < suballoc2ndCount &&
9763  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9764  {
9765  ++nextAlloc2ndIndex;
9766  }
9767 
9768  // Found non-null allocation.
9769  if(nextAlloc2ndIndex < suballoc2ndCount)
9770  {
9771  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9772 
9773  // 1. Process free space before this allocation.
9774  if(lastOffset < suballoc.offset)
9775  {
9776  // There is free space from lastOffset to suballoc.offset.
9777  ++unusedRangeCount;
9778  }
9779 
9780  // 2. Process this allocation.
9781  // There is allocation with suballoc.offset, suballoc.size.
9782  ++alloc2ndCount;
9783  usedBytes += suballoc.size;
9784 
9785  // 3. Prepare for next iteration.
9786  lastOffset = suballoc.offset + suballoc.size;
9787  ++nextAlloc2ndIndex;
9788  }
9789  // We are at the end.
9790  else
9791  {
9792  if(lastOffset < freeSpace2ndTo1stEnd)
9793  {
9794  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9795  ++unusedRangeCount;
9796  }
9797 
9798  // End of loop.
9799  lastOffset = freeSpace2ndTo1stEnd;
9800  }
9801  }
9802  }
9803 
9804  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9805  size_t alloc1stCount = 0;
9806  const VkDeviceSize freeSpace1stTo2ndEnd =
9807  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9808  while(lastOffset < freeSpace1stTo2ndEnd)
9809  {
9810  // Find next non-null allocation or move nextAllocIndex to the end.
9811  while(nextAlloc1stIndex < suballoc1stCount &&
9812  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9813  {
9814  ++nextAlloc1stIndex;
9815  }
9816 
9817  // Found non-null allocation.
9818  if(nextAlloc1stIndex < suballoc1stCount)
9819  {
9820  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9821 
9822  // 1. Process free space before this allocation.
9823  if(lastOffset < suballoc.offset)
9824  {
9825  // There is free space from lastOffset to suballoc.offset.
9826  ++unusedRangeCount;
9827  }
9828 
9829  // 2. Process this allocation.
9830  // There is allocation with suballoc.offset, suballoc.size.
9831  ++alloc1stCount;
9832  usedBytes += suballoc.size;
9833 
9834  // 3. Prepare for next iteration.
9835  lastOffset = suballoc.offset + suballoc.size;
9836  ++nextAlloc1stIndex;
9837  }
9838  // We are at the end.
9839  else
9840  {
9841  if(lastOffset < size)
9842  {
9843  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9844  ++unusedRangeCount;
9845  }
9846 
9847  // End of loop.
9848  lastOffset = freeSpace1stTo2ndEnd;
9849  }
9850  }
9851 
9852  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9853  {
9854  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9855  while(lastOffset < size)
9856  {
9857  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9858  while(nextAlloc2ndIndex != SIZE_MAX &&
9859  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9860  {
9861  --nextAlloc2ndIndex;
9862  }
9863 
9864  // Found non-null allocation.
9865  if(nextAlloc2ndIndex != SIZE_MAX)
9866  {
9867  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9868 
9869  // 1. Process free space before this allocation.
9870  if(lastOffset < suballoc.offset)
9871  {
9872  // There is free space from lastOffset to suballoc.offset.
9873  ++unusedRangeCount;
9874  }
9875 
9876  // 2. Process this allocation.
9877  // There is allocation with suballoc.offset, suballoc.size.
9878  ++alloc2ndCount;
9879  usedBytes += suballoc.size;
9880 
9881  // 3. Prepare for next iteration.
9882  lastOffset = suballoc.offset + suballoc.size;
9883  --nextAlloc2ndIndex;
9884  }
9885  // We are at the end.
9886  else
9887  {
9888  if(lastOffset < size)
9889  {
9890  // There is free space from lastOffset to size.
9891  ++unusedRangeCount;
9892  }
9893 
9894  // End of loop.
9895  lastOffset = size;
9896  }
9897  }
9898  }
9899 
9900  const VkDeviceSize unusedBytes = size - usedBytes;
9901  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9902 
9903  // SECOND PASS
9904  lastOffset = 0;
9905 
9906  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9907  {
9908  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9909  size_t nextAlloc2ndIndex = 0;
9910  while(lastOffset < freeSpace2ndTo1stEnd)
9911  {
9912  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9913  while(nextAlloc2ndIndex < suballoc2ndCount &&
9914  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9915  {
9916  ++nextAlloc2ndIndex;
9917  }
9918 
9919  // Found non-null allocation.
9920  if(nextAlloc2ndIndex < suballoc2ndCount)
9921  {
9922  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9923 
9924  // 1. Process free space before this allocation.
9925  if(lastOffset < suballoc.offset)
9926  {
9927  // There is free space from lastOffset to suballoc.offset.
9928  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9929  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9930  }
9931 
9932  // 2. Process this allocation.
9933  // There is allocation with suballoc.offset, suballoc.size.
9934  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9935 
9936  // 3. Prepare for next iteration.
9937  lastOffset = suballoc.offset + suballoc.size;
9938  ++nextAlloc2ndIndex;
9939  }
9940  // We are at the end.
9941  else
9942  {
9943  if(lastOffset < freeSpace2ndTo1stEnd)
9944  {
9945  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9946  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9947  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9948  }
9949 
9950  // End of loop.
9951  lastOffset = freeSpace2ndTo1stEnd;
9952  }
9953  }
9954  }
9955 
9956  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9957  while(lastOffset < freeSpace1stTo2ndEnd)
9958  {
9959  // Find next non-null allocation or move nextAllocIndex to the end.
9960  while(nextAlloc1stIndex < suballoc1stCount &&
9961  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9962  {
9963  ++nextAlloc1stIndex;
9964  }
9965 
9966  // Found non-null allocation.
9967  if(nextAlloc1stIndex < suballoc1stCount)
9968  {
9969  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9970 
9971  // 1. Process free space before this allocation.
9972  if(lastOffset < suballoc.offset)
9973  {
9974  // There is free space from lastOffset to suballoc.offset.
9975  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9976  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9977  }
9978 
9979  // 2. Process this allocation.
9980  // There is allocation with suballoc.offset, suballoc.size.
9981  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9982 
9983  // 3. Prepare for next iteration.
9984  lastOffset = suballoc.offset + suballoc.size;
9985  ++nextAlloc1stIndex;
9986  }
9987  // We are at the end.
9988  else
9989  {
9990  if(lastOffset < freeSpace1stTo2ndEnd)
9991  {
9992  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9993  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9994  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9995  }
9996 
9997  // End of loop.
9998  lastOffset = freeSpace1stTo2ndEnd;
9999  }
10000  }
10001 
10002  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10003  {
10004  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10005  while(lastOffset < size)
10006  {
10007  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10008  while(nextAlloc2ndIndex != SIZE_MAX &&
10009  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10010  {
10011  --nextAlloc2ndIndex;
10012  }
10013 
10014  // Found non-null allocation.
10015  if(nextAlloc2ndIndex != SIZE_MAX)
10016  {
10017  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10018 
10019  // 1. Process free space before this allocation.
10020  if(lastOffset < suballoc.offset)
10021  {
10022  // There is free space from lastOffset to suballoc.offset.
10023  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10024  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10025  }
10026 
10027  // 2. Process this allocation.
10028  // There is allocation with suballoc.offset, suballoc.size.
10029  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10030 
10031  // 3. Prepare for next iteration.
10032  lastOffset = suballoc.offset + suballoc.size;
10033  --nextAlloc2ndIndex;
10034  }
10035  // We are at the end.
10036  else
10037  {
10038  if(lastOffset < size)
10039  {
10040  // There is free space from lastOffset to size.
10041  const VkDeviceSize unusedRangeSize = size - lastOffset;
10042  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10043  }
10044 
10045  // End of loop.
10046  lastOffset = size;
10047  }
10048  }
10049  }
10050 
10051  PrintDetailedMap_End(json);
10052 }
10053 #endif // #if VMA_STATS_STRING_ENABLED
10054 
10055 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10056  uint32_t currentFrameIndex,
10057  uint32_t frameInUseCount,
10058  VkDeviceSize bufferImageGranularity,
10059  VkDeviceSize allocSize,
10060  VkDeviceSize allocAlignment,
10061  bool upperAddress,
10062  VmaSuballocationType allocType,
10063  bool canMakeOtherLost,
10064  uint32_t strategy,
10065  VmaAllocationRequest* pAllocationRequest)
10066 {
10067  VMA_ASSERT(allocSize > 0);
10068  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10069  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10070  VMA_HEAVY_ASSERT(Validate());
10071  return upperAddress ?
10072  CreateAllocationRequest_UpperAddress(
10073  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10074  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10075  CreateAllocationRequest_LowerAddress(
10076  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10077  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10078 }
10079 
// Tries to create an allocation request at the upper end of the block (double-stack
// usage): the new allocation is placed just below 2nd.back(), or at the very end of
// the block if the 2nd vector is empty.
// currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy are accepted for
// signature symmetry with the lower-address variant but are not used here — an
// upper-address allocation never makes other allocations lost.
// Returns true and fills *pAllocationRequest on success; false if the block is in
// ring-buffer mode or there is not enough room.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // The 2nd vector cannot serve as an upper stack while it is used as a ring buffer.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if(allocSize > size)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = size - allocSize;
    if(!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        // Not enough space below the current top of the upper stack.
        if(allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    // Apply VMA_DEBUG_MARGIN at the end.
    if(VMA_DEBUG_MARGIN > 0)
    {
        if(resultOffset < VMA_DEBUG_MARGIN)
        {
            return false;
        }
        resultOffset -= VMA_DEBUG_MARGIN;
    }

    // Apply alignment. The upper stack grows downwards, so align DOWN.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // Iterated backwards: back() of the 2nd vector holds the lowest-offset item.
        for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if(bufferImageGranularityConflict)
        {
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->offset = resultOffset;
        // Free space spans from the end of the 1st vector up to the base of this allocation.
        pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
        pAllocationRequest->sumItemSize = 0;
        // pAllocationRequest->item unused.
        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    return false;
}
10199 
// Tries to create an allocation request at lower addresses. Two placements are
// considered, depending on the current mode of the 2nd vector:
// 1. At the end of the 1st vector (modes EMPTY and DOUBLE_STACK), bounded above by
//    the bottom of the upper stack or by the block size.
// 2. At the end of the 2nd vector (modes EMPTY and RING_BUFFER), wrapping around
//    below the beginning of the 1st vector; with canMakeOtherLost, colliding
//    allocations at the start of the 1st vector may be counted as "to make lost".
// Returns true and fills *pAllocationRequest on success.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // In double-stack mode the bottom of the upper stack caps the free space;
        // otherwise the end of the block does.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : size;

        // There is enough free space at the end after alignment.
        if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            resultOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        pAllocationRequest->itemsToMakeLostCount = 0;
        pAllocationRequest->sumItemSize = 0;
        size_t index1st = m_1stNullItemsBeginCount;

        if(canMakeOtherLost)
        {
            // Walk allocations at the start of the 1st vector that collide with
            // [resultOffset, resultOffset + allocSize + margin) and try to count them as lost.
            while(index1st < suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
            {
                // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                const VmaSuballocation& suballoc = suballocations1st[index1st];
                if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    // No problem.
                }
                else
                {
                    VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                    if(suballoc.hAllocation->CanBecomeLost() &&
                        suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++pAllocationRequest->itemsToMakeLostCount;
                        pAllocationRequest->sumItemSize += suballoc.size;
                    }
                    else
                    {
                        return false;
                    }
                }
                ++index1st;
            }

            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, we must mark more allocations lost or fail.
            if(bufferImageGranularity > 1)
            {
                while(index1st < suballocations1st.size())
                {
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                    {
                        if(suballoc.hAllocation != VK_NULL_HANDLE)
                        {
                            // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                            if(suballoc.hAllocation->CanBecomeLost() &&
                                suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                            {
                                ++pAllocationRequest->itemsToMakeLostCount;
                                pAllocationRequest->sumItemSize += suballoc.size;
                            }
                            else
                            {
                                return false;
                            }
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                    ++index1st;
                }
            }

            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
            if(index1st == suballocations1st.size() &&
                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
            {
                // TODO: This is a known bug that it's not yet implemented and the allocation is failing.
                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
            }
        }

        // There is enough free space at the end after alignment.
        // Either we consumed the whole 1st vector and fit before the block end, or we
        // fit before the first surviving item of the 1st vector.
        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free space reaches up to the first surviving item of the 1st vector (or the
            // block end), minus the total size of items that will be made lost.
            pAllocationRequest->sumFreeSize =
                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                - resultBaseOffset
                - pAllocationRequest->sumItemSize;
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    return false;
}
10470 
// Makes lost the number of allocations recorded in pAllocationRequest->itemsToMakeLostCount,
// walking from the start of the 1st vector and (in ring-buffer mode) wrapping into
// the 2nd vector. Returns false if any allocation refuses to become lost.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making other allocations lost is only used with the ring-buffer usage pattern.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // We always start from 1st.
    SuballocationVectorType* suballocations = &AccessSuballocations1st();
    size_t index = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        if(index == suballocations->size())
        {
            index = 0;
            // If we get to the end of 1st, we wrap around to the beginning of 2nd.
            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                suballocations = &AccessSuballocations2nd();
            }
            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
            // suballocations continues pointing at AccessSuballocations1st().
            VMA_ASSERT(!suballocations->empty());
        }
        VmaSuballocation& suballoc = (*suballocations)[index];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free gap and update the null-item counter of
                // whichever vector it belongs to.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                if(suballocations == &AccessSuballocations1st())
                {
                    ++m_1stNullItemsMiddleCount;
                }
                else
                {
                    ++m_2ndNullItemsCount;
                }
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
10534 
10535 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10536 {
10537  uint32_t lostAllocationCount = 0;
10538 
10539  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10540  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10541  {
10542  VmaSuballocation& suballoc = suballocations1st[i];
10543  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10544  suballoc.hAllocation->CanBecomeLost() &&
10545  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10546  {
10547  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10548  suballoc.hAllocation = VK_NULL_HANDLE;
10549  ++m_1stNullItemsMiddleCount;
10550  m_SumFreeSize += suballoc.size;
10551  ++lostAllocationCount;
10552  }
10553  }
10554 
10555  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10556  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10557  {
10558  VmaSuballocation& suballoc = suballocations2nd[i];
10559  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10560  suballoc.hAllocation->CanBecomeLost() &&
10561  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10562  {
10563  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10564  suballoc.hAllocation = VK_NULL_HANDLE;
10565  ++m_2ndNullItemsCount;
10566  m_SumFreeSize += suballoc.size;
10567  ++lostAllocationCount;
10568  }
10569  }
10570 
10571  if(lostAllocationCount)
10572  {
10573  CleanupAfterFree();
10574  }
10575 
10576  return lostAllocationCount;
10577 }
10578 
10579 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10580 {
10581  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10582  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10583  {
10584  const VmaSuballocation& suballoc = suballocations1st[i];
10585  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10586  {
10587  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10588  {
10589  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10590  return VK_ERROR_VALIDATION_FAILED_EXT;
10591  }
10592  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10593  {
10594  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10595  return VK_ERROR_VALIDATION_FAILED_EXT;
10596  }
10597  }
10598  }
10599 
10600  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10601  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10602  {
10603  const VmaSuballocation& suballoc = suballocations2nd[i];
10604  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10605  {
10606  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10607  {
10608  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10609  return VK_ERROR_VALIDATION_FAILED_EXT;
10610  }
10611  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10612  {
10613  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10614  return VK_ERROR_VALIDATION_FAILED_EXT;
10615  }
10616  }
10617  }
10618 
10619  return VK_SUCCESS;
10620 }
10621 
// Commits a previously computed allocation request: inserts the new suballocation
// into the proper vector according to request.type and updates m_SumFreeSize.
// request     - result of a successful CreateAllocationRequest* call.
// type        - suballocation type, used later for granularity conflict checks.
// allocSize   - final size of the allocation.
// hAllocation - allocation handle stored in the new suballocation.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    switch(request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
        {
            VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
                "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
            // Upper-stack allocations live in the 2nd vector; the first one switches the mode.
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            suballocations2nd.push_back(newSuballoc);
            m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
        }
        break;
    case VmaAllocationRequestType::EndOf1st:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();

            // Must lie past the current end of the 1st vector...
            VMA_ASSERT(suballocations1st.empty() ||
                request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
            // Check if it fits before the end of the block.
            VMA_ASSERT(request.offset + allocSize <= GetSize());

            suballocations1st.push_back(newSuballoc);
        }
        break;
    case VmaAllocationRequestType::EndOf2nd:
        {
            SuballocationVectorType& suballocations1st = AccessSuballocations1st();
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            VMA_ASSERT(!suballocations1st.empty() &&
                request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
            SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

            // Track the mode transition of the 2nd vector.
            switch(m_2ndVectorMode)
            {
            case SECOND_VECTOR_EMPTY:
                // First allocation from second part ring buffer.
                VMA_ASSERT(suballocations2nd.empty());
                m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                break;
            case SECOND_VECTOR_RING_BUFFER:
                // 2-part ring buffer is already started.
                VMA_ASSERT(!suballocations2nd.empty());
                break;
            case SECOND_VECTOR_DOUBLE_STACK:
                VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                break;
            default:
                VMA_ASSERT(0);
            }

            suballocations2nd.push_back(newSuballoc);
        }
        break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    m_SumFreeSize -= newSuballoc.size;
}
10688 
10689 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10690 {
10691  FreeAtOffset(allocation->GetOffset());
10692 }
10693 
// Frees the suballocation that starts exactly at the given offset.
// Cheap common cases are tried first (first live item of the 1st vector, last item
// of the active vector), then binary search handles items in the middle of either
// vector. Asserts if no suballocation with this offset exists.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): relies on the invariant (maintained by CleanupAfterFree) that a
        // non-empty 1st vector has a live item at index m_1stNullItemsBeginCount - confirm.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if(it != suballocations1st.end())
        {
            // Keep the slot but mark it as a free gap; CleanupAfterFree reclaims it.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 2nd vector is searched with an ascending-offset comparator in ring-buffer
        // mode and a descending-offset comparator in double-stack mode.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
10783 
10784 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10785 {
10786  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10787  const size_t suballocCount = AccessSuballocations1st().size();
10788  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10789 }
10790 
// Restores internal invariants after one or more suballocations were freed:
// - If the block became completely empty, resets both vectors and all counters.
// - Otherwise strips null items from the edges of both vectors, compacts the 1st
//   vector when null items dominate it (see ShouldCompact1st), and, when the 1st
//   vector drains in ring-buffer mode, promotes the 2nd vector to become the new 1st.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if(ShouldCompact1st())
        {
            // Slide all non-null items of the 1st vector to the front, dropping null gaps.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip null items at the beginning of the promoted vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // The vectors are physically swapped by flipping the index used by
                // AccessSuballocations1st()/AccessSuballocations2nd().
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
10895 
10896 
10898 // class VmaBlockMetadata_Buddy
10899 
// Constructs empty buddy-allocator metadata. m_FreeCount starts at 1 in
// anticipation of the single free root node that Init() creates.
// NOTE(review): the member initializer list assumes this matches the declaration
// order in the class - confirm against the class definition.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // All per-level free lists start empty (front/back pointers zeroed).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
10909 
// Releases the whole buddy tree. m_Root may still be VMA_NULL if Init() was never
// called - presumably DeleteNode handles that; verify against its implementation.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
10914 
10915 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10916 {
10917  VmaBlockMetadata::Init(size);
10918 
10919  m_UsableSize = VmaPrevPow2(size);
10920  m_SumFreeSize = m_UsableSize;
10921 
10922  // Calculate m_LevelCount.
10923  m_LevelCount = 1;
10924  while(m_LevelCount < MAX_LEVELS &&
10925  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10926  {
10927  ++m_LevelCount;
10928  }
10929 
10930  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10931  rootNode->offset = 0;
10932  rootNode->type = Node::TYPE_FREE;
10933  rootNode->parent = VMA_NULL;
10934  rootNode->buddy = VMA_NULL;
10935 
10936  m_Root = rootNode;
10937  AddToFreeListFront(0, rootNode);
10938 }
10939 
// Full consistency check of the buddy metadata, used by VMA_HEAVY_ASSERT.
// Verifies the node tree against accumulated counters, the doubly-linked free list
// of every active level, and that levels beyond m_LevelCount have empty free lists.
// Returns true when everything is consistent (VMA_VALIDATE bails out otherwise).
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of the list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // The last node must be the recorded tail.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
10982 
10983 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10984 {
10985  for(uint32_t level = 0; level < m_LevelCount; ++level)
10986  {
10987  if(m_FreeList[level].front != VMA_NULL)
10988  {
10989  return LevelToNodeSize(level);
10990  }
10991  }
10992  return 0;
10993 }
10994 
10995 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10996 {
10997  const VkDeviceSize unusableSize = GetUnusableSize();
10998 
10999  outInfo.blockCount = 1;
11000 
11001  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11002  outInfo.usedBytes = outInfo.unusedBytes = 0;
11003 
11004  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11005  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11006  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11007 
11008  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11009 
11010  if(unusableSize > 0)
11011  {
11012  ++outInfo.unusedRangeCount;
11013  outInfo.unusedBytes += unusableSize;
11014  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11015  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11016  }
11017 }
11018 
11019 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11020 {
11021  const VkDeviceSize unusableSize = GetUnusableSize();
11022 
11023  inoutStats.size += GetSize();
11024  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11025  inoutStats.allocationCount += m_AllocationCount;
11026  inoutStats.unusedRangeCount += m_FreeCount;
11027  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11028 
11029  if(unusableSize > 0)
11030  {
11031  ++inoutStats.unusedRangeCount;
11032  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11033  }
11034 }
11035 
11036 #if VMA_STATS_STRING_ENABLED
11037 
// Emits a JSON description of this block: summary statistics followed by an
// in-order dump of all allocations and unused ranges.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Dump the whole node tree, starting at the root (level 0 node size).
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // Report the unusable tail (if any) as a trailing unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
11062 
11063 #endif // #if VMA_STATS_STRING_ENABLED
11064 
// Tries to find a free node large enough for allocSize with the requested
// alignment. On success fills *pAllocationRequest (the level of the found
// node is passed to Alloc() via customData) and returns true.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    // Early reject: request cannot fit even in the whole usable block.
    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Scan from targetLevel (smallest node size that fits) back toward
    // level 0 (largest nodes), taking the first suitably aligned free node.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->type = VmaAllocationRequestType::Normal;
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Alloc() reads the level back from customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
11116 
11117 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11118  uint32_t currentFrameIndex,
11119  uint32_t frameInUseCount,
11120  VmaAllocationRequest* pAllocationRequest)
11121 {
11122  /*
11123  Lost allocations are not supported in buddy allocator at the moment.
11124  Support might be added in the future.
11125  */
11126  return pAllocationRequest->itemsToMakeLostCount == 0;
11127 }
11128 
11129 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11130 {
11131  /*
11132  Lost allocations are not supported in buddy allocator at the moment.
11133  Support might be added in the future.
11134  */
11135  return 0;
11136 }
11137 
// Commits a request produced by CreateAllocationRequest: splits free nodes
// down to the target level if needed and marks the final node as allocated.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);

    // Level where the allocation should finally land, and the level of the
    // free node found by CreateAllocationRequest (smuggled via customData).
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node at currLevel matching the requested offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left child must end up at the front so the descent below picks it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One free node became two: net +1.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
11213 
11214 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11215 {
11216  if(node->type == Node::TYPE_SPLIT)
11217  {
11218  DeleteNode(node->split.leftChild->buddy);
11219  DeleteNode(node->split.leftChild);
11220  }
11221 
11222  vma_delete(GetAllocationCallbacks(), node);
11223 }
11224 
11225 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11226 {
11227  VMA_VALIDATE(level < m_LevelCount);
11228  VMA_VALIDATE(curr->parent == parent);
11229  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11230  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11231  switch(curr->type)
11232  {
11233  case Node::TYPE_FREE:
11234  // curr->free.prev, next are validated separately.
11235  ctx.calculatedSumFreeSize += levelNodeSize;
11236  ++ctx.calculatedFreeCount;
11237  break;
11238  case Node::TYPE_ALLOCATION:
11239  ++ctx.calculatedAllocationCount;
11240  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11241  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11242  break;
11243  case Node::TYPE_SPLIT:
11244  {
11245  const uint32_t childrenLevel = level + 1;
11246  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11247  const Node* const leftChild = curr->split.leftChild;
11248  VMA_VALIDATE(leftChild != VMA_NULL);
11249  VMA_VALIDATE(leftChild->offset == curr->offset);
11250  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11251  {
11252  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11253  }
11254  const Node* const rightChild = leftChild->buddy;
11255  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11256  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11257  {
11258  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11259  }
11260  }
11261  break;
11262  default:
11263  return false;
11264  }
11265 
11266  return true;
11267 }
11268 
11269 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11270 {
11271  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
11272  uint32_t level = 0;
11273  VkDeviceSize currLevelNodeSize = m_UsableSize;
11274  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11275  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11276  {
11277  ++level;
11278  currLevelNodeSize = nextLevelNodeSize;
11279  nextLevelNodeSize = currLevelNodeSize >> 1;
11280  }
11281  return level;
11282 }
11283 
// Frees the allocation at the given offset: converts its node back to free
// and merges it with its buddy repeatedly while the buddy is also free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    // Descend from the root, following whichever child covers `offset`.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above tolerates alloc == VK_NULL_HANDLE, but the
    // next line dereferences alloc unconditionally — confirm callers never pass null.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // Replace a pair of free buddies with their (now free) parent, repeatedly.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
11334 
11335 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11336 {
11337  switch(node->type)
11338  {
11339  case Node::TYPE_FREE:
11340  ++outInfo.unusedRangeCount;
11341  outInfo.unusedBytes += levelNodeSize;
11342  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11343  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
11344  break;
11345  case Node::TYPE_ALLOCATION:
11346  {
11347  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11348  ++outInfo.allocationCount;
11349  outInfo.usedBytes += allocSize;
11350  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11351  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
11352 
11353  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11354  if(unusedRangeSize > 0)
11355  {
11356  ++outInfo.unusedRangeCount;
11357  outInfo.unusedBytes += unusedRangeSize;
11358  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11359  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
11360  }
11361  }
11362  break;
11363  case Node::TYPE_SPLIT:
11364  {
11365  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11366  const Node* const leftChild = node->split.leftChild;
11367  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11368  const Node* const rightChild = leftChild->buddy;
11369  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11370  }
11371  break;
11372  default:
11373  VMA_ASSERT(0);
11374  }
11375 }
11376 
11377 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11378 {
11379  VMA_ASSERT(node->type == Node::TYPE_FREE);
11380 
11381  // List is empty.
11382  Node* const frontNode = m_FreeList[level].front;
11383  if(frontNode == VMA_NULL)
11384  {
11385  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11386  node->free.prev = node->free.next = VMA_NULL;
11387  m_FreeList[level].front = m_FreeList[level].back = node;
11388  }
11389  else
11390  {
11391  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11392  node->free.prev = VMA_NULL;
11393  node->free.next = frontNode;
11394  frontNode->free.prev = node;
11395  m_FreeList[level].front = node;
11396  }
11397 }
11398 
11399 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11400 {
11401  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11402 
11403  // It is at the front.
11404  if(node->free.prev == VMA_NULL)
11405  {
11406  VMA_ASSERT(m_FreeList[level].front == node);
11407  m_FreeList[level].front = node->free.next;
11408  }
11409  else
11410  {
11411  Node* const prevFreeNode = node->free.prev;
11412  VMA_ASSERT(prevFreeNode->free.next == node);
11413  prevFreeNode->free.next = node->free.next;
11414  }
11415 
11416  // It is at the back.
11417  if(node->free.next == VMA_NULL)
11418  {
11419  VMA_ASSERT(m_FreeList[level].back == node);
11420  m_FreeList[level].back = node->free.prev;
11421  }
11422  else
11423  {
11424  Node* const nextFreeNode = node->free.next;
11425  VMA_ASSERT(nextFreeNode->free.prev == node);
11426  nextFreeNode->free.prev = node->free.prev;
11427  }
11428 }
11429 
11430 #if VMA_STATS_STRING_ENABLED
11431 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11432 {
11433  switch(node->type)
11434  {
11435  case Node::TYPE_FREE:
11436  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11437  break;
11438  case Node::TYPE_ALLOCATION:
11439  {
11440  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11441  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11442  if(allocSize < levelNodeSize)
11443  {
11444  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11445  }
11446  }
11447  break;
11448  case Node::TYPE_SPLIT:
11449  {
11450  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11451  const Node* const leftChild = node->split.leftChild;
11452  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11453  const Node* const rightChild = leftChild->buddy;
11454  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11455  }
11456  break;
11457  default:
11458  VMA_ASSERT(0);
11459  }
11460 }
11461 #endif // #if VMA_STATS_STRING_ENABLED
11462 
11463 
11465 // class VmaDeviceMemoryBlock
11466 
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
    // Intentionally empty: the block starts in an uninitialized state.
    // Real setup (including m_hParentPool and metadata creation) happens in Init().
}
11476 
11477 void VmaDeviceMemoryBlock::Init(
11478  VmaAllocator hAllocator,
11479  VmaPool hParentPool,
11480  uint32_t newMemoryTypeIndex,
11481  VkDeviceMemory newMemory,
11482  VkDeviceSize newSize,
11483  uint32_t id,
11484  uint32_t algorithm)
11485 {
11486  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11487 
11488  m_hParentPool = hParentPool;
11489  m_MemoryTypeIndex = newMemoryTypeIndex;
11490  m_Id = id;
11491  m_hMemory = newMemory;
11492 
11493  switch(algorithm)
11494  {
11496  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11497  break;
11499  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11500  break;
11501  default:
11502  VMA_ASSERT(0);
11503  // Fall-through.
11504  case 0:
11505  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11506  }
11507  m_pMetadata->Init(newSize);
11508 }
11509 
// Releases the block's VkDeviceMemory and metadata. All suballocations must
// already be freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    // Return the VkDeviceMemory to the allocator.
    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
11523 
bool VmaDeviceMemoryBlock::Validate() const
{
    // A valid block owns a VkDeviceMemory handle with nonzero size;
    // all suballocation invariants are checked by the metadata object.
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
11531 
11532 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11533 {
11534  void* pData = nullptr;
11535  VkResult res = Map(hAllocator, 1, &pData);
11536  if(res != VK_SUCCESS)
11537  {
11538  return res;
11539  }
11540 
11541  res = m_pMetadata->CheckCorruption(pData);
11542 
11543  Unmap(hAllocator, 1);
11544 
11545  return res;
11546 }
11547 
// Maps the whole block memory, reference-counted: the first call performs
// vkMapMemory, later calls only bump m_MapCount and return the cached pointer.
// `count` is the number of map references to add.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serializes map/unmap on the same VkDeviceMemory across threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just add references and hand out the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the whole memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
11586 
// Releases `count` map references; performs vkUnmapMemory when the
// reference count drops to zero.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference gone: actually unmap.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
11609 
11610 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11611 {
11612  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11613  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11614 
11615  void* pData;
11616  VkResult res = Map(hAllocator, 1, &pData);
11617  if(res != VK_SUCCESS)
11618  {
11619  return res;
11620  }
11621 
11622  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11623  VmaWriteMagicValue(pData, allocOffset + allocSize);
11624 
11625  Unmap(hAllocator, 1);
11626 
11627  return VK_SUCCESS;
11628 }
11629 
// Checks the magic values written by WriteMagicValueAroundAllocation in the
// margins before and after the allocation; asserts on mismatch.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Check the margin before the allocation first, then the one after it.
    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
11655 
// Binds hBuffer to this block's memory at the given offset, which is relative
// to the allocation (the allocation itself lives at hAllocation->GetOffset()).
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}
11672 
// Image counterpart of BindBufferMemory: binds hImage at an offset relative
// to the allocation within this block's memory.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}
11689 
11690 static void InitStatInfo(VmaStatInfo& outInfo)
11691 {
11692  memset(&outInfo, 0, sizeof(outInfo));
11693  outInfo.allocationSizeMin = UINT64_MAX;
11694  outInfo.unusedRangeSizeMin = UINT64_MAX;
11695 }
11696 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    // Counters and byte totals are additive.
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    // Extremes combine by taking the min / max across both inputs.
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
11710 
// Derives the average sizes from the accumulated totals; guards against
// division by zero when there are no allocations / unused ranges.
static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
{
    inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
}
11718 
// Constructs a custom pool by configuring its embedded block vector from
// VmaPoolCreateInfo. createInfo.blockSize == 0 falls back to preferredBlockSize.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0),
    m_Name(VMA_NULL)
{
}
11738 
VmaPool_T::~VmaPool_T()
{
    // Intentionally empty: m_BlockVector destroys its blocks in its own
    // destructor. NOTE(review): m_Name is not freed here — presumably released
    // elsewhere (e.g. via SetName) before destruction; confirm to rule out a leak.
}
11742 
11743 void VmaPool_T::SetName(const char* pName)
11744 {
11745  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
11746  VmaFreeString(allocs, m_Name);
11747 
11748  if(pName != VMA_NULL)
11749  {
11750  m_Name = VmaCreateStringCopy(allocs, pName);
11751  }
11752  else
11753  {
11754  m_Name = VMA_NULL;
11755  }
11756 }
11757 
11758 #if VMA_STATS_STRING_ENABLED
11759 
11760 #endif // #if VMA_STATS_STRING_ENABLED
11761 
// Stores configuration for a vector of memory blocks of a single memory type.
// No Vulkan memory is allocated here; blocks are created on demand
// (see CreateMinBlocks / CreateBlock).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
11788 
11789 VmaBlockVector::~VmaBlockVector()
11790 {
11791  for(size_t i = m_Blocks.size(); i--; )
11792  {
11793  m_Blocks[i]->Destroy(m_hAllocator);
11794  vma_delete(m_hAllocator, m_Blocks[i]);
11795  }
11796 }
11797 
11798 VkResult VmaBlockVector::CreateMinBlocks()
11799 {
11800  for(size_t i = 0; i < m_MinBlockCount; ++i)
11801  {
11802  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11803  if(res != VK_SUCCESS)
11804  {
11805  return res;
11806  }
11807  }
11808  return VK_SUCCESS;
11809 }
11810 
11811 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11812 {
11813  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11814 
11815  const size_t blockCount = m_Blocks.size();
11816 
11817  pStats->size = 0;
11818  pStats->unusedSize = 0;
11819  pStats->allocationCount = 0;
11820  pStats->unusedRangeCount = 0;
11821  pStats->unusedRangeSizeMax = 0;
11822  pStats->blockCount = blockCount;
11823 
11824  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11825  {
11826  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11827  VMA_ASSERT(pBlock);
11828  VMA_HEAVY_ASSERT(pBlock->Validate());
11829  pBlock->m_pMetadata->AddPoolStats(*pStats);
11830  }
11831 }
11832 
// Returns true when no memory blocks currently exist in this vector.
// Takes the read lock because m_Blocks can be mutated concurrently.
bool VmaBlockVector::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    return m_Blocks.empty();
}
11838 
11839 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11840 {
11841  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11842  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11843  (VMA_DEBUG_MARGIN > 0) &&
11844  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11845  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11846 }
11847 
11848 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11849 
// Makes allocationCount allocations of identical size/alignment.
// All-or-nothing: if any page fails, the ones already created are freed
// and pAllocations is zeroed before returning the error.
VkResult VmaBlockVector::Allocate(
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    size_t allocIndex;
    VkResult res = VK_SUCCESS;

    if(IsCorruptionDetectionEnabled())
    {
        // Round size and alignment up to the granularity of the
        // corruption-detection magic value.
        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    }

    {
        // Hold the write lock across all pages so the set of blocks is stable.
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
        for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
        {
            res = AllocatePage(
                currentFrameIndex,
                size,
                alignment,
                createInfo,
                suballocType,
                pAllocations + allocIndex);
            if(res != VK_SUCCESS)
            {
                break;
            }
        }
    }

    if(res != VK_SUCCESS)
    {
        // Free all already created allocations.
        while(allocIndex--)
        {
            Free(pAllocations[allocIndex]);
        }
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
11898 
11899 VkResult VmaBlockVector::AllocatePage(
11900  uint32_t currentFrameIndex,
11901  VkDeviceSize size,
11902  VkDeviceSize alignment,
11903  const VmaAllocationCreateInfo& createInfo,
11904  VmaSuballocationType suballocType,
11905  VmaAllocation* pAllocation)
11906 {
11907  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11908  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11909  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11910  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11911 
11912  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
11913  VkDeviceSize freeMemory;
11914  {
11915  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
11916  VmaBudget heapBudget = {};
11917  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
11918  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
11919  }
11920 
11921  const bool canFallbackToDedicated = !IsCustomPool();
11922  const bool canCreateNewBlock =
11923  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11924  (m_Blocks.size() < m_MaxBlockCount) &&
11925  (freeMemory >= size || !canFallbackToDedicated);
11926  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11927 
11928  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11929  // Which in turn is available only when maxBlockCount = 1.
11930  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11931  {
11932  canMakeOtherLost = false;
11933  }
11934 
11935  // Upper address can only be used with linear allocator and within single memory block.
11936  if(isUpperAddress &&
11937  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11938  {
11939  return VK_ERROR_FEATURE_NOT_PRESENT;
11940  }
11941 
11942  // Validate strategy.
11943  switch(strategy)
11944  {
11945  case 0:
11947  break;
11951  break;
11952  default:
11953  return VK_ERROR_FEATURE_NOT_PRESENT;
11954  }
11955 
11956  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11957  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11958  {
11959  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11960  }
11961 
11962  /*
11963  Under certain condition, this whole section can be skipped for optimization, so
11964  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11965  e.g. for custom pools with linear algorithm.
11966  */
11967  if(!canMakeOtherLost || canCreateNewBlock)
11968  {
11969  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11970  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11972 
11973  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11974  {
11975  // Use only last block.
11976  if(!m_Blocks.empty())
11977  {
11978  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11979  VMA_ASSERT(pCurrBlock);
11980  VkResult res = AllocateFromBlock(
11981  pCurrBlock,
11982  currentFrameIndex,
11983  size,
11984  alignment,
11985  allocFlagsCopy,
11986  createInfo.pUserData,
11987  suballocType,
11988  strategy,
11989  pAllocation);
11990  if(res == VK_SUCCESS)
11991  {
11992  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
11993  return VK_SUCCESS;
11994  }
11995  }
11996  }
11997  else
11998  {
12000  {
12001  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12002  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12003  {
12004  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12005  VMA_ASSERT(pCurrBlock);
12006  VkResult res = AllocateFromBlock(
12007  pCurrBlock,
12008  currentFrameIndex,
12009  size,
12010  alignment,
12011  allocFlagsCopy,
12012  createInfo.pUserData,
12013  suballocType,
12014  strategy,
12015  pAllocation);
12016  if(res == VK_SUCCESS)
12017  {
12018  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12019  return VK_SUCCESS;
12020  }
12021  }
12022  }
12023  else // WORST_FIT, FIRST_FIT
12024  {
12025  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12026  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12027  {
12028  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12029  VMA_ASSERT(pCurrBlock);
12030  VkResult res = AllocateFromBlock(
12031  pCurrBlock,
12032  currentFrameIndex,
12033  size,
12034  alignment,
12035  allocFlagsCopy,
12036  createInfo.pUserData,
12037  suballocType,
12038  strategy,
12039  pAllocation);
12040  if(res == VK_SUCCESS)
12041  {
12042  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12043  return VK_SUCCESS;
12044  }
12045  }
12046  }
12047  }
12048 
12049  // 2. Try to create new block.
12050  if(canCreateNewBlock)
12051  {
12052  // Calculate optimal size for new block.
12053  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12054  uint32_t newBlockSizeShift = 0;
12055  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12056 
12057  if(!m_ExplicitBlockSize)
12058  {
12059  // Allocate 1/8, 1/4, 1/2 as first blocks.
12060  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12061  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12062  {
12063  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12064  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12065  {
12066  newBlockSize = smallerNewBlockSize;
12067  ++newBlockSizeShift;
12068  }
12069  else
12070  {
12071  break;
12072  }
12073  }
12074  }
12075 
12076  size_t newBlockIndex = 0;
12077  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12078  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12079  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12080  if(!m_ExplicitBlockSize)
12081  {
12082  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12083  {
12084  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12085  if(smallerNewBlockSize >= size)
12086  {
12087  newBlockSize = smallerNewBlockSize;
12088  ++newBlockSizeShift;
12089  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12090  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12091  }
12092  else
12093  {
12094  break;
12095  }
12096  }
12097  }
12098 
12099  if(res == VK_SUCCESS)
12100  {
12101  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12102  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12103 
12104  res = AllocateFromBlock(
12105  pBlock,
12106  currentFrameIndex,
12107  size,
12108  alignment,
12109  allocFlagsCopy,
12110  createInfo.pUserData,
12111  suballocType,
12112  strategy,
12113  pAllocation);
12114  if(res == VK_SUCCESS)
12115  {
12116  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12117  return VK_SUCCESS;
12118  }
12119  else
12120  {
12121  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12122  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12123  }
12124  }
12125  }
12126  }
12127 
12128  // 3. Try to allocate from existing blocks with making other allocations lost.
12129  if(canMakeOtherLost)
12130  {
12131  uint32_t tryIndex = 0;
12132  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12133  {
12134  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12135  VmaAllocationRequest bestRequest = {};
12136  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12137 
12138  // 1. Search existing allocations.
12140  {
12141  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12142  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12143  {
12144  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12145  VMA_ASSERT(pCurrBlock);
12146  VmaAllocationRequest currRequest = {};
12147  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12148  currentFrameIndex,
12149  m_FrameInUseCount,
12150  m_BufferImageGranularity,
12151  size,
12152  alignment,
12153  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12154  suballocType,
12155  canMakeOtherLost,
12156  strategy,
12157  &currRequest))
12158  {
12159  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12160  if(pBestRequestBlock == VMA_NULL ||
12161  currRequestCost < bestRequestCost)
12162  {
12163  pBestRequestBlock = pCurrBlock;
12164  bestRequest = currRequest;
12165  bestRequestCost = currRequestCost;
12166 
12167  if(bestRequestCost == 0)
12168  {
12169  break;
12170  }
12171  }
12172  }
12173  }
12174  }
12175  else // WORST_FIT, FIRST_FIT
12176  {
12177  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12178  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12179  {
12180  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12181  VMA_ASSERT(pCurrBlock);
12182  VmaAllocationRequest currRequest = {};
12183  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12184  currentFrameIndex,
12185  m_FrameInUseCount,
12186  m_BufferImageGranularity,
12187  size,
12188  alignment,
12189  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12190  suballocType,
12191  canMakeOtherLost,
12192  strategy,
12193  &currRequest))
12194  {
12195  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12196  if(pBestRequestBlock == VMA_NULL ||
12197  currRequestCost < bestRequestCost ||
12199  {
12200  pBestRequestBlock = pCurrBlock;
12201  bestRequest = currRequest;
12202  bestRequestCost = currRequestCost;
12203 
12204  if(bestRequestCost == 0 ||
12206  {
12207  break;
12208  }
12209  }
12210  }
12211  }
12212  }
12213 
12214  if(pBestRequestBlock != VMA_NULL)
12215  {
12216  if(mapped)
12217  {
12218  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12219  if(res != VK_SUCCESS)
12220  {
12221  return res;
12222  }
12223  }
12224 
12225  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12226  currentFrameIndex,
12227  m_FrameInUseCount,
12228  &bestRequest))
12229  {
12230  // Allocate from this pBlock.
12231  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12232  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12233  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12234  UpdateHasEmptyBlock();
12235  (*pAllocation)->InitBlockAllocation(
12236  pBestRequestBlock,
12237  bestRequest.offset,
12238  alignment,
12239  size,
12240  m_MemoryTypeIndex,
12241  suballocType,
12242  mapped,
12243  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12244  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12245  VMA_DEBUG_LOG(" Returned from existing block");
12246  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12247  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
12248  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12249  {
12250  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12251  }
12252  if(IsCorruptionDetectionEnabled())
12253  {
12254  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12255  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12256  }
12257  return VK_SUCCESS;
12258  }
12259  // else: Some allocations must have been touched while we are here. Next try.
12260  }
12261  else
12262  {
12263  // Could not find place in any of the blocks - break outer loop.
12264  break;
12265  }
12266  }
12267  /* Maximum number of tries exceeded - a very unlike event when many other
12268  threads are simultaneously touching allocations making it impossible to make
12269  lost at the same time as we try to allocate. */
12270  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12271  {
12272  return VK_ERROR_TOO_MANY_OBJECTS;
12273  }
12274  }
12275 
12276  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12277 }
12278 
// Returns hAllocation's memory back to this block vector.
// If the owning block becomes empty it may be scheduled for destruction, but
// at most one empty block is normally kept as a cache; the heap-budget check
// below makes deletion more eager when usage already exceeds the budget.
// The actual VkDeviceMemory destruction is deferred until after the mutex is
// released, for performance.
void VmaBlockVector::Free(
    const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Query the current budget of this memory type's heap before taking the
    // lock; used only as a heuristic for empty-block deletion below.
    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // With corruption detection on, verify the magic values written
        // around this allocation before releasing it.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on its
        // block - release it.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        // Never drop below the configured minimum number of blocks.
        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty block. We don't want to have two, so delete this one.
            if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have an empty block - leave it.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock && canDeleteBlock)
        {
            // Blocks are incrementally sorted by free space, so an empty
            // block, if any, tends to be at the back.
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        UpdateHasEmptyBlock();
        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty block");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
12351 
12352 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12353 {
12354  VkDeviceSize result = 0;
12355  for(size_t i = m_Blocks.size(); i--; )
12356  {
12357  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12358  if(result >= m_PreferredBlockSize)
12359  {
12360  break;
12361  }
12362  }
12363  return result;
12364 }
12365 
12366 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12367 {
12368  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12369  {
12370  if(m_Blocks[blockIndex] == pBlock)
12371  {
12372  VmaVectorRemove(m_Blocks, blockIndex);
12373  return;
12374  }
12375  }
12376  VMA_ASSERT(0);
12377 }
12378 
12379 void VmaBlockVector::IncrementallySortBlocks()
12380 {
12381  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12382  {
12383  // Bubble sort only until first swap.
12384  for(size_t i = 1; i < m_Blocks.size(); ++i)
12385  {
12386  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12387  {
12388  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12389  return;
12390  }
12391  }
12392  }
12393 }
12394 
// Attempts to suballocate `size` bytes with `alignment` from a single,
// already-existing block, without making other allocations lost.
// On success fills *pAllocation and returns VK_SUCCESS; if the block has no
// suitable free space returns VK_ERROR_OUT_OF_DEVICE_MEMORY so the caller
// can try another block. May also return the error from Map() when
// VMA_ALLOCATION_CREATE_MAPPED_BIT is requested and mapping fails.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // CAN_MAKE_OTHER_LOST must be handled by the caller's separate path.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata for a placement satisfying size/alignment
    // and buffer-image granularity.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Map first: if mapping fails we bail out before mutating metadata.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // Commit: create the allocation object, record it in the block's
        // metadata, then initialize its bookkeeping.
        *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
        (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
        UpdateHasEmptyBlock();
        (*pAllocation)->InitBlockAllocation(
            pBlock,
            currRequest.offset,
            alignment,
            size,
            m_MemoryTypeIndex,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Account the new allocation against this heap's budget.
        m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Write canary values on both sides of the new allocation.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
12465 
12466 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12467 {
12468  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12469  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12470  allocInfo.allocationSize = blockSize;
12471  VkDeviceMemory mem = VK_NULL_HANDLE;
12472  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12473  if(res < 0)
12474  {
12475  return res;
12476  }
12477 
12478  // New VkDeviceMemory successfully created.
12479 
12480  // Create new Allocation for it.
12481  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12482  pBlock->Init(
12483  m_hAllocator,
12484  m_hParentPool,
12485  m_MemoryTypeIndex,
12486  mem,
12487  allocInfo.allocationSize,
12488  m_NextBlockId++,
12489  m_Algorithm);
12490 
12491  m_Blocks.push_back(pBlock);
12492  if(pNewBlockIndex != VMA_NULL)
12493  {
12494  *pNewBlockIndex = m_Blocks.size() - 1;
12495  }
12496 
12497  return VK_SUCCESS;
12498 }
12499 
// Executes the planned defragmentation moves on the CPU via memmove between
// mapped block memory. Sequence per call:
//   1. mark blocks referenced by any move,
//   2. map each marked block (remembering which were mapped here),
//   3. for each move: invalidate src range (non-coherent memory only),
//      memmove, rewrite corruption-detection canaries, flush dst range,
//   4. unmap blocks that step 2 mapped, regardless of intermediate failure.
// Errors from mapping are reported through pDefragCtx->res; the copy loop
// only runs while res == VK_SUCCESS.
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001,
        // Set only when this function itself mapped the block, so it knows
        // to unmap it at the end.
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    };

    // Per-block scratch state for this pass.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                // Range must be aligned down/up to nonCoherentAtomSize and
                // clamped to the block size, per Vulkan requirements.
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: src and dst may live in the same block
            // and the regions may overlap.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                // Re-stamp the canary values around the moved allocation.
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
12618 
// Records the planned defragmentation moves into commandBuffer as
// vkCmdCopyBuffer commands. For every block touched by a move, a temporary
// VkBuffer spanning the whole block is created and bound, then each move is
// recorded as a buffer-to-buffer copy. On success with pending moves, res is
// set to VK_NOT_READY: the temporary buffers must stay alive until the
// command buffer executes, and are destroyed later in DefragmentationEnd.
void VmaBlockVector::ApplyDefragmentationMovesGpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkCommandBuffer commandBuffer)
{
    const size_t blockCount = m_Blocks.size();

    // Zero-initialize one context entry per block (flags = 0, hBuffer = null).
    pDefragCtx->blockContexts.resize(blockCount);
    memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
        pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Create and bind buffer for whole block if necessary.
    {
        VkBufferCreateInfo bufCreateInfo;
        VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);

        // Stop creating buffers as soon as any create/bind call fails.
        for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
        {
            VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
            {
                // The buffer covers the entire block so move offsets can be
                // used directly as buffer offsets.
                bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
                pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
                    m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
                        m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
                }
            }
        }
    }

    // Go over all moves. Post data transfer commands to command buffer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];

            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);

            VkBufferCopy region = {
                move.srcOffset,
                move.dstOffset,
                move.size };
            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
        }
    }

    // Save buffers to defrag context for later destruction.
    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    {
        pDefragCtx->res = VK_NOT_READY;
    }
}
12690 
12691 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12692 {
12693  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12694  {
12695  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12696  if(pBlock->m_pMetadata->IsEmpty())
12697  {
12698  if(m_Blocks.size() > m_MinBlockCount)
12699  {
12700  if(pDefragmentationStats != VMA_NULL)
12701  {
12702  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12703  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12704  }
12705 
12706  VmaVectorRemove(m_Blocks, blockIndex);
12707  pBlock->Destroy(m_hAllocator);
12708  vma_delete(m_hAllocator, pBlock);
12709  }
12710  else
12711  {
12712  break;
12713  }
12714  }
12715  }
12716  UpdateHasEmptyBlock();
12717 }
12718 
12719 void VmaBlockVector::UpdateHasEmptyBlock()
12720 {
12721  m_HasEmptyBlock = false;
12722  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12723  {
12724  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12725  if(pBlock->m_pMetadata->IsEmpty())
12726  {
12727  m_HasEmptyBlock = true;
12728  break;
12729  }
12730  }
12731 }
12732 
12733 #if VMA_STATS_STRING_ENABLED
12734 
// Serializes this block vector as a JSON object into `json`, under a read
// lock. Custom pools emit their name, memory type, block size limits, frame
// in-use count and algorithm; default pools emit only PreferredBlockSize.
// Then every block's detailed map is written, keyed by block id.
// NOTE(review): the emitted key/value strings are part of the stats-string
// format consumed externally - do not change them.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(IsCustomPool())
    {
        // Pool name is optional - emit only when non-empty.
        const char* poolName = m_hParentPool->GetName();
        if(poolName != VMA_NULL && poolName[0] != '\0')
        {
            json.WriteString("Name");
            json.WriteString(poolName);
        }

        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain anything.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
12804 
12805 #endif // #if VMA_STATS_STRING_ENABLED
12806 
// Runs one defragmentation pass over this block vector.
// Chooses CPU (memmove on host-visible memory) or GPU (copy commands
// recorded into commandBuffer) execution based on what the limits and
// memory properties allow; when both are possible, prefers GPU for
// device-local memory or integrated GPUs. Consumes the corresponding
// max*BytesToMove / max*AllocationsToMove budgets (passed by reference) and
// accumulates pStats. If the mutex is taken here it stays locked - it is
// released later in DefragmentationEnd (pCtx->mutexLocked tracks this).
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;

    // CPU path needs host-visible memory; GPU path needs budget, no
    // corruption detection (canaries are written through the CPU), and this
    // memory type enabled in the GPU-defragmentation mask.
    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        !IsCorruptionDetectionEnabled() &&
        ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // GPU copies of overlapping regions within one buffer are not
        // guaranteed - only the CPU path (memmove) supports overlap.
        bool overlappingMoveSupported = !defragmentOnGpu;

        // Lock is intentionally NOT released here; DefragmentationEnd does.
        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Charge this pass against the caller's remaining budget.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
12894 
// Finishes a defragmentation pass started by Defragment():
// destroys the temporary per-block buffers created for GPU moves, frees
// blocks that became empty (when the pass did not fail), and releases the
// write mutex that Defragment() left locked (pCtx->mutexLocked).
void VmaBlockVector::DefragmentationEnd(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats)
{
    // Destroy buffers.
    for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    {
        VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
        if(blockCtx.hBuffer)
        {
            (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
                m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
        }
    }

    // res >= VK_SUCCESS also covers VK_NOT_READY set by the GPU path.
    if(pCtx->res >= VK_SUCCESS)
    {
        FreeEmptyBlocks(pStats);
    }

    // Release the lock taken in Defragment(), if any.
    if(pCtx->mutexLocked)
    {
        VMA_ASSERT(m_hAllocator->m_UseMutex);
        m_Mutex.UnlockWrite();
    }
}
12921 
12922 size_t VmaBlockVector::CalcAllocationCount() const
12923 {
12924  size_t result = 0;
12925  for(size_t i = 0; i < m_Blocks.size(); ++i)
12926  {
12927  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12928  }
12929  return result;
12930 }
12931 
12932 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12933 {
12934  if(m_BufferImageGranularity == 1)
12935  {
12936  return false;
12937  }
12938  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12939  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12940  {
12941  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12942  VMA_ASSERT(m_Algorithm == 0);
12943  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12944  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12945  {
12946  return true;
12947  }
12948  }
12949  return false;
12950 }
12951 
12952 void VmaBlockVector::MakePoolAllocationsLost(
12953  uint32_t currentFrameIndex,
12954  size_t* pLostAllocationCount)
12955 {
12956  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12957  size_t lostAllocationCount = 0;
12958  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12959  {
12960  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12961  VMA_ASSERT(pBlock);
12962  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12963  }
12964  if(pLostAllocationCount != VMA_NULL)
12965  {
12966  *pLostAllocationCount = lostAllocationCount;
12967  }
12968 }
12969 
12970 VkResult VmaBlockVector::CheckCorruption()
12971 {
12972  if(!IsCorruptionDetectionEnabled())
12973  {
12974  return VK_ERROR_FEATURE_NOT_PRESENT;
12975  }
12976 
12977  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12978  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12979  {
12980  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12981  VMA_ASSERT(pBlock);
12982  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12983  if(res != VK_SUCCESS)
12984  {
12985  return res;
12986  }
12987  }
12988  return VK_SUCCESS;
12989 }
12990 
12991 void VmaBlockVector::AddStats(VmaStats* pStats)
12992 {
12993  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12994  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12995 
12996  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12997 
12998  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12999  {
13000  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13001  VMA_ASSERT(pBlock);
13002  VMA_HEAVY_ASSERT(pBlock->Validate());
13003  VmaStatInfo allocationStatInfo;
13004  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13005  VmaAddStatInfo(pStats->total, allocationStatInfo);
13006  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13007  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13008  }
13009 }
13010 
13012 // VmaDefragmentationAlgorithm_Generic members definition
13013 
/*
Generic defragmentation algorithm: snapshots the current set of blocks of
pBlockVector into per-block bookkeeping (BlockInfo) used while planning moves.
*/
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // NOTE(review): overlappingMoveSupported is not used by this algorithm.
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // Enables binary search by block pointer in AddAllocation().
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}
13039 
13040 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13041 {
13042  for(size_t i = m_Blocks.size(); i--; )
13043  {
13044  vma_delete(m_hAllocator, m_Blocks[i]);
13045  }
13046 }
13047 
13048 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13049 {
13050  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
13051  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13052  {
13053  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13054  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13055  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13056  {
13057  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13058  (*it)->m_Allocations.push_back(allocInfo);
13059  }
13060  else
13061  {
13062  VMA_ASSERT(0);
13063  }
13064 
13065  ++m_AllocationCount;
13066  }
13067 }
13068 
/*
Performs one round of move planning for the generic algorithm.

Walks candidate allocations from the most "source" block (end of m_Blocks)
towards the most "destination" block (front) and, for each, tries to find a
better place in an earlier block or earlier within the same block. Successful
relocations are appended to `moves` and applied immediately to block
metadata; the actual data copy is performed later by the caller.

Returns VK_SUCCESS, including when stopping early because maxBytesToMove or
maxAllocationsToMove was reached.
*/
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    // srcAllocIndex == SIZE_MAX means "not chosen yet"; the search below then
    // picks the last allocation of the current source block.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the planned move. Block indices refer to the
                // original (unsorted) block order.
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Apply the move to metadata immediately so subsequent
                // requests see the new layout; data is copied later.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Report the change through the caller's optional flag.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
13215 
13216 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13217 {
13218  size_t result = 0;
13219  for(size_t i = 0; i < m_Blocks.size(); ++i)
13220  {
13221  if(m_Blocks[i]->m_HasNonMovableAllocations)
13222  {
13223  ++result;
13224  }
13225  }
13226  return result;
13227 }
13228 
/*
Entry point of the generic algorithm: gathers allocations (all of them if
AddAll() was used), sorts blocks from most "destination" to most "source",
then runs a fixed number of DefragmentRound() passes, accumulating planned
moves in `moves`.
*/
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing was registered for defragmentation.
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            // In "all allocations" mode, collect every used suballocation
            // straight from the block metadata.
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
13283 
13284 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13285  size_t dstBlockIndex, VkDeviceSize dstOffset,
13286  size_t srcBlockIndex, VkDeviceSize srcOffset)
13287 {
13288  if(dstBlockIndex < srcBlockIndex)
13289  {
13290  return true;
13291  }
13292  if(dstBlockIndex > srcBlockIndex)
13293  {
13294  return false;
13295  }
13296  if(dstOffset < srcOffset)
13297  {
13298  return true;
13299  }
13300  return false;
13301 }
13302 
13304 // VmaDefragmentationAlgorithm_Fast
13305 
/*
"Fast" defragmentation algorithm: compacts allocations in a single linear
sweep. Only valid when VMA_DEBUG_MARGIN == 0, because it packs allocations
back-to-back leaving no room for debug margins.
*/
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
13322 
// Trivial destructor: this algorithm owns no resources beyond its member
// containers, which clean up after themselves.
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
}
13326 
/*
Entry point of the "fast" algorithm.

Compacts all allocations towards the front of the block sequence in one
linear sweep, working directly on block metadata: PreprocessMetadata() first
strips free suballocations, the sweep below rewrites offsets/owning blocks,
and PostprocessMetadata() rebuilds free-space bookkeeping. Planned copies
are appended to `moves` for the caller to execute. Requires that ALL
allocations of the block vector were registered (see the assert below).
*/
VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);

    const size_t blockCount = m_pBlockVector->GetBlockCount();
    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    {
        return VK_SUCCESS;
    }

    PreprocessMetadata();

    // Sort blocks in order from most destination.

    m_BlockInfos.resize(blockCount);
    for(size_t i = 0; i < blockCount; ++i)
    {
        m_BlockInfos[i].origBlockIndex = i;
    }

    // Blocks with the least free space first - they are the best "destinations".
    VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
        return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
            m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    });

    // THE MAIN ALGORITHM

    // Gaps skipped during the sweep are remembered here so that later,
    // smaller allocations may still fill them.
    FreeSpaceDatabase freeSpaceDb;

    // Destination cursor: current destination block plus write offset in it.
    size_t dstBlockInfoIndex = 0;
    size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    VkDeviceSize dstOffset = 0;

    bool end = false;
    for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    {
        const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
        VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
        VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
        for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
            !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
        {
            VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
            const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
            const VkDeviceSize srcAllocSize = srcSuballocIt->size;
            // Stop the whole sweep when either limit would be exceeded.
            if(m_AllocationsMoved == maxAllocationsToMove ||
                m_BytesMoved + srcAllocSize > maxBytesToMove)
            {
                end = true;
                break;
            }
            const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;

            // Try to place it in one of free spaces from the database.
            size_t freeSpaceInfoIndex;
            VkDeviceSize dstAllocOffset;
            if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
                freeSpaceInfoIndex, dstAllocOffset))
            {
                size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
                VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
                VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;

                // Same block
                if(freeSpaceInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeOffset(dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    // Re-insert the suballocation at its new, lower position
                    // to keep the list sorted by offset.
                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    InsertSuballoc(pFreeSpaceMetadata, suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, freeSpaceOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
            else
            {
                dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);

                // If the allocation doesn't fit before the end of dstBlock, forward to next block.
                while(dstBlockInfoIndex < srcBlockInfoIndex &&
                    dstAllocOffset + srcAllocSize > dstBlockSize)
                {
                    // But before that, register remaining free space at the end of dst block.
                    freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);

                    ++dstBlockInfoIndex;
                    dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
                    pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
                    pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
                    dstBlockSize = pDstMetadata->GetSize();
                    dstOffset = 0;
                    dstAllocOffset = 0;
                }

                // Same block
                if(dstBlockInfoIndex == srcBlockInfoIndex)
                {
                    VMA_ASSERT(dstAllocOffset <= srcAllocOffset);

                    const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;

                    bool skipOver = overlap;
                    if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
                    {
                        // If destination and source place overlap, skip if it would move it
                        // by only < 1/64 of its size.
                        skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
                    }

                    if(skipOver)
                    {
                        // Leave the allocation in place; remember the gap before it.
                        freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);

                        dstOffset = srcAllocOffset + srcAllocSize;
                        ++srcSuballocIt;
                    }
                    // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
                    else
                    {
                        srcSuballocIt->offset = dstAllocOffset;
                        srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
                        dstOffset = dstAllocOffset + srcAllocSize;
                        m_BytesMoved += srcAllocSize;
                        ++m_AllocationsMoved;
                        ++srcSuballocIt;
                        VmaDefragmentationMove move = {
                            srcOrigBlockIndex, dstOrigBlockIndex,
                            srcAllocOffset, dstAllocOffset,
                            srcAllocSize };
                        moves.push_back(move);
                    }
                }
                // Different block
                else
                {
                    // MOVE OPTION 2: Move the allocation to a different block.

                    VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
                    VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);

                    VmaSuballocation suballoc = *srcSuballocIt;
                    suballoc.offset = dstAllocOffset;
                    suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
                    dstOffset = dstAllocOffset + srcAllocSize;
                    m_BytesMoved += srcAllocSize;
                    ++m_AllocationsMoved;

                    VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
                    ++nextSuballocIt;
                    pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
                    srcSuballocIt = nextSuballocIt;

                    // Destination list stays sorted: new entries only ever go
                    // past its current end.
                    pDstMetadata->m_Suballocations.push_back(suballoc);

                    VmaDefragmentationMove move = {
                        srcOrigBlockIndex, dstOrigBlockIndex,
                        srcAllocOffset, dstAllocOffset,
                        srcAllocSize };
                    moves.push_back(move);
                }
            }
        }
    }

    m_BlockInfos.clear();

    PostprocessMetadata();

    return VK_SUCCESS;
}
13545 
13546 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13547 {
13548  const size_t blockCount = m_pBlockVector->GetBlockCount();
13549  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13550  {
13551  VmaBlockMetadata_Generic* const pMetadata =
13552  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13553  pMetadata->m_FreeCount = 0;
13554  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13555  pMetadata->m_FreeSuballocationsBySize.clear();
13556  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13557  it != pMetadata->m_Suballocations.end(); )
13558  {
13559  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13560  {
13561  VmaSuballocationList::iterator nextIt = it;
13562  ++nextIt;
13563  pMetadata->m_Suballocations.erase(it);
13564  it = nextIt;
13565  }
13566  else
13567  {
13568  ++it;
13569  }
13570  }
13571  }
13572 }
13573 
13574 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13575 {
13576  const size_t blockCount = m_pBlockVector->GetBlockCount();
13577  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13578  {
13579  VmaBlockMetadata_Generic* const pMetadata =
13580  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13581  const VkDeviceSize blockSize = pMetadata->GetSize();
13582 
13583  // No allocations in this block - entire area is free.
13584  if(pMetadata->m_Suballocations.empty())
13585  {
13586  pMetadata->m_FreeCount = 1;
13587  //pMetadata->m_SumFreeSize is already set to blockSize.
13588  VmaSuballocation suballoc = {
13589  0, // offset
13590  blockSize, // size
13591  VMA_NULL, // hAllocation
13592  VMA_SUBALLOCATION_TYPE_FREE };
13593  pMetadata->m_Suballocations.push_back(suballoc);
13594  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13595  }
13596  // There are some allocations in this block.
13597  else
13598  {
13599  VkDeviceSize offset = 0;
13600  VmaSuballocationList::iterator it;
13601  for(it = pMetadata->m_Suballocations.begin();
13602  it != pMetadata->m_Suballocations.end();
13603  ++it)
13604  {
13605  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13606  VMA_ASSERT(it->offset >= offset);
13607 
13608  // Need to insert preceding free space.
13609  if(it->offset > offset)
13610  {
13611  ++pMetadata->m_FreeCount;
13612  const VkDeviceSize freeSize = it->offset - offset;
13613  VmaSuballocation suballoc = {
13614  offset, // offset
13615  freeSize, // size
13616  VMA_NULL, // hAllocation
13617  VMA_SUBALLOCATION_TYPE_FREE };
13618  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13619  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13620  {
13621  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13622  }
13623  }
13624 
13625  pMetadata->m_SumFreeSize -= it->size;
13626  offset = it->offset + it->size;
13627  }
13628 
13629  // Need to insert trailing free space.
13630  if(offset < blockSize)
13631  {
13632  ++pMetadata->m_FreeCount;
13633  const VkDeviceSize freeSize = blockSize - offset;
13634  VmaSuballocation suballoc = {
13635  offset, // offset
13636  freeSize, // size
13637  VMA_NULL, // hAllocation
13638  VMA_SUBALLOCATION_TYPE_FREE };
13639  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13640  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13641  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13642  {
13643  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13644  }
13645  }
13646 
13647  VMA_SORT(
13648  pMetadata->m_FreeSuballocationsBySize.begin(),
13649  pMetadata->m_FreeSuballocationsBySize.end(),
13650  VmaSuballocationItemSizeLess());
13651  }
13652 
13653  VMA_HEAVY_ASSERT(pMetadata->Validate());
13654  }
13655 }
13656 
13657 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13658 {
13659  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13660  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13661  while(it != pMetadata->m_Suballocations.end())
13662  {
13663  if(it->offset < suballoc.offset)
13664  {
13665  ++it;
13666  }
13667  }
13668  pMetadata->m_Suballocations.insert(it, suballoc);
13669 }
13670 
13672 // VmaBlockVectorDefragmentationContext
13673 
/*
Per-block-vector defragmentation context. hCustomPool is VMA_NULL when
pBlockVector belongs to one of the allocator's default pools. The concrete
algorithm (m_pAlgorithm) is chosen and created later, in Begin().
*/
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
13691 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // m_pAlgorithm may still be VMA_NULL if Begin() was never called;
    // presumably vma_delete handles null - TODO(review) confirm.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
13696 
13697 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13698 {
13699  AllocInfo info = { hAlloc, pChanged };
13700  m_Allocations.push_back(info);
13701 }
13702 
13703 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13704 {
13705  const bool allAllocations = m_AllAllocations ||
13706  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13707 
13708  /********************************
13709  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13710  ********************************/
13711 
13712  /*
13713  Fast algorithm is supported only when certain criteria are met:
13714  - VMA_DEBUG_MARGIN is 0.
13715  - All allocations in this block vector are moveable.
13716  - There is no possibility of image/buffer granularity conflict.
13717  */
13718  if(VMA_DEBUG_MARGIN == 0 &&
13719  allAllocations &&
13720  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13721  {
13722  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13723  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13724  }
13725  else
13726  {
13727  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13728  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13729  }
13730 
13731  if(allAllocations)
13732  {
13733  m_pAlgorithm->AddAll();
13734  }
13735  else
13736  {
13737  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13738  {
13739  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13740  }
13741  }
13742 }
13743 
13745 // VmaDefragmentationContext
13746 
/*
Top-level defragmentation context covering the allocator's default pools
(one context slot per memory type) plus any number of custom pools.
*/
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Default-pool contexts are created lazily in AddAllocations();
    // start with every slot empty.
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
13760 
13761 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13762 {
13763  for(size_t i = m_CustomPoolContexts.size(); i--; )
13764  {
13765  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13766  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13767  vma_delete(m_hAllocator, pBlockVectorCtx);
13768  }
13769  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13770  {
13771  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13772  if(pBlockVectorCtx)
13773  {
13774  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13775  vma_delete(m_hAllocator, pBlockVectorCtx);
13776  }
13777  }
13778 }
13779 
13780 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13781 {
13782  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13783  {
13784  VmaPool pool = pPools[poolIndex];
13785  VMA_ASSERT(pool);
13786  // Pools with algorithm other than default are not defragmented.
13787  if(pool->m_BlockVector.GetAlgorithm() == 0)
13788  {
13789  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13790 
13791  for(size_t i = m_CustomPoolContexts.size(); i--; )
13792  {
13793  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13794  {
13795  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13796  break;
13797  }
13798  }
13799 
13800  if(!pBlockVectorDefragCtx)
13801  {
13802  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13803  m_hAllocator,
13804  pool,
13805  &pool->m_BlockVector,
13806  m_CurrFrameIndex);
13807  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13808  }
13809 
13810  pBlockVectorDefragCtx->AddAll();
13811  }
13812  }
13813 }
13814 
13815 void VmaDefragmentationContext_T::AddAllocations(
13816  uint32_t allocationCount,
13817  VmaAllocation* pAllocations,
13818  VkBool32* pAllocationsChanged)
13819 {
13820  // Dispatch pAllocations among defragmentators. Create them when necessary.
13821  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13822  {
13823  const VmaAllocation hAlloc = pAllocations[allocIndex];
13824  VMA_ASSERT(hAlloc);
13825  // DedicatedAlloc cannot be defragmented.
13826  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13827  // Lost allocation cannot be defragmented.
13828  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13829  {
13830  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13831 
13832  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13833  // This allocation belongs to custom pool.
13834  if(hAllocPool != VK_NULL_HANDLE)
13835  {
13836  // Pools with algorithm other than default are not defragmented.
13837  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13838  {
13839  for(size_t i = m_CustomPoolContexts.size(); i--; )
13840  {
13841  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13842  {
13843  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13844  break;
13845  }
13846  }
13847  if(!pBlockVectorDefragCtx)
13848  {
13849  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13850  m_hAllocator,
13851  hAllocPool,
13852  &hAllocPool->m_BlockVector,
13853  m_CurrFrameIndex);
13854  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13855  }
13856  }
13857  }
13858  // This allocation belongs to default pool.
13859  else
13860  {
13861  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13862  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13863  if(!pBlockVectorDefragCtx)
13864  {
13865  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13866  m_hAllocator,
13867  VMA_NULL, // hCustomPool
13868  m_hAllocator->m_pBlockVectors[memTypeIndex],
13869  m_CurrFrameIndex);
13870  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13871  }
13872  }
13873 
13874  if(pBlockVectorDefragCtx)
13875  {
13876  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13877  &pAllocationsChanged[allocIndex] : VMA_NULL;
13878  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13879  }
13880  }
13881  }
13882 }
13883 
13884 VkResult VmaDefragmentationContext_T::Defragment(
13885  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13886  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13887  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13888 {
13889  if(pStats)
13890  {
13891  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13892  }
13893 
13894  if(commandBuffer == VK_NULL_HANDLE)
13895  {
13896  maxGpuBytesToMove = 0;
13897  maxGpuAllocationsToMove = 0;
13898  }
13899 
13900  VkResult res = VK_SUCCESS;
13901 
13902  // Process default pools.
13903  for(uint32_t memTypeIndex = 0;
13904  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13905  ++memTypeIndex)
13906  {
13907  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13908  if(pBlockVectorCtx)
13909  {
13910  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13911  pBlockVectorCtx->GetBlockVector()->Defragment(
13912  pBlockVectorCtx,
13913  pStats,
13914  maxCpuBytesToMove, maxCpuAllocationsToMove,
13915  maxGpuBytesToMove, maxGpuAllocationsToMove,
13916  commandBuffer);
13917  if(pBlockVectorCtx->res != VK_SUCCESS)
13918  {
13919  res = pBlockVectorCtx->res;
13920  }
13921  }
13922  }
13923 
13924  // Process custom pools.
13925  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13926  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13927  ++customCtxIndex)
13928  {
13929  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13930  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13931  pBlockVectorCtx->GetBlockVector()->Defragment(
13932  pBlockVectorCtx,
13933  pStats,
13934  maxCpuBytesToMove, maxCpuAllocationsToMove,
13935  maxGpuBytesToMove, maxGpuAllocationsToMove,
13936  commandBuffer);
13937  if(pBlockVectorCtx->res != VK_SUCCESS)
13938  {
13939  res = pBlockVectorCtx->res;
13940  }
13941  }
13942 
13943  return res;
13944 }
13945 
13947 // VmaRecorder
13948 
13949 #if VMA_RECORDING_ENABLED
13950 
13951 VmaRecorder::VmaRecorder() :
13952  m_UseMutex(true),
13953  m_Flags(0),
13954  m_File(VMA_NULL),
13955  m_Freq(INT64_MAX),
13956  m_StartCounter(INT64_MAX)
13957 {
13958 }
13959 
13960 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13961 {
13962  m_UseMutex = useMutex;
13963  m_Flags = settings.flags;
13964 
13965  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13966  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13967 
13968  // Open file for writing.
13969  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13970  if(err != 0)
13971  {
13972  return VK_ERROR_INITIALIZATION_FAILED;
13973  }
13974 
13975  // Write header.
13976  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13977  fprintf(m_File, "%s\n", "1,8");
13978 
13979  return VK_SUCCESS;
13980 }
13981 
13982 VmaRecorder::~VmaRecorder()
13983 {
13984  if(m_File != VMA_NULL)
13985  {
13986  fclose(m_File);
13987  }
13988 }
13989 
13990 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13991 {
13992  CallParams callParams;
13993  GetBasicParams(callParams);
13994 
13995  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13996  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13997  Flush();
13998 }
13999 
14000 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
14001 {
14002  CallParams callParams;
14003  GetBasicParams(callParams);
14004 
14005  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14006  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
14007  Flush();
14008 }
14009 
14010 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
14011 {
14012  CallParams callParams;
14013  GetBasicParams(callParams);
14014 
14015  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14016  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
14017  createInfo.memoryTypeIndex,
14018  createInfo.flags,
14019  createInfo.blockSize,
14020  (uint64_t)createInfo.minBlockCount,
14021  (uint64_t)createInfo.maxBlockCount,
14022  createInfo.frameInUseCount,
14023  pool);
14024  Flush();
14025 }
14026 
14027 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
14028 {
14029  CallParams callParams;
14030  GetBasicParams(callParams);
14031 
14032  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14033  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
14034  pool);
14035  Flush();
14036 }
14037 
14038 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
14039  const VkMemoryRequirements& vkMemReq,
14040  const VmaAllocationCreateInfo& createInfo,
14041  VmaAllocation allocation)
14042 {
14043  CallParams callParams;
14044  GetBasicParams(callParams);
14045 
14046  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14047  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14048  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14049  vkMemReq.size,
14050  vkMemReq.alignment,
14051  vkMemReq.memoryTypeBits,
14052  createInfo.flags,
14053  createInfo.usage,
14054  createInfo.requiredFlags,
14055  createInfo.preferredFlags,
14056  createInfo.memoryTypeBits,
14057  createInfo.pool,
14058  allocation,
14059  userDataStr.GetString());
14060  Flush();
14061 }
14062 
14063 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
14064  const VkMemoryRequirements& vkMemReq,
14065  const VmaAllocationCreateInfo& createInfo,
14066  uint64_t allocationCount,
14067  const VmaAllocation* pAllocations)
14068 {
14069  CallParams callParams;
14070  GetBasicParams(callParams);
14071 
14072  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14073  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14074  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
14075  vkMemReq.size,
14076  vkMemReq.alignment,
14077  vkMemReq.memoryTypeBits,
14078  createInfo.flags,
14079  createInfo.usage,
14080  createInfo.requiredFlags,
14081  createInfo.preferredFlags,
14082  createInfo.memoryTypeBits,
14083  createInfo.pool);
14084  PrintPointerList(allocationCount, pAllocations);
14085  fprintf(m_File, ",%s\n", userDataStr.GetString());
14086  Flush();
14087 }
14088 
14089 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
14090  const VkMemoryRequirements& vkMemReq,
14091  bool requiresDedicatedAllocation,
14092  bool prefersDedicatedAllocation,
14093  const VmaAllocationCreateInfo& createInfo,
14094  VmaAllocation allocation)
14095 {
14096  CallParams callParams;
14097  GetBasicParams(callParams);
14098 
14099  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14100  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14101  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14102  vkMemReq.size,
14103  vkMemReq.alignment,
14104  vkMemReq.memoryTypeBits,
14105  requiresDedicatedAllocation ? 1 : 0,
14106  prefersDedicatedAllocation ? 1 : 0,
14107  createInfo.flags,
14108  createInfo.usage,
14109  createInfo.requiredFlags,
14110  createInfo.preferredFlags,
14111  createInfo.memoryTypeBits,
14112  createInfo.pool,
14113  allocation,
14114  userDataStr.GetString());
14115  Flush();
14116 }
14117 
14118 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
14119  const VkMemoryRequirements& vkMemReq,
14120  bool requiresDedicatedAllocation,
14121  bool prefersDedicatedAllocation,
14122  const VmaAllocationCreateInfo& createInfo,
14123  VmaAllocation allocation)
14124 {
14125  CallParams callParams;
14126  GetBasicParams(callParams);
14127 
14128  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14129  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
14130  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14131  vkMemReq.size,
14132  vkMemReq.alignment,
14133  vkMemReq.memoryTypeBits,
14134  requiresDedicatedAllocation ? 1 : 0,
14135  prefersDedicatedAllocation ? 1 : 0,
14136  createInfo.flags,
14137  createInfo.usage,
14138  createInfo.requiredFlags,
14139  createInfo.preferredFlags,
14140  createInfo.memoryTypeBits,
14141  createInfo.pool,
14142  allocation,
14143  userDataStr.GetString());
14144  Flush();
14145 }
14146 
14147 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
14148  VmaAllocation allocation)
14149 {
14150  CallParams callParams;
14151  GetBasicParams(callParams);
14152 
14153  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14154  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14155  allocation);
14156  Flush();
14157 }
14158 
14159 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14160  uint64_t allocationCount,
14161  const VmaAllocation* pAllocations)
14162 {
14163  CallParams callParams;
14164  GetBasicParams(callParams);
14165 
14166  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14167  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14168  PrintPointerList(allocationCount, pAllocations);
14169  fprintf(m_File, "\n");
14170  Flush();
14171 }
14172 
14173 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14174  VmaAllocation allocation,
14175  const void* pUserData)
14176 {
14177  CallParams callParams;
14178  GetBasicParams(callParams);
14179 
14180  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14181  UserDataString userDataStr(
14182  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14183  pUserData);
14184  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14185  allocation,
14186  userDataStr.GetString());
14187  Flush();
14188 }
14189 
14190 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14191  VmaAllocation allocation)
14192 {
14193  CallParams callParams;
14194  GetBasicParams(callParams);
14195 
14196  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14197  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14198  allocation);
14199  Flush();
14200 }
14201 
14202 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14203  VmaAllocation allocation)
14204 {
14205  CallParams callParams;
14206  GetBasicParams(callParams);
14207 
14208  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14209  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14210  allocation);
14211  Flush();
14212 }
14213 
14214 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14215  VmaAllocation allocation)
14216 {
14217  CallParams callParams;
14218  GetBasicParams(callParams);
14219 
14220  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14221  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14222  allocation);
14223  Flush();
14224 }
14225 
14226 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14227  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14228 {
14229  CallParams callParams;
14230  GetBasicParams(callParams);
14231 
14232  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14233  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14234  allocation,
14235  offset,
14236  size);
14237  Flush();
14238 }
14239 
14240 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14241  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14242 {
14243  CallParams callParams;
14244  GetBasicParams(callParams);
14245 
14246  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14247  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14248  allocation,
14249  offset,
14250  size);
14251  Flush();
14252 }
14253 
14254 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14255  const VkBufferCreateInfo& bufCreateInfo,
14256  const VmaAllocationCreateInfo& allocCreateInfo,
14257  VmaAllocation allocation)
14258 {
14259  CallParams callParams;
14260  GetBasicParams(callParams);
14261 
14262  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14263  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14264  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14265  bufCreateInfo.flags,
14266  bufCreateInfo.size,
14267  bufCreateInfo.usage,
14268  bufCreateInfo.sharingMode,
14269  allocCreateInfo.flags,
14270  allocCreateInfo.usage,
14271  allocCreateInfo.requiredFlags,
14272  allocCreateInfo.preferredFlags,
14273  allocCreateInfo.memoryTypeBits,
14274  allocCreateInfo.pool,
14275  allocation,
14276  userDataStr.GetString());
14277  Flush();
14278 }
14279 
14280 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14281  const VkImageCreateInfo& imageCreateInfo,
14282  const VmaAllocationCreateInfo& allocCreateInfo,
14283  VmaAllocation allocation)
14284 {
14285  CallParams callParams;
14286  GetBasicParams(callParams);
14287 
14288  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14289  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14290  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14291  imageCreateInfo.flags,
14292  imageCreateInfo.imageType,
14293  imageCreateInfo.format,
14294  imageCreateInfo.extent.width,
14295  imageCreateInfo.extent.height,
14296  imageCreateInfo.extent.depth,
14297  imageCreateInfo.mipLevels,
14298  imageCreateInfo.arrayLayers,
14299  imageCreateInfo.samples,
14300  imageCreateInfo.tiling,
14301  imageCreateInfo.usage,
14302  imageCreateInfo.sharingMode,
14303  imageCreateInfo.initialLayout,
14304  allocCreateInfo.flags,
14305  allocCreateInfo.usage,
14306  allocCreateInfo.requiredFlags,
14307  allocCreateInfo.preferredFlags,
14308  allocCreateInfo.memoryTypeBits,
14309  allocCreateInfo.pool,
14310  allocation,
14311  userDataStr.GetString());
14312  Flush();
14313 }
14314 
14315 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14316  VmaAllocation allocation)
14317 {
14318  CallParams callParams;
14319  GetBasicParams(callParams);
14320 
14321  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14322  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14323  allocation);
14324  Flush();
14325 }
14326 
14327 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14328  VmaAllocation allocation)
14329 {
14330  CallParams callParams;
14331  GetBasicParams(callParams);
14332 
14333  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14334  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14335  allocation);
14336  Flush();
14337 }
14338 
14339 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14340  VmaAllocation allocation)
14341 {
14342  CallParams callParams;
14343  GetBasicParams(callParams);
14344 
14345  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14346  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14347  allocation);
14348  Flush();
14349 }
14350 
14351 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14352  VmaAllocation allocation)
14353 {
14354  CallParams callParams;
14355  GetBasicParams(callParams);
14356 
14357  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14358  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14359  allocation);
14360  Flush();
14361 }
14362 
14363 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14364  VmaPool pool)
14365 {
14366  CallParams callParams;
14367  GetBasicParams(callParams);
14368 
14369  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14370  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14371  pool);
14372  Flush();
14373 }
14374 
14375 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14376  const VmaDefragmentationInfo2& info,
14378 {
14379  CallParams callParams;
14380  GetBasicParams(callParams);
14381 
14382  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14383  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14384  info.flags);
14385  PrintPointerList(info.allocationCount, info.pAllocations);
14386  fprintf(m_File, ",");
14387  PrintPointerList(info.poolCount, info.pPools);
14388  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14389  info.maxCpuBytesToMove,
14391  info.maxGpuBytesToMove,
14393  info.commandBuffer,
14394  ctx);
14395  Flush();
14396 }
14397 
14398 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14400 {
14401  CallParams callParams;
14402  GetBasicParams(callParams);
14403 
14404  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14405  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14406  ctx);
14407  Flush();
14408 }
14409 
14410 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
14411  VmaPool pool,
14412  const char* name)
14413 {
14414  CallParams callParams;
14415  GetBasicParams(callParams);
14416 
14417  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14418  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14419  pool, name != VMA_NULL ? name : "");
14420  Flush();
14421 }
14422 
14423 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14424 {
14425  if(pUserData != VMA_NULL)
14426  {
14427  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14428  {
14429  m_Str = (const char*)pUserData;
14430  }
14431  else
14432  {
14433  sprintf_s(m_PtrStr, "%p", pUserData);
14434  m_Str = m_PtrStr;
14435  }
14436  }
14437  else
14438  {
14439  m_Str = "";
14440  }
14441 }
14442 
14443 void VmaRecorder::WriteConfiguration(
14444  const VkPhysicalDeviceProperties& devProps,
14445  const VkPhysicalDeviceMemoryProperties& memProps,
14446  uint32_t vulkanApiVersion,
14447  bool dedicatedAllocationExtensionEnabled,
14448  bool bindMemory2ExtensionEnabled,
14449  bool memoryBudgetExtensionEnabled)
14450 {
14451  fprintf(m_File, "Config,Begin\n");
14452 
14453  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
14454 
14455  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14456  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14457  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14458  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14459  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14460  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14461 
14462  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14463  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14464  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14465 
14466  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14467  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14468  {
14469  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14470  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14471  }
14472  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14473  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14474  {
14475  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14476  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14477  }
14478 
14479  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14480  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14481  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
14482 
14483  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14484  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14485  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14486  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14487  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14488  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14489  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14490  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14491  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14492 
14493  fprintf(m_File, "Config,End\n");
14494 }
14495 
14496 void VmaRecorder::GetBasicParams(CallParams& outParams)
14497 {
14498  outParams.threadId = GetCurrentThreadId();
14499 
14500  LARGE_INTEGER counter;
14501  QueryPerformanceCounter(&counter);
14502  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14503 }
14504 
14505 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14506 {
14507  if(count)
14508  {
14509  fprintf(m_File, "%p", pItems[0]);
14510  for(uint64_t i = 1; i < count; ++i)
14511  {
14512  fprintf(m_File, " %p", pItems[i]);
14513  }
14514  }
14515 }
14516 
14517 void VmaRecorder::Flush()
14518 {
14519  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14520  {
14521  fflush(m_File);
14522  }
14523 }
14524 
14525 #endif // #if VMA_RECORDING_ENABLED
14526 
14528 // VmaAllocationObjectAllocator
14529 
14530 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14531  m_Allocator(pAllocationCallbacks, 1024)
14532 {
14533 }
14534 
14535 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14536 {
14537  VmaMutexLock mutexLock(m_Mutex);
14538  return m_Allocator.Alloc();
14539 }
14540 
14541 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14542 {
14543  VmaMutexLock mutexLock(m_Mutex);
14544  m_Allocator.Free(hAlloc);
14545 }
14546 
14548 // VmaAllocator_T
14549 
14550 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14551  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14552  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
14553  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14554  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14555  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14556  m_hDevice(pCreateInfo->device),
14557  m_hInstance(pCreateInfo->instance),
14558  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14559  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14560  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14561  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14562  m_HeapSizeLimitMask(0),
14563  m_PreferredLargeHeapBlockSize(0),
14564  m_PhysicalDevice(pCreateInfo->physicalDevice),
14565  m_CurrentFrameIndex(0),
14566  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14567  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14568  m_NextPoolId(0)
14570  ,m_pRecorder(VMA_NULL)
14571 #endif
14572 {
14573  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14574  {
14575  m_UseKhrDedicatedAllocation = false;
14576  m_UseKhrBindMemory2 = false;
14577  }
14578 
14579  if(VMA_DEBUG_DETECT_CORRUPTION)
14580  {
14581  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14582  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14583  }
14584 
14585  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14586 
14587  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14588  {
14589 #if !(VMA_DEDICATED_ALLOCATION)
14591  {
14592  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14593  }
14594 #endif
14595 #if !(VMA_BIND_MEMORY2)
14596  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14597  {
14598  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14599  }
14600 #endif
14601  }
14602 #if !(VMA_MEMORY_BUDGET)
14603  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14604  {
14605  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14606  }
14607 #endif
14608 #if VMA_VULKAN_VERSION < 1001000
14609  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14610  {
14611  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
14612  }
14613 #endif
14614 
14615  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14616  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14617  memset(&m_MemProps, 0, sizeof(m_MemProps));
14618 
14619  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14620  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14621  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14622 
14623  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14624  {
14625  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14626  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14627  }
14628 
14629  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14630 
14631  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14632  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14633 
14634  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14635  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14636  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14637  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14638 
14639  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14640  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14641 
14642  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14643  {
14644  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14645  {
14646  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14647  if(limit != VK_WHOLE_SIZE)
14648  {
14649  m_HeapSizeLimitMask |= 1u << heapIndex;
14650  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14651  {
14652  m_MemProps.memoryHeaps[heapIndex].size = limit;
14653  }
14654  }
14655  }
14656  }
14657 
14658  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14659  {
14660  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14661 
14662  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14663  this,
14664  VK_NULL_HANDLE, // hParentPool
14665  memTypeIndex,
14666  preferredBlockSize,
14667  0,
14668  SIZE_MAX,
14669  GetBufferImageGranularity(),
14670  pCreateInfo->frameInUseCount,
14671  false, // explicitBlockSize
14672  false); // linearAlgorithm
14673  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14674  // becase minBlockCount is 0.
14675  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14676 
14677  }
14678 }
14679 
// Second-phase initialization that can fail (the constructor cannot return an error).
// Sets up the optional call recorder and fetches the initial memory budget.
// Returns VK_SUCCESS, or an error from VmaRecorder::Init, or
// VK_ERROR_FEATURE_NOT_PRESENT when recording is requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is requested only when settings are provided with a non-empty file path.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write a header describing the environment, then log the creation itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_VulkanApiVersion,
            m_UseKhrDedicatedAllocation,
            m_UseKhrBindMemory2,
            m_UseExtMemoryBudget);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        // Recording support was not compiled in: fail loudly rather than silently ignore.
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

#if VMA_MEMORY_BUDGET
    // Prime the cached budget numbers from VK_EXT_memory_budget.
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET

    return res;
}
14717 
// Destructor: logs destruction to the recorder (if any) and frees per-memory-type
// bookkeeping. The user must have destroyed all pools and freed all dedicated
// allocations beforehand; violations are caught by asserts in debug builds.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    // All custom pools must already be destroyed by the user.
    VMA_ASSERT(m_Pools.empty());

    // Tear down per-memory-type vectors in reverse index order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
        {
            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
        }

        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
14741 
// Populates m_VulkanFunctions from three sources, in increasing priority:
// 1. Statically linked Vulkan functions (when VMA_STATIC_VULKAN_FUNCTIONS == 1),
//    including extension entry points fetched via vkGet*ProcAddr.
// 2. Pointers explicitly supplied by the user in pVulkanFunctions (may be null;
//    only non-null members overwrite what was gathered in step 1).
// Finally asserts that every function required by the enabled feature set is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Core 1.0 entry points taken directly from the statically linked loader.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
#if VMA_VULKAN_VERSION >= 1001000
    // On Vulkan >= 1.1 the former KHR extension functions are core; fetch them
    // under their unsuffixed names.
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2");
        m_VulkanFunctions.vkBindBufferMemory2KHR =
            (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2");
        m_VulkanFunctions.vkBindImageMemory2KHR =
            (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2");
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
            (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2");
    }
#endif
#if VMA_DEDICATED_ALLOCATION
    // VK_KHR_dedicated_allocation extension path (pre-1.1): KHR-suffixed names.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif
#if VMA_BIND_MEMORY2
    // VK_KHR_bind_memory2 extension path (pre-1.1).
    if(m_UseKhrBindMemory2)
    {
        m_VulkanFunctions.vkBindBufferMemory2KHR =
            (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
        m_VulkanFunctions.vkBindImageMemory2KHR =
            (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2
#if VMA_MEMORY_BUDGET
    // VK_EXT_memory_budget needs vkGetPhysicalDeviceMemoryProperties2; on <1.1
    // it is only available via the KHR extension name.
    if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
            (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
#endif // #if VMA_MEMORY_BUDGET
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a single member from the user-provided struct, but only if non-null,
// so user pointers override the statically resolved ones without erasing them.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif
#if VMA_MEMORY_BUDGET
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
    {
        VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    }
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
    }
#endif
}
14883 
14884 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14885 {
14886  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14887  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14888  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14889  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14890 }
14891 
14892 VkResult VmaAllocator_T::AllocateMemoryOfType(
14893  VkDeviceSize size,
14894  VkDeviceSize alignment,
14895  bool dedicatedAllocation,
14896  VkBuffer dedicatedBuffer,
14897  VkImage dedicatedImage,
14898  const VmaAllocationCreateInfo& createInfo,
14899  uint32_t memTypeIndex,
14900  VmaSuballocationType suballocType,
14901  size_t allocationCount,
14902  VmaAllocation* pAllocations)
14903 {
14904  VMA_ASSERT(pAllocations != VMA_NULL);
14905  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14906 
14907  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14908 
14909  // If memory type is not HOST_VISIBLE, disable MAPPED.
14910  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14911  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14912  {
14913  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14914  }
14915  // If memory is lazily allocated, it should be always dedicated.
14916  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
14917  {
14919  }
14920 
14921  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14922  VMA_ASSERT(blockVector);
14923 
14924  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14925  bool preferDedicatedMemory =
14926  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14927  dedicatedAllocation ||
14928  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14929  size > preferredBlockSize / 2;
14930 
14931  if(preferDedicatedMemory &&
14932  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14933  finalCreateInfo.pool == VK_NULL_HANDLE)
14934  {
14936  }
14937 
14938  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14939  {
14940  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14941  {
14942  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14943  }
14944  else
14945  {
14946  return AllocateDedicatedMemory(
14947  size,
14948  suballocType,
14949  memTypeIndex,
14950  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14951  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14952  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14953  finalCreateInfo.pUserData,
14954  dedicatedBuffer,
14955  dedicatedImage,
14956  allocationCount,
14957  pAllocations);
14958  }
14959  }
14960  else
14961  {
14962  VkResult res = blockVector->Allocate(
14963  m_CurrentFrameIndex.load(),
14964  size,
14965  alignment,
14966  finalCreateInfo,
14967  suballocType,
14968  allocationCount,
14969  pAllocations);
14970  if(res == VK_SUCCESS)
14971  {
14972  return res;
14973  }
14974 
14975  // 5. Try dedicated memory.
14976  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14977  {
14978  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14979  }
14980  else
14981  {
14982  res = AllocateDedicatedMemory(
14983  size,
14984  suballocType,
14985  memTypeIndex,
14986  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14987  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14988  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14989  finalCreateInfo.pUserData,
14990  dedicatedBuffer,
14991  dedicatedImage,
14992  allocationCount,
14993  pAllocations);
14994  if(res == VK_SUCCESS)
14995  {
14996  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14997  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14998  return VK_SUCCESS;
14999  }
15000  else
15001  {
15002  // Everything failed: Return error code.
15003  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
15004  return res;
15005  }
15006  }
15007  }
15008 }
15009 
// Allocates allocationCount separate VkDeviceMemory objects ("pages") of the
// given size from memTypeIndex, optionally mapped, and registers them as
// dedicated allocations. Either all succeed, or every partially created page is
// rolled back and the error is returned (all-or-nothing semantics).
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool withinBudget,          // if true, fail early instead of exceeding the heap budget
    bool map,                   // if true, persistently map each allocation
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,   // passed to VkMemoryDedicatedAllocateInfo; may be VK_NULL_HANDLE
    VkImage dedicatedImage,     // ditto; mutually exclusive with dedicatedBuffer
    size_t allocationCount,
    VmaAllocation* pAllocations)
{
    VMA_ASSERT(allocationCount > 0 && pAllocations);

    // Optional budget check: refuse if the whole batch would push heap usage
    // over the reported budget.
    if(withinBudget)
    {
        const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaBudget heapBudget = {};
        GetBudget(&heapBudget, heapIndex, 1);
        if(heapBudget.usage + size * allocationCount > heapBudget.budget)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    // Chain VkMemoryDedicatedAllocateInfo when a specific buffer or image is
    // the target, so the driver can optimize the dedicated allocation.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000

    // Create the pages one by one; stop at the first failure.
    size_t allocIndex;
    VkResult res = VK_SUCCESS;
    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        res = AllocateDedicatedMemoryPage(
            size,
            suballocType,
            memTypeIndex,
            allocInfo,
            map,
            isUserDataString,
            pUserData,
            pAllocations + allocIndex);
        if(res != VK_SUCCESS)
        {
            break;
        }
    }

    if(res == VK_SUCCESS)
    {
        // Register them in m_pDedicatedAllocations.
        {
            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
            VMA_ASSERT(pDedicatedAllocations);
            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
            {
                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
            }
        }

        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    }
    else
    {
        // Free all already created allocations (allocIndex is the index of the
        // failed page, so iterate downward over the successful ones).
        while(allocIndex--)
        {
            VmaAllocation currAlloc = pAllocations[allocIndex];
            VkDeviceMemory hMemory = currAlloc->GetMemory();

            /*
            There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
            before vkFreeMemory.

            if(currAlloc->GetMappedData() != VMA_NULL)
            {
                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
            }
            */

            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
            currAlloc->SetUserData(this, VMA_NULL);
            currAlloc->Dtor();
            m_AllocationObjectAllocator.Free(currAlloc);
        }

        // Leave no dangling handles in the output array on failure.
        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    }

    return res;
}
15122 
// Creates a single dedicated allocation: one vkAllocateMemory call, optional
// persistent mapping, and construction of the VmaAllocation_T object.
// On mapping failure the fresh VkDeviceMemory is released before returning.
VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    const VkMemoryAllocateInfo& allocInfo,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VmaAllocation* pAllocation)
{
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    void* pMappedData = VMA_NULL;
    if(map)
    {
        // Map the whole memory object persistently.
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Roll back the allocation made above.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    // Construct the allocation object and account for it in the budget.
    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Debug feature: fill new memory with a recognizable pattern.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    return VK_SUCCESS;
}
15171 
// Queries memory requirements for a buffer. When VK_KHR_dedicated_allocation
// (or Vulkan 1.1) is available, uses the *2KHR query so the driver can also
// report whether a dedicated allocation is required or preferred; otherwise
// falls back to the core 1.0 query and reports both flags as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct into the output.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
15203 
// Image counterpart of GetBufferMemoryRequirements: queries requirements via
// the *2KHR path when available (also yielding dedicated-allocation hints),
// otherwise via the core 1.0 function with both hint flags reported as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct into the output.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
15235 
15236 VkResult VmaAllocator_T::AllocateMemory(
15237  const VkMemoryRequirements& vkMemReq,
15238  bool requiresDedicatedAllocation,
15239  bool prefersDedicatedAllocation,
15240  VkBuffer dedicatedBuffer,
15241  VkImage dedicatedImage,
15242  const VmaAllocationCreateInfo& createInfo,
15243  VmaSuballocationType suballocType,
15244  size_t allocationCount,
15245  VmaAllocation* pAllocations)
15246 {
15247  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15248 
15249  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15250 
15251  if(vkMemReq.size == 0)
15252  {
15253  return VK_ERROR_VALIDATION_FAILED_EXT;
15254  }
15255  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15256  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15257  {
15258  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15259  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15260  }
15261  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15263  {
15264  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15265  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15266  }
15267  if(requiresDedicatedAllocation)
15268  {
15269  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15270  {
15271  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15272  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15273  }
15274  if(createInfo.pool != VK_NULL_HANDLE)
15275  {
15276  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15277  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15278  }
15279  }
15280  if((createInfo.pool != VK_NULL_HANDLE) &&
15281  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15282  {
15283  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15284  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15285  }
15286 
15287  if(createInfo.pool != VK_NULL_HANDLE)
15288  {
15289  const VkDeviceSize alignmentForPool = VMA_MAX(
15290  vkMemReq.alignment,
15291  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15292 
15293  VmaAllocationCreateInfo createInfoForPool = createInfo;
15294  // If memory type is not HOST_VISIBLE, disable MAPPED.
15295  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15296  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15297  {
15298  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15299  }
15300 
15301  return createInfo.pool->m_BlockVector.Allocate(
15302  m_CurrentFrameIndex.load(),
15303  vkMemReq.size,
15304  alignmentForPool,
15305  createInfoForPool,
15306  suballocType,
15307  allocationCount,
15308  pAllocations);
15309  }
15310  else
15311  {
15312  // Bit mask of memory Vulkan types acceptable for this allocation.
15313  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15314  uint32_t memTypeIndex = UINT32_MAX;
15315  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15316  if(res == VK_SUCCESS)
15317  {
15318  VkDeviceSize alignmentForMemType = VMA_MAX(
15319  vkMemReq.alignment,
15320  GetMemoryTypeMinAlignment(memTypeIndex));
15321 
15322  res = AllocateMemoryOfType(
15323  vkMemReq.size,
15324  alignmentForMemType,
15325  requiresDedicatedAllocation || prefersDedicatedAllocation,
15326  dedicatedBuffer,
15327  dedicatedImage,
15328  createInfo,
15329  memTypeIndex,
15330  suballocType,
15331  allocationCount,
15332  pAllocations);
15333  // Succeeded on first try.
15334  if(res == VK_SUCCESS)
15335  {
15336  return res;
15337  }
15338  // Allocation from this memory type failed. Try other compatible memory types.
15339  else
15340  {
15341  for(;;)
15342  {
15343  // Remove old memTypeIndex from list of possibilities.
15344  memoryTypeBits &= ~(1u << memTypeIndex);
15345  // Find alternative memTypeIndex.
15346  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15347  if(res == VK_SUCCESS)
15348  {
15349  alignmentForMemType = VMA_MAX(
15350  vkMemReq.alignment,
15351  GetMemoryTypeMinAlignment(memTypeIndex));
15352 
15353  res = AllocateMemoryOfType(
15354  vkMemReq.size,
15355  alignmentForMemType,
15356  requiresDedicatedAllocation || prefersDedicatedAllocation,
15357  dedicatedBuffer,
15358  dedicatedImage,
15359  createInfo,
15360  memTypeIndex,
15361  suballocType,
15362  allocationCount,
15363  pAllocations);
15364  // Allocation from this alternative memory type succeeded.
15365  if(res == VK_SUCCESS)
15366  {
15367  return res;
15368  }
15369  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15370  }
15371  // No other matching memory type index could be found.
15372  else
15373  {
15374  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15375  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15376  }
15377  }
15378  }
15379  }
15380  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15381  else
15382  return res;
15383  }
15384 }
15385 
// Frees a batch of allocations in reverse order. VK_NULL_HANDLE entries are
// skipped. Block allocations are returned to their owning block vector (pool
// or default), dedicated allocations release their VkDeviceMemory. Lost
// allocations (TouchAllocation false) skip the free but still update the
// budget and destroy the allocation object.
void VmaAllocator_T::FreeMemory(
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    VMA_ASSERT(pAllocations);

    for(size_t allocIndex = allocationCount; allocIndex--; )
    {
        VmaAllocation allocation = pAllocations[allocIndex];

        if(allocation != VK_NULL_HANDLE)
        {
            // TouchAllocation returns false for lost allocations; only live
            // ones have memory to give back.
            if(TouchAllocation(allocation))
            {
                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                {
                    // Debug feature: overwrite freed memory with a pattern.
                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
                }

                switch(allocation->GetType())
                {
                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
                    {
                        // Return the suballocation to the block vector it came
                        // from: a custom pool's, or the default one per type.
                        VmaBlockVector* pBlockVector = VMA_NULL;
                        VmaPool hPool = allocation->GetBlock()->GetParentPool();
                        if(hPool != VK_NULL_HANDLE)
                        {
                            pBlockVector = &hPool->m_BlockVector;
                        }
                        else
                        {
                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                            pBlockVector = m_pBlockVectors[memTypeIndex];
                        }
                        pBlockVector->Free(allocation);
                    }
                    break;
                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
                    FreeDedicatedMemory(allocation);
                    break;
                default:
                    VMA_ASSERT(0);
                }
            }

            // Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes.
            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
            allocation->SetUserData(this, VMA_NULL);
            allocation->Dtor();
            m_AllocationObjectAllocator.Free(allocation);
        }
    }
}
15439 
15440 VkResult VmaAllocator_T::ResizeAllocation(
15441  const VmaAllocation alloc,
15442  VkDeviceSize newSize)
15443 {
15444  // This function is deprecated and so it does nothing. It's left for backward compatibility.
15445  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
15446  {
15447  return VK_ERROR_VALIDATION_FAILED_EXT;
15448  }
15449  if(newSize == alloc->GetSize())
15450  {
15451  return VK_SUCCESS;
15452  }
15453  return VK_ERROR_OUT_OF_POOL_MEMORY;
15454 }
15455 
15456 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
15457 {
15458  // Initialize.
15459  InitStatInfo(pStats->total);
15460  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15461  InitStatInfo(pStats->memoryType[i]);
15462  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15463  InitStatInfo(pStats->memoryHeap[i]);
15464 
15465  // Process default pools.
15466  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15467  {
15468  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15469  VMA_ASSERT(pBlockVector);
15470  pBlockVector->AddStats(pStats);
15471  }
15472 
15473  // Process custom pools.
15474  {
15475  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15476  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15477  {
15478  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15479  }
15480  }
15481 
15482  // Process dedicated allocations.
15483  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15484  {
15485  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15486  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15487  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15488  VMA_ASSERT(pDedicatedAllocVector);
15489  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15490  {
15491  VmaStatInfo allocationStatInfo;
15492  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15493  VmaAddStatInfo(pStats->total, allocationStatInfo);
15494  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15495  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15496  }
15497  }
15498 
15499  // Postprocess.
15500  VmaPostprocessCalcStatInfo(pStats->total);
15501  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15502  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15503  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15504  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15505 }
15506 
// Fills heapCount consecutive VmaBudget entries for heaps [firstHeap,
// firstHeap+heapCount). With VK_EXT_memory_budget the values come from the
// driver's last fetched numbers, extrapolated by the allocator's own activity
// since that fetch; the cached data is refreshed after 30 allocator operations.
// Without the extension, usage/budget are estimated from internal counters.
void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
{
#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        // Cached driver data is considered fresh for up to 30 operations.
        if(m_Budget.m_OperationsSinceBudgetFetch < 30)
        {
            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
            for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
            {
                const uint32_t heapIndex = firstHeap + i;

                outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
                outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

                // Extrapolate driver-reported usage by the net bytes this
                // allocator has allocated/freed since the last budget fetch,
                // clamping at zero if we freed more than the driver saw used.
                if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
                {
                    outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
                        outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
                }
                else
                {
                    outBudget->usage = 0;
                }

                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
                outBudget->budget = VMA_MIN(
                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
            }
        }
        else
        {
            UpdateVulkanBudget(); // Outside of mutex lock
            GetBudget(outBudget, firstHeap, heapCount); // Recursion
        }
    }
    else
#endif
    {
        // Fallback without VK_EXT_memory_budget: report our own block bytes as
        // usage and assume 80% of each heap is safely usable.
        for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
        {
            const uint32_t heapIndex = firstHeap + i;

            outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
            outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

            outBudget->usage = outBudget->blockBytes;
            outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
        }
    }
}
15558 
static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002 - PCI vendor ID of AMD.
15560 
15561 VkResult VmaAllocator_T::DefragmentationBegin(
15562  const VmaDefragmentationInfo2& info,
15563  VmaDefragmentationStats* pStats,
15564  VmaDefragmentationContext* pContext)
15565 {
15566  if(info.pAllocationsChanged != VMA_NULL)
15567  {
15568  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15569  }
15570 
15571  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15572  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15573 
15574  (*pContext)->AddPools(info.poolCount, info.pPools);
15575  (*pContext)->AddAllocations(
15577 
15578  VkResult res = (*pContext)->Defragment(
15581  info.commandBuffer, pStats);
15582 
15583  if(res != VK_NOT_READY)
15584  {
15585  vma_delete(this, *pContext);
15586  *pContext = VMA_NULL;
15587  }
15588 
15589  return res;
15590 }
15591 
VkResult VmaAllocator_T::DefragmentationEnd(
    VmaDefragmentationContext context)
{
    // Finishes a defragmentation begun with DefragmentationBegin by
    // destroying its context object. Always succeeds.
    vma_delete(this, context);
    return VK_SUCCESS;
}
15598 
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    // Fills *pAllocationInfo with the allocation's current parameters. For
    // allocations that can become lost it also atomically advances the
    // last-use frame index to the current frame (acting as a "touch").
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost: report empty parameters; size and
                // pUserData remain available.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Alive and already marked as used in the current frame.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on failure the
                // loop re-evaluates (presumably CompareExchange reloads
                // localLastUseFrameIndex per std::atomic CAS convention - confirm).
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only bookkeeping: keep last-use frame index current.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
15670 
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // Marks the allocation as used in the current frame and reports whether it
    // is still alive (true) or lost (false).
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false; // Already lost.
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true; // Alive and touched in the current frame.
            }
            else // Last use time earlier than current time.
            {
                // Advance last-use to the current frame; retry on CAS failure.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only bookkeeping: keep last-use frame index current.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lost-capable allocations are always alive.
        return true;
    }
}
15722 
15723 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15724 {
15725  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15726 
15727  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15728 
15729  if(newCreateInfo.maxBlockCount == 0)
15730  {
15731  newCreateInfo.maxBlockCount = SIZE_MAX;
15732  }
15733  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15734  {
15735  return VK_ERROR_INITIALIZATION_FAILED;
15736  }
15737 
15738  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15739 
15740  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15741 
15742  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15743  if(res != VK_SUCCESS)
15744  {
15745  vma_delete(this, *pPool);
15746  *pPool = VMA_NULL;
15747  return res;
15748  }
15749 
15750  // Add to m_Pools.
15751  {
15752  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15753  (*pPool)->SetId(m_NextPoolId++);
15754  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15755  }
15756 
15757  return VK_SUCCESS;
15758 }
15759 
15760 void VmaAllocator_T::DestroyPool(VmaPool pool)
15761 {
15762  // Remove from m_Pools.
15763  {
15764  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15765  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15766  VMA_ASSERT(success && "Pool not found in Allocator.");
15767  }
15768 
15769  vma_delete(this, pool);
15770 }
15771 
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    // Thin forwarder: a custom pool's statistics come from its block vector.
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
15776 
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);

#if VMA_MEMORY_BUDGET
    // A new frame is a natural point to refresh budget data from the driver.
    if(m_UseExtMemoryBudget)
    {
        UpdateVulkanBudget();
    }
#endif // #if VMA_MEMORY_BUDGET
}
15788 
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    // Marks as lost the pool's allocations eligible in the current frame;
    // optionally reports how many were lost via pLostAllocationCount.
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
15797 
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    // Thin forwarder: corruption check is implemented by the block vector.
    return hPool->m_BlockVector.CheckCorruption();
}
15802 
15803 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15804 {
15805  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15806 
15807  // Process default pools.
15808  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15809  {
15810  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15811  {
15812  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15813  VMA_ASSERT(pBlockVector);
15814  VkResult localRes = pBlockVector->CheckCorruption();
15815  switch(localRes)
15816  {
15817  case VK_ERROR_FEATURE_NOT_PRESENT:
15818  break;
15819  case VK_SUCCESS:
15820  finalRes = VK_SUCCESS;
15821  break;
15822  default:
15823  return localRes;
15824  }
15825  }
15826  }
15827 
15828  // Process custom pools.
15829  {
15830  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15831  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15832  {
15833  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15834  {
15835  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15836  switch(localRes)
15837  {
15838  case VK_ERROR_FEATURE_NOT_PRESENT:
15839  break;
15840  case VK_SUCCESS:
15841  finalRes = VK_SUCCESS;
15842  break;
15843  default:
15844  return localRes;
15845  }
15846  }
15847  }
15848  }
15849 
15850  return finalRes;
15851 }
15852 
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    // Creates a dummy allocation object that is permanently in the "lost" state.
    *pAllocation = m_AllocationObjectAllocator.Allocate();
    (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
15859 
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    // Wraps vkAllocateMemory with heap-size-limit enforcement, budget
    // bookkeeping (m_Budget.m_BlockBytes), and the user's informative callback.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    // HeapSizeLimit is in effect for this heap.
    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
    {
        // Reserve the bytes with a CAS loop so concurrent allocations cannot
        // jointly exceed the heap size.
        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
        for(;;)
        {
            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
            if(blockBytesAfterAllocation > heapSize)
            {
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            // On failure compare_exchange_strong reloads blockBytes; retry.
            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
            {
                break;
            }
        }
    }
    else
    {
        // No limit - simple atomic add.
        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
    }

    // VULKAN CALL vkAllocateMemory.
    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);

    if(res == VK_SUCCESS)
    {
#if VMA_MEMORY_BUDGET
        ++m_Budget.m_OperationsSinceBudgetFetch;
#endif

        // Informative callback.
        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
        {
            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
        }
    }
    else
    {
        // Allocation failed - undo the budget reservation made above.
        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
    }

    return res;
}
15909 
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Counterpart of AllocateVulkanMemory: notifies the user callback, frees
    // the device memory, and returns the bytes to the heap's budget counter.

    // Informative callback.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    // VULKAN CALL vkFreeMemory.
    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
}
15923 
VkResult VmaAllocator_T::BindVulkanBuffer(
    VkDeviceMemory memory,
    VkDeviceSize memoryOffset,
    VkBuffer buffer,
    const void* pNext)
{
    // Binds device memory to a buffer. A non-null pNext chain requires
    // vkBindBufferMemory2 (Vulkan 1.1 or VK_KHR_bind_memory2); otherwise the
    // plain vkBindBufferMemory path is used.
    if(pNext != VMA_NULL)
    {
#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
        {
            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
            bindBufferMemoryInfo.pNext = pNext;
            bindBufferMemoryInfo.buffer = buffer;
            bindBufferMemoryInfo.memory = memory;
            bindBufferMemoryInfo.memoryOffset = memoryOffset;
            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
        }
        else
#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
        {
            // Cannot honor the pNext chain without the extension.
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }
    else
    {
        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
    }
}
15954 
15955 VkResult VmaAllocator_T::BindVulkanImage(
15956  VkDeviceMemory memory,
15957  VkDeviceSize memoryOffset,
15958  VkImage image,
15959  const void* pNext)
15960 {
15961  if(pNext != VMA_NULL)
15962  {
15963 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15964  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15965  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15966  {
15967  VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15968  bindBufferMemoryInfo.pNext = pNext;
15969  bindBufferMemoryInfo.image = image;
15970  bindBufferMemoryInfo.memory = memory;
15971  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15972  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15973  }
15974  else
15975 #endif // #if VMA_BIND_MEMORY2
15976  {
15977  return VK_ERROR_EXTENSION_NOT_PRESENT;
15978  }
15979  }
15980  else
15981  {
15982  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15983  }
15984 }
15985 
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    // Maps the allocation's memory and returns a pointer to its first byte in
    // *ppData. Lost-capable allocations cannot be mapped.
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Map the whole block (ref-counted), then offset to this allocation.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap(); // Track map count on the allocation too.
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
16014 
16015 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
16016 {
16017  switch(hAllocation->GetType())
16018  {
16019  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16020  {
16021  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16022  hAllocation->BlockAllocUnmap();
16023  pBlock->Unmap(this, 1);
16024  }
16025  break;
16026  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16027  hAllocation->DedicatedAllocUnmap(this);
16028  break;
16029  default:
16030  VMA_ASSERT(0);
16031  }
16032 }
16033 
16034 VkResult VmaAllocator_T::BindBufferMemory(
16035  VmaAllocation hAllocation,
16036  VkDeviceSize allocationLocalOffset,
16037  VkBuffer hBuffer,
16038  const void* pNext)
16039 {
16040  VkResult res = VK_SUCCESS;
16041  switch(hAllocation->GetType())
16042  {
16043  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16044  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
16045  break;
16046  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16047  {
16048  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
16049  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
16050  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
16051  break;
16052  }
16053  default:
16054  VMA_ASSERT(0);
16055  }
16056  return res;
16057 }
16058 
16059 VkResult VmaAllocator_T::BindImageMemory(
16060  VmaAllocation hAllocation,
16061  VkDeviceSize allocationLocalOffset,
16062  VkImage hImage,
16063  const void* pNext)
16064 {
16065  VkResult res = VK_SUCCESS;
16066  switch(hAllocation->GetType())
16067  {
16068  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16069  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
16070  break;
16071  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16072  {
16073  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
16074  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
16075  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
16076  break;
16077  }
16078  default:
16079  VMA_ASSERT(0);
16080  }
16081  return res;
16082 }
16083 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates a sub-range of the allocation, expanding the
    // range to nonCoherentAtomSize alignment as the Vulkan spec requires.
    // No-op for coherent memory types or when size == 0.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down; clamp the aligned size to the allocation end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset; // Convert to block-relative offset.
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
16159 
void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
{
    // Frees a dedicated (non-block) allocation: removes it from the
    // per-memory-type dedicated list, then releases its VkDeviceMemory.
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
16189 
16190 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
16191 {
16192  VkBufferCreateInfo dummyBufCreateInfo;
16193  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
16194 
16195  uint32_t memoryTypeBits = 0;
16196 
16197  // Create buffer.
16198  VkBuffer buf = VK_NULL_HANDLE;
16199  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
16200  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
16201  if(res == VK_SUCCESS)
16202  {
16203  // Query for supported memory types.
16204  VkMemoryRequirements memReq;
16205  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
16206  memoryTypeBits = memReq.memoryTypeBits;
16207 
16208  // Destroy buffer.
16209  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16210  }
16211 
16212  return memoryTypeBits;
16213 }
16214 
16215 #if VMA_MEMORY_BUDGET
16216 
void VmaAllocator_T::UpdateVulkanBudget()
{
    // Re-queries per-heap usage/budget from the driver via
    // VK_EXT_memory_budget and snapshots this allocator's own block byte
    // counters for later usage estimation in GetBudget().
    VMA_ASSERT(m_UseExtMemoryBudget);

    VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };

    // Chain the budget query struct into the memory properties query.
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    memProps.pNext = &budgetProps;

    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);

    {
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);

        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
        }
        m_Budget.m_OperationsSinceBudgetFetch = 0; // Restart the staleness counter.
    }
}
16240 
16241 #endif // #if VMA_MEMORY_BUDGET
16242 
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    // Debug feature: fills the allocation's memory with a byte pattern to help
    // catch use of uninitialized or freed memory. Only possible for
    // host-visible, non-lost-capable allocations; otherwise silently skipped.
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            // Flush in case the memory type is non-coherent.
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
16263 
16264 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16265 {
16266  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16267  if(memoryTypeBits == UINT32_MAX)
16268  {
16269  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16270  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16271  }
16272  return memoryTypeBits;
16273 }
16274 
16275 #if VMA_STATS_STRING_ENABLED
16276 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Writes the detailed JSON dump: dedicated allocations, default pools
    // (one block vector per memory type), and custom pools.

    // "DedicatedAllocations" section - opened lazily, only if any exist.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <memTypeIndex>", value: array of allocations.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools" section - also opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keys are the pools' unique IDs.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
16362 
16363 #endif // #if VMA_STATS_STRING_ENABLED
16364 
16366 // Public interface
16367 
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    // Only Vulkan API versions 1.0 and 1.1 are supported by this version.
    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1));
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
16379 
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks out first: vma_delete destroys the object that owns them.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
16390 
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    // Returns a pointer to the allocator's cached physical device properties.
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}
16398 
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    // Returns a pointer to the allocator's cached memory properties.
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}
16406 
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    // Returns property flags of the given memory type from the cached properties.
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}
16416 
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);
    // VMA_FRAME_INDEX_LOST is a reserved sentinel and not a valid frame index.
    VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}
16428 
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    // Public wrapper for VmaAllocator_T::CalculateStats.
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
16437 
VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget)
{
    // Fills pBudget for all memory heaps - the caller must provide an array
    // with one element per heap.
    VMA_ASSERT(allocator && pBudget);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
}
16446 
16447 #if VMA_STATS_STRING_ENABLED
16448 
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON string describing current allocator state: totals, then
    // per-heap size/flags/budget/stats with nested per-memory-type sections,
    // and optionally the detailed map. The returned string is allocated with
    // the allocator's callbacks; free it with vmaFreeStatsString.
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer is finished before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaBudget budget[VK_MAX_MEMORY_HEAPS];
        allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            // One object per heap, keyed "Heap <index>".
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            json.WriteString("Budget");
            json.BeginObject();
            {
                json.WriteString("BlockBytes");
                json.WriteNumber(budget[heapIndex].blockBytes);
                json.WriteString("AllocationBytes");
                json.WriteNumber(budget[heapIndex].allocationBytes);
                json.WriteString("Usage");
                json.WriteNumber(budget[heapIndex].usage);
                json.WriteString("Budget");
                json.WriteNumber(budget[heapIndex].budget);
            }
            json.EndObject();

            // Stats only emitted for heaps that actually hold blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested objects for each memory type belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built string into a NUL-terminated buffer owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
16573 
16574 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
16575  VmaAllocator allocator,
16576  char* pStatsString)
16577 {
16578  if(pStatsString != VMA_NULL)
16579  {
16580  VMA_ASSERT(allocator);
16581  size_t len = strlen(pStatsString);
16582  vma_delete_array(allocator, pStatsString, len + 1);
16583  }
16584 }
16585 
16586 #endif // #if VMA_STATS_STRING_ENABLED
16587 
16588 /*
16589 This function is not protected by any mutex because it just reads immutable data.
16590 */
16591 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
16592  VmaAllocator allocator,
16593  uint32_t memoryTypeBits,
16594  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16595  uint32_t* pMemoryTypeIndex)
16596 {
16597  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16598  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16599  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16600 
16601  if(pAllocationCreateInfo->memoryTypeBits != 0)
16602  {
16603  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
16604  }
16605 
16606  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
16607  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
16608  uint32_t notPreferredFlags = 0;
16609 
16610  // Convert usage to requiredFlags and preferredFlags.
16611  switch(pAllocationCreateInfo->usage)
16612  {
16614  break;
16616  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16617  {
16618  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16619  }
16620  break;
16622  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
16623  break;
16625  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16626  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16627  {
16628  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16629  }
16630  break;
16632  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16633  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16634  break;
16636  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16637  break;
16639  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
16640  break;
16641  default:
16642  VMA_ASSERT(0);
16643  break;
16644  }
16645 
16646  *pMemoryTypeIndex = UINT32_MAX;
16647  uint32_t minCost = UINT32_MAX;
16648  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16649  memTypeIndex < allocator->GetMemoryTypeCount();
16650  ++memTypeIndex, memTypeBit <<= 1)
16651  {
16652  // This memory type is acceptable according to memoryTypeBits bitmask.
16653  if((memTypeBit & memoryTypeBits) != 0)
16654  {
16655  const VkMemoryPropertyFlags currFlags =
16656  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16657  // This memory type contains requiredFlags.
16658  if((requiredFlags & ~currFlags) == 0)
16659  {
16660  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16661  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
16662  VmaCountBitsSet(currFlags & notPreferredFlags);
16663  // Remember memory type with lowest cost.
16664  if(currCost < minCost)
16665  {
16666  *pMemoryTypeIndex = memTypeIndex;
16667  if(currCost == 0)
16668  {
16669  return VK_SUCCESS;
16670  }
16671  minCost = currCost;
16672  }
16673  }
16674  }
16675  }
16676  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16677 }
16678 
16679 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
16680  VmaAllocator allocator,
16681  const VkBufferCreateInfo* pBufferCreateInfo,
16682  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16683  uint32_t* pMemoryTypeIndex)
16684 {
16685  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16686  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16687  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16688  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16689 
16690  const VkDevice hDev = allocator->m_hDevice;
16691  VkBuffer hBuffer = VK_NULL_HANDLE;
16692  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16693  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16694  if(res == VK_SUCCESS)
16695  {
16696  VkMemoryRequirements memReq = {};
16697  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16698  hDev, hBuffer, &memReq);
16699 
16700  res = vmaFindMemoryTypeIndex(
16701  allocator,
16702  memReq.memoryTypeBits,
16703  pAllocationCreateInfo,
16704  pMemoryTypeIndex);
16705 
16706  allocator->GetVulkanFunctions().vkDestroyBuffer(
16707  hDev, hBuffer, allocator->GetAllocationCallbacks());
16708  }
16709  return res;
16710 }
16711 
16712 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
16713  VmaAllocator allocator,
16714  const VkImageCreateInfo* pImageCreateInfo,
16715  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16716  uint32_t* pMemoryTypeIndex)
16717 {
16718  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16719  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16720  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16721  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16722 
16723  const VkDevice hDev = allocator->m_hDevice;
16724  VkImage hImage = VK_NULL_HANDLE;
16725  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16726  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16727  if(res == VK_SUCCESS)
16728  {
16729  VkMemoryRequirements memReq = {};
16730  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16731  hDev, hImage, &memReq);
16732 
16733  res = vmaFindMemoryTypeIndex(
16734  allocator,
16735  memReq.memoryTypeBits,
16736  pAllocationCreateInfo,
16737  pMemoryTypeIndex);
16738 
16739  allocator->GetVulkanFunctions().vkDestroyImage(
16740  hDev, hImage, allocator->GetAllocationCallbacks());
16741  }
16742  return res;
16743 }
16744 
16745 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
16746  VmaAllocator allocator,
16747  const VmaPoolCreateInfo* pCreateInfo,
16748  VmaPool* pPool)
16749 {
16750  VMA_ASSERT(allocator && pCreateInfo && pPool);
16751 
16752  VMA_DEBUG_LOG("vmaCreatePool");
16753 
16754  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16755 
16756  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16757 
16758 #if VMA_RECORDING_ENABLED
16759  if(allocator->GetRecorder() != VMA_NULL)
16760  {
16761  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16762  }
16763 #endif
16764 
16765  return res;
16766 }
16767 
16768 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
16769  VmaAllocator allocator,
16770  VmaPool pool)
16771 {
16772  VMA_ASSERT(allocator);
16773 
16774  if(pool == VK_NULL_HANDLE)
16775  {
16776  return;
16777  }
16778 
16779  VMA_DEBUG_LOG("vmaDestroyPool");
16780 
16781  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16782 
16783 #if VMA_RECORDING_ENABLED
16784  if(allocator->GetRecorder() != VMA_NULL)
16785  {
16786  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16787  }
16788 #endif
16789 
16790  allocator->DestroyPool(pool);
16791 }
16792 
16793 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
16794  VmaAllocator allocator,
16795  VmaPool pool,
16796  VmaPoolStats* pPoolStats)
16797 {
16798  VMA_ASSERT(allocator && pool && pPoolStats);
16799 
16800  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16801 
16802  allocator->GetPoolStats(pool, pPoolStats);
16803 }
16804 
16805 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
16806  VmaAllocator allocator,
16807  VmaPool pool,
16808  size_t* pLostAllocationCount)
16809 {
16810  VMA_ASSERT(allocator && pool);
16811 
16812  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16813 
16814 #if VMA_RECORDING_ENABLED
16815  if(allocator->GetRecorder() != VMA_NULL)
16816  {
16817  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16818  }
16819 #endif
16820 
16821  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16822 }
16823 
16824 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16825 {
16826  VMA_ASSERT(allocator && pool);
16827 
16828  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16829 
16830  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16831 
16832  return allocator->CheckPoolCorruption(pool);
16833 }
16834 
16835 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
16836  VmaAllocator allocator,
16837  VmaPool pool,
16838  const char** ppName)
16839 {
16840  VMA_ASSERT(allocator && pool);
16841 
16842  VMA_DEBUG_LOG("vmaGetPoolName");
16843 
16844  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16845 
16846  *ppName = pool->GetName();
16847 }
16848 
16849 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
16850  VmaAllocator allocator,
16851  VmaPool pool,
16852  const char* pName)
16853 {
16854  VMA_ASSERT(allocator && pool);
16855 
16856  VMA_DEBUG_LOG("vmaSetPoolName");
16857 
16858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16859 
16860  pool->SetName(pName);
16861 
16862 #if VMA_RECORDING_ENABLED
16863  if(allocator->GetRecorder() != VMA_NULL)
16864  {
16865  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
16866  }
16867 #endif
16868 }
16869 
16870 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
16871  VmaAllocator allocator,
16872  const VkMemoryRequirements* pVkMemoryRequirements,
16873  const VmaAllocationCreateInfo* pCreateInfo,
16874  VmaAllocation* pAllocation,
16875  VmaAllocationInfo* pAllocationInfo)
16876 {
16877  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16878 
16879  VMA_DEBUG_LOG("vmaAllocateMemory");
16880 
16881  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16882 
16883  VkResult result = allocator->AllocateMemory(
16884  *pVkMemoryRequirements,
16885  false, // requiresDedicatedAllocation
16886  false, // prefersDedicatedAllocation
16887  VK_NULL_HANDLE, // dedicatedBuffer
16888  VK_NULL_HANDLE, // dedicatedImage
16889  *pCreateInfo,
16890  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16891  1, // allocationCount
16892  pAllocation);
16893 
16894 #if VMA_RECORDING_ENABLED
16895  if(allocator->GetRecorder() != VMA_NULL)
16896  {
16897  allocator->GetRecorder()->RecordAllocateMemory(
16898  allocator->GetCurrentFrameIndex(),
16899  *pVkMemoryRequirements,
16900  *pCreateInfo,
16901  *pAllocation);
16902  }
16903 #endif
16904 
16905  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16906  {
16907  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16908  }
16909 
16910  return result;
16911 }
16912 
16913 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
16914  VmaAllocator allocator,
16915  const VkMemoryRequirements* pVkMemoryRequirements,
16916  const VmaAllocationCreateInfo* pCreateInfo,
16917  size_t allocationCount,
16918  VmaAllocation* pAllocations,
16919  VmaAllocationInfo* pAllocationInfo)
16920 {
16921  if(allocationCount == 0)
16922  {
16923  return VK_SUCCESS;
16924  }
16925 
16926  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16927 
16928  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16929 
16930  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16931 
16932  VkResult result = allocator->AllocateMemory(
16933  *pVkMemoryRequirements,
16934  false, // requiresDedicatedAllocation
16935  false, // prefersDedicatedAllocation
16936  VK_NULL_HANDLE, // dedicatedBuffer
16937  VK_NULL_HANDLE, // dedicatedImage
16938  *pCreateInfo,
16939  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16940  allocationCount,
16941  pAllocations);
16942 
16943 #if VMA_RECORDING_ENABLED
16944  if(allocator->GetRecorder() != VMA_NULL)
16945  {
16946  allocator->GetRecorder()->RecordAllocateMemoryPages(
16947  allocator->GetCurrentFrameIndex(),
16948  *pVkMemoryRequirements,
16949  *pCreateInfo,
16950  (uint64_t)allocationCount,
16951  pAllocations);
16952  }
16953 #endif
16954 
16955  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16956  {
16957  for(size_t i = 0; i < allocationCount; ++i)
16958  {
16959  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16960  }
16961  }
16962 
16963  return result;
16964 }
16965 
16966 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
16967  VmaAllocator allocator,
16968  VkBuffer buffer,
16969  const VmaAllocationCreateInfo* pCreateInfo,
16970  VmaAllocation* pAllocation,
16971  VmaAllocationInfo* pAllocationInfo)
16972 {
16973  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16974 
16975  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16976 
16977  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16978 
16979  VkMemoryRequirements vkMemReq = {};
16980  bool requiresDedicatedAllocation = false;
16981  bool prefersDedicatedAllocation = false;
16982  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16983  requiresDedicatedAllocation,
16984  prefersDedicatedAllocation);
16985 
16986  VkResult result = allocator->AllocateMemory(
16987  vkMemReq,
16988  requiresDedicatedAllocation,
16989  prefersDedicatedAllocation,
16990  buffer, // dedicatedBuffer
16991  VK_NULL_HANDLE, // dedicatedImage
16992  *pCreateInfo,
16993  VMA_SUBALLOCATION_TYPE_BUFFER,
16994  1, // allocationCount
16995  pAllocation);
16996 
16997 #if VMA_RECORDING_ENABLED
16998  if(allocator->GetRecorder() != VMA_NULL)
16999  {
17000  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
17001  allocator->GetCurrentFrameIndex(),
17002  vkMemReq,
17003  requiresDedicatedAllocation,
17004  prefersDedicatedAllocation,
17005  *pCreateInfo,
17006  *pAllocation);
17007  }
17008 #endif
17009 
17010  if(pAllocationInfo && result == VK_SUCCESS)
17011  {
17012  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17013  }
17014 
17015  return result;
17016 }
17017 
17018 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
17019  VmaAllocator allocator,
17020  VkImage image,
17021  const VmaAllocationCreateInfo* pCreateInfo,
17022  VmaAllocation* pAllocation,
17023  VmaAllocationInfo* pAllocationInfo)
17024 {
17025  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
17026 
17027  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
17028 
17029  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17030 
17031  VkMemoryRequirements vkMemReq = {};
17032  bool requiresDedicatedAllocation = false;
17033  bool prefersDedicatedAllocation = false;
17034  allocator->GetImageMemoryRequirements(image, vkMemReq,
17035  requiresDedicatedAllocation, prefersDedicatedAllocation);
17036 
17037  VkResult result = allocator->AllocateMemory(
17038  vkMemReq,
17039  requiresDedicatedAllocation,
17040  prefersDedicatedAllocation,
17041  VK_NULL_HANDLE, // dedicatedBuffer
17042  image, // dedicatedImage
17043  *pCreateInfo,
17044  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
17045  1, // allocationCount
17046  pAllocation);
17047 
17048 #if VMA_RECORDING_ENABLED
17049  if(allocator->GetRecorder() != VMA_NULL)
17050  {
17051  allocator->GetRecorder()->RecordAllocateMemoryForImage(
17052  allocator->GetCurrentFrameIndex(),
17053  vkMemReq,
17054  requiresDedicatedAllocation,
17055  prefersDedicatedAllocation,
17056  *pCreateInfo,
17057  *pAllocation);
17058  }
17059 #endif
17060 
17061  if(pAllocationInfo && result == VK_SUCCESS)
17062  {
17063  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17064  }
17065 
17066  return result;
17067 }
17068 
17069 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
17070  VmaAllocator allocator,
17071  VmaAllocation allocation)
17072 {
17073  VMA_ASSERT(allocator);
17074 
17075  if(allocation == VK_NULL_HANDLE)
17076  {
17077  return;
17078  }
17079 
17080  VMA_DEBUG_LOG("vmaFreeMemory");
17081 
17082  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17083 
17084 #if VMA_RECORDING_ENABLED
17085  if(allocator->GetRecorder() != VMA_NULL)
17086  {
17087  allocator->GetRecorder()->RecordFreeMemory(
17088  allocator->GetCurrentFrameIndex(),
17089  allocation);
17090  }
17091 #endif
17092 
17093  allocator->FreeMemory(
17094  1, // allocationCount
17095  &allocation);
17096 }
17097 
17098 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
17099  VmaAllocator allocator,
17100  size_t allocationCount,
17101  VmaAllocation* pAllocations)
17102 {
17103  if(allocationCount == 0)
17104  {
17105  return;
17106  }
17107 
17108  VMA_ASSERT(allocator);
17109 
17110  VMA_DEBUG_LOG("vmaFreeMemoryPages");
17111 
17112  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17113 
17114 #if VMA_RECORDING_ENABLED
17115  if(allocator->GetRecorder() != VMA_NULL)
17116  {
17117  allocator->GetRecorder()->RecordFreeMemoryPages(
17118  allocator->GetCurrentFrameIndex(),
17119  (uint64_t)allocationCount,
17120  pAllocations);
17121  }
17122 #endif
17123 
17124  allocator->FreeMemory(allocationCount, pAllocations);
17125 }
17126 
17127 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
17128  VmaAllocator allocator,
17129  VmaAllocation allocation,
17130  VkDeviceSize newSize)
17131 {
17132  VMA_ASSERT(allocator && allocation);
17133 
17134  VMA_DEBUG_LOG("vmaResizeAllocation");
17135 
17136  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17137 
17138  return allocator->ResizeAllocation(allocation, newSize);
17139 }
17140 
17141 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
17142  VmaAllocator allocator,
17143  VmaAllocation allocation,
17144  VmaAllocationInfo* pAllocationInfo)
17145 {
17146  VMA_ASSERT(allocator && allocation && pAllocationInfo);
17147 
17148  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17149 
17150 #if VMA_RECORDING_ENABLED
17151  if(allocator->GetRecorder() != VMA_NULL)
17152  {
17153  allocator->GetRecorder()->RecordGetAllocationInfo(
17154  allocator->GetCurrentFrameIndex(),
17155  allocation);
17156  }
17157 #endif
17158 
17159  allocator->GetAllocationInfo(allocation, pAllocationInfo);
17160 }
17161 
17162 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
17163  VmaAllocator allocator,
17164  VmaAllocation allocation)
17165 {
17166  VMA_ASSERT(allocator && allocation);
17167 
17168  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17169 
17170 #if VMA_RECORDING_ENABLED
17171  if(allocator->GetRecorder() != VMA_NULL)
17172  {
17173  allocator->GetRecorder()->RecordTouchAllocation(
17174  allocator->GetCurrentFrameIndex(),
17175  allocation);
17176  }
17177 #endif
17178 
17179  return allocator->TouchAllocation(allocation);
17180 }
17181 
17182 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
17183  VmaAllocator allocator,
17184  VmaAllocation allocation,
17185  void* pUserData)
17186 {
17187  VMA_ASSERT(allocator && allocation);
17188 
17189  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17190 
17191  allocation->SetUserData(allocator, pUserData);
17192 
17193 #if VMA_RECORDING_ENABLED
17194  if(allocator->GetRecorder() != VMA_NULL)
17195  {
17196  allocator->GetRecorder()->RecordSetAllocationUserData(
17197  allocator->GetCurrentFrameIndex(),
17198  allocation,
17199  pUserData);
17200  }
17201 #endif
17202 }
17203 
17204 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
17205  VmaAllocator allocator,
17206  VmaAllocation* pAllocation)
17207 {
17208  VMA_ASSERT(allocator && pAllocation);
17209 
17210  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17211 
17212  allocator->CreateLostAllocation(pAllocation);
17213 
17214 #if VMA_RECORDING_ENABLED
17215  if(allocator->GetRecorder() != VMA_NULL)
17216  {
17217  allocator->GetRecorder()->RecordCreateLostAllocation(
17218  allocator->GetCurrentFrameIndex(),
17219  *pAllocation);
17220  }
17221 #endif
17222 }
17223 
17224 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
17225  VmaAllocator allocator,
17226  VmaAllocation allocation,
17227  void** ppData)
17228 {
17229  VMA_ASSERT(allocator && allocation && ppData);
17230 
17231  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17232 
17233  VkResult res = allocator->Map(allocation, ppData);
17234 
17235 #if VMA_RECORDING_ENABLED
17236  if(allocator->GetRecorder() != VMA_NULL)
17237  {
17238  allocator->GetRecorder()->RecordMapMemory(
17239  allocator->GetCurrentFrameIndex(),
17240  allocation);
17241  }
17242 #endif
17243 
17244  return res;
17245 }
17246 
17247 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
17248  VmaAllocator allocator,
17249  VmaAllocation allocation)
17250 {
17251  VMA_ASSERT(allocator && allocation);
17252 
17253  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17254 
17255 #if VMA_RECORDING_ENABLED
17256  if(allocator->GetRecorder() != VMA_NULL)
17257  {
17258  allocator->GetRecorder()->RecordUnmapMemory(
17259  allocator->GetCurrentFrameIndex(),
17260  allocation);
17261  }
17262 #endif
17263 
17264  allocator->Unmap(allocation);
17265 }
17266 
17267 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17268 {
17269  VMA_ASSERT(allocator && allocation);
17270 
17271  VMA_DEBUG_LOG("vmaFlushAllocation");
17272 
17273  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17274 
17275  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17276 
17277 #if VMA_RECORDING_ENABLED
17278  if(allocator->GetRecorder() != VMA_NULL)
17279  {
17280  allocator->GetRecorder()->RecordFlushAllocation(
17281  allocator->GetCurrentFrameIndex(),
17282  allocation, offset, size);
17283  }
17284 #endif
17285 }
17286 
17287 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17288 {
17289  VMA_ASSERT(allocator && allocation);
17290 
17291  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17292 
17293  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17294 
17295  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17296 
17297 #if VMA_RECORDING_ENABLED
17298  if(allocator->GetRecorder() != VMA_NULL)
17299  {
17300  allocator->GetRecorder()->RecordInvalidateAllocation(
17301  allocator->GetCurrentFrameIndex(),
17302  allocation, offset, size);
17303  }
17304 #endif
17305 }
17306 
17307 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17308 {
17309  VMA_ASSERT(allocator);
17310 
17311  VMA_DEBUG_LOG("vmaCheckCorruption");
17312 
17313  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17314 
17315  return allocator->CheckCorruption(memoryTypeBits);
17316 }
17317 
17318 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17319  VmaAllocator allocator,
17320  VmaAllocation* pAllocations,
17321  size_t allocationCount,
17322  VkBool32* pAllocationsChanged,
17323  const VmaDefragmentationInfo *pDefragmentationInfo,
17324  VmaDefragmentationStats* pDefragmentationStats)
17325 {
17326  // Deprecated interface, reimplemented using new one.
17327 
17328  VmaDefragmentationInfo2 info2 = {};
17329  info2.allocationCount = (uint32_t)allocationCount;
17330  info2.pAllocations = pAllocations;
17331  info2.pAllocationsChanged = pAllocationsChanged;
17332  if(pDefragmentationInfo != VMA_NULL)
17333  {
17334  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17335  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17336  }
17337  else
17338  {
17339  info2.maxCpuAllocationsToMove = UINT32_MAX;
17340  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
17341  }
17342  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
17343 
17345  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
17346  if(res == VK_NOT_READY)
17347  {
17348  res = vmaDefragmentationEnd( allocator, ctx);
17349  }
17350  return res;
17351 }
17352 
17353 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
17354  VmaAllocator allocator,
17355  const VmaDefragmentationInfo2* pInfo,
17356  VmaDefragmentationStats* pStats,
17357  VmaDefragmentationContext *pContext)
17358 {
17359  VMA_ASSERT(allocator && pInfo && pContext);
17360 
17361  // Degenerate case: Nothing to defragment.
17362  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
17363  {
17364  return VK_SUCCESS;
17365  }
17366 
17367  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
17368  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
17369  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
17370  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17371 
17372  VMA_DEBUG_LOG("vmaDefragmentationBegin");
17373 
17374  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17375 
17376  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
17377 
17378 #if VMA_RECORDING_ENABLED
17379  if(allocator->GetRecorder() != VMA_NULL)
17380  {
17381  allocator->GetRecorder()->RecordDefragmentationBegin(
17382  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
17383  }
17384 #endif
17385 
17386  return res;
17387 }
17388 
17389 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
17390  VmaAllocator allocator,
17391  VmaDefragmentationContext context)
17392 {
17393  VMA_ASSERT(allocator);
17394 
17395  VMA_DEBUG_LOG("vmaDefragmentationEnd");
17396 
17397  if(context != VK_NULL_HANDLE)
17398  {
17399  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17400 
17401 #if VMA_RECORDING_ENABLED
17402  if(allocator->GetRecorder() != VMA_NULL)
17403  {
17404  allocator->GetRecorder()->RecordDefragmentationEnd(
17405  allocator->GetCurrentFrameIndex(), context);
17406  }
17407 #endif
17408 
17409  return allocator->DefragmentationEnd(context);
17410  }
17411  else
17412  {
17413  return VK_SUCCESS;
17414  }
17415 }
17416 
17417 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
17418  VmaAllocator allocator,
17419  VmaAllocation allocation,
17420  VkBuffer buffer)
17421 {
17422  VMA_ASSERT(allocator && allocation && buffer);
17423 
17424  VMA_DEBUG_LOG("vmaBindBufferMemory");
17425 
17426  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17427 
17428  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
17429 }
17430 
17431 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
17432  VmaAllocator allocator,
17433  VmaAllocation allocation,
17434  VkDeviceSize allocationLocalOffset,
17435  VkBuffer buffer,
17436  const void* pNext)
17437 {
17438  VMA_ASSERT(allocator && allocation && buffer);
17439 
17440  VMA_DEBUG_LOG("vmaBindBufferMemory2");
17441 
17442  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17443 
17444  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
17445 }
17446 
17447 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
17448  VmaAllocator allocator,
17449  VmaAllocation allocation,
17450  VkImage image)
17451 {
17452  VMA_ASSERT(allocator && allocation && image);
17453 
17454  VMA_DEBUG_LOG("vmaBindImageMemory");
17455 
17456  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17457 
17458  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
17459 }
17460 
17461 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
17462  VmaAllocator allocator,
17463  VmaAllocation allocation,
17464  VkDeviceSize allocationLocalOffset,
17465  VkImage image,
17466  const void* pNext)
17467 {
17468  VMA_ASSERT(allocator && allocation && image);
17469 
17470  VMA_DEBUG_LOG("vmaBindImageMemory2");
17471 
17472  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17473 
17474  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
17475 }
17476 
/*
Creates a VkBuffer, allocates memory for it, and (unless DONT_BIND is set)
binds them together. On any failure, already-created objects are rolled back
so the caller never receives a partially-constructed pair.
Rejects zero-size buffers up front with VK_ERROR_VALIDATION_FAILED_EXT.
*/
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Out-parameters are nulled first so they are well-defined on every
    // failure path.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory (skipped when DONT_BIND is requested).
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and buffer.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
17589 
17590 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17591  VmaAllocator allocator,
17592  VkBuffer buffer,
17593  VmaAllocation allocation)
17594 {
17595  VMA_ASSERT(allocator);
17596 
17597  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17598  {
17599  return;
17600  }
17601 
17602  VMA_DEBUG_LOG("vmaDestroyBuffer");
17603 
17604  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17605 
17606 #if VMA_RECORDING_ENABLED
17607  if(allocator->GetRecorder() != VMA_NULL)
17608  {
17609  allocator->GetRecorder()->RecordDestroyBuffer(
17610  allocator->GetCurrentFrameIndex(),
17611  allocation);
17612  }
17613 #endif
17614 
17615  if(buffer != VK_NULL_HANDLE)
17616  {
17617  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17618  }
17619 
17620  if(allocation != VK_NULL_HANDLE)
17621  {
17622  allocator->FreeMemory(
17623  1, // allocationCount
17624  &allocation);
17625  }
17626 }
17627 
// Creates a VkImage, allocates memory for it from the allocator, and (unless
// VMA_ALLOCATION_CREATE_DONT_BIND_BIT is set) binds the two together.
// On any failure after a step succeeded, previously created objects are
// destroyed/freed and *pImage / *pAllocation are reset to VK_NULL_HANDLE.
// pAllocationInfo is optional and, when non-null, receives info about the
// new allocation on success.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate images early: any zero dimension, mip count, or
    // layer count is invalid input rather than a runtime Vulkan failure.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Make output handles well-defined even on early failure paths.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Optimal-tiling and linear-tiling images are tracked as distinct
        // suballocation types (relevant to bufferImageGranularity handling).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens regardless of allocation success so the trace
        // reflects every call made by the application.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation, then the image.
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: only the image exists; destroy it.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    // vkCreateImage itself failed; nothing to clean up.
    return res;
}
17729 
17730 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17731  VmaAllocator allocator,
17732  VkImage image,
17733  VmaAllocation allocation)
17734 {
17735  VMA_ASSERT(allocator);
17736 
17737  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17738  {
17739  return;
17740  }
17741 
17742  VMA_DEBUG_LOG("vmaDestroyImage");
17743 
17744  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17745 
17746 #if VMA_RECORDING_ENABLED
17747  if(allocator->GetRecorder() != VMA_NULL)
17748  {
17749  allocator->GetRecorder()->RecordDestroyImage(
17750  allocator->GetCurrentFrameIndex(),
17751  allocation);
17752  }
17753 #endif
17754 
17755  if(image != VK_NULL_HANDLE)
17756  {
17757  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17758  }
17759  if(allocation != VK_NULL_HANDLE)
17760  {
17761  allocator->FreeMemory(
17762  1, // allocationCount
17763  &allocation);
17764  }
17765 }
17766 
17767 #endif // #ifdef VMA_IMPLEMENTATION
VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1982
VmaVulkanFunctions::vkAllocateMemory
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1940
VmaDeviceMemoryCallbacks::pfnFree
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1866
VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1977
VmaVulkanFunctions::vkGetPhysicalDeviceProperties
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1938
vmaFreeMemory
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
PFN_vmaAllocateDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1843
VmaAllocatorCreateInfo::physicalDevice
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2003
VmaAllocationInfo
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2579
VmaDefragmentationInfo2::allocationCount
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3128
VmaAllocatorCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2029
VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:1927
VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2180
vmaInvalidateAllocation
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2325
VmaAllocationCreateInfo
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VmaPoolStats
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2651
VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2408
VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1875
VmaPoolStats::unusedSize
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2657
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2388
VmaRecordFlagBits
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1969
vmaSetPoolName
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
VmaAllocatorCreateInfo
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1862
vmaTouchAllocation
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2375
VmaAllocatorCreateInfo::preferredLargeHeapBlockSize
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2009
VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1975
vmaResizeAllocation
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Deprecated.
VmaVulkanFunctions::vkUnmapMemory
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1943
VmaAllocationInfo::deviceMemory
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2794
VmaStatInfo::unusedRangeCount
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2148
VmaAllocationCreateInfo::pUserData
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2482
VmaStatInfo::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2154
VmaVulkanFunctions::vkMapMemory
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1942
VMA_RECORDING_ENABLED
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1769
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2419
vmaUnmapMemory
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VmaBudget::usage
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2205
VmaAllocator
Represents the main object of this library, initialized by calling vmaCreateAllocator().
VmaVulkanFunctions::vkCmdCopyBuffer
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1954
VmaAllocatorCreateInfo
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1997
VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2349
VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3114
VmaPoolStats::unusedRangeSizeMax
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2670
VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2412
VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1900
vmaSetCurrentFrameIndex
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
VmaDefragmentationInfo::maxAllocationsToMove
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3208
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2403
VmaMemoryUsage
VmaMemoryUsage
Definition: vk_mem_alloc.h:2263
vmaGetMemoryTypeProperties
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VmaStatInfo::blockCount
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2144
VmaPoolCreateInfo::memoryTypeIndex
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2607
VmaPoolCreateInfo::blockSize
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:2619
VmaDefragmentationInfo2::poolCount
uint32_t poolCount
Number of pools in the pPools array.
Definition: vk_mem_alloc.h:3146
vmaBuildStatsString
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
vmaGetAllocationInfo
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VmaDefragmentationStats
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaPoolStats::allocationCount
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:2660
VmaAllocatorCreateFlags
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1931
vmaFreeStatsString
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
vmaAllocateMemoryForBuffer
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:1929
VmaDefragmentationFlagBits
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3113
VmaAllocationInfo::offset
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:2799
VmaAllocationCreateFlagBits
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2331
VmaVulkanFunctions::vkGetPhysicalDeviceMemoryProperties
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1939
VmaPoolCreateFlags
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2600
vmaCreateLostAllocation
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
vmaGetPhysicalDeviceProperties
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VmaAllocationCreateInfo::pool
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2475
vmaGetMemoryProperties
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
VmaStats::total
VmaStatInfo total
Definition: vk_mem_alloc.h:2162
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2338
vmaDefragmentationEnd
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:1915
VmaDefragmentationInfo2::flags
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3125
VmaVulkanFunctions::vkBindImageMemory
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1947
VmaDefragmentationInfo2::maxGpuBytesToMove
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3177
VmaDefragmentationStats
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3212
vmaDestroyPool
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VmaPoolStats::size
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2654
VmaVulkanFunctions::vkFreeMemory
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1941
VmaRecordFlags
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1979
VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2295
VmaDefragmentationInfo2::pPools
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3162
VmaAllocation
Represents single memory allocation.
VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2317
vmaSetAllocationUserData
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VmaAllocatorCreateInfo::pRecordSettings
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2073
VmaVulkanFunctions::vkBindBufferMemory
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1946
VmaVulkanFunctions::vkGetBufferMemoryRequirements
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1948
VmaDefragmentationInfo2::commandBuffer
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3191
PFN_vmaFreeDeviceMemoryFunction
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1849
VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2158
VmaPoolCreateInfo::minBlockCount
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2624
VmaAllocatorCreateInfo::vulkanApiVersion
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2088
VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2141
VmaDefragmentationStats::bytesFreed
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3216
VmaStatInfo
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VmaVulkanFunctions
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
vmaFreeMemoryPages
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VmaDefragmentationInfo
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2285
vmaFindMemoryTypeIndex
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
vmaCreatePool
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaStatInfo::unusedBytes
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2152
vmaAllocateMemoryPages
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VmaStatInfo::usedBytes
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2150
VmaAllocatorCreateInfo::pAllocationCallbacks
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2012
VmaAllocatorCreateFlagBits
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1870
vmaAllocateMemoryForImage
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
VmaPoolCreateInfo::maxBlockCount
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2632
VmaPoolCreateInfo
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2604
VmaDeviceMemoryCallbacks::pfnAllocate
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1864
VmaRecordSettings
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
VmaPool
Represents custom memory pool.
VmaBudget
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2311
VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2382
VmaPoolCreateInfo::flags
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2610
VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2327
VmaStatInfo::allocationCount
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2146
VmaVulkanFunctions::vkInvalidateMappedMemoryRanges
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1945
vmaAllocateMemory
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VmaDefragmentationInfo2
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3122
VmaDefragmentationInfo::maxBytesToMove
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3203
VmaBudget::blockBytes
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2184
VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2598
VmaAllocationCreateInfo::requiredFlags
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2456
VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2429
VmaStatInfo::allocationSizeAvg
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2153
vmaDestroyAllocator
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocatorCreateInfo::pDeviceMemoryCallbacks
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2015
VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2433
VmaAllocatorCreateInfo::device
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2006
vmaFindMemoryTypeIndexForImageInfo
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VmaStats
struct VmaStats VmaStats
General statistics from current state of Allocator.
vmaMapMemory
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
vmaBindBufferMemory
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
VmaAllocatorCreateInfo::pHeapSizeLimit
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2054
vmaCreateImage
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
vmaFindMemoryTypeIndexForBufferInfo
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VmaBudget::budget
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2216
VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1937
VmaAllocationInfo::pMappedData
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2813
VmaAllocatorCreateInfo::flags
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2000
VmaDefragmentationFlags
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3116
vmaGetPoolStats
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
VmaVulkanFunctions::vkCreateImage
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1952
VmaStatInfo::unusedRangeSizeAvg
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2154
VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2302
VmaDefragmentationInfo2
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2426
VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2423
VmaAllocationCreateInfo::usage
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2451
VmaStatInfo::allocationSizeMin
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2153
vmaBindBufferMemory2
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaAllocationInfo::size
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2804
VmaRecordSettings::flags
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1985
VmaVulkanFunctions::vkFlushMappedMemoryRanges
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1944
VmaAllocationInfo::pUserData
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2818
vmaMakePoolAllocationsLost
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2562
vmaCreateBuffer
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaStats::memoryHeap
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2161
VmaAllocatorCreateInfo::pVulkanFunctions
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1.
Definition: vk_mem_alloc.h:2066
VmaPoolStats::blockCount
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2673
vmaCreateAllocator
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
vmaDefragment
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
vmaCheckCorruption
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaAllocationCreateFlags
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2440
VmaStats::memoryType
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2160
VmaAllocatorCreateInfo::instance
VkInstance instance
Optional handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2079
vmaFlushAllocation
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VmaPoolStats
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2268
VmaDefragmentationInfo2::maxGpuAllocationsToMove
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3182
VmaVulkanFunctions::vkDestroyBuffer
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1951
VmaPoolCreateInfo::frameInUseCount
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2646
VmaVulkanFunctions::vkDestroyImage
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1953
VmaDefragmentationInfo2::maxCpuBytesToMove
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3167
vmaGetPoolName
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VmaAllocationInfo::memoryType
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2785
vmaDestroyImage
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:2362
vmaCalculateStats
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
vmaDestroyBuffer
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VmaVulkanFunctions::vkCreateBuffer
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1950
VmaDeviceMemoryCallbacks
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VmaPoolStats::unusedRangeCount
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2663
VmaPoolCreateFlagBits
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2544
VmaDefragmentationStats::bytesMoved
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3214
VmaStatInfo::unusedRangeSizeMin
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2154
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2393
vmaCheckPoolCorruption
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
vmaBindImageMemory
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VmaAllocationCreateInfo::flags
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2445
VmaVulkanFunctions::vkGetImageMemoryRequirements
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1949
vmaGetBudget
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
VmaAllocationCreateInfo
Definition: vk_mem_alloc.h:2442
VmaAllocationCreateInfo::preferredFlags
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2461
vmaDefragmentationBegin
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
vmaBindImageMemory2
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
VmaDefragmentationInfo2::pAllocationsChanged
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3143
VmaDefragmentationStats::allocationsMoved
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3218
VmaAllocationCreateInfo::memoryTypeBits
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2469
VmaDefragmentationStats::deviceMemoryBlocksFreed
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3220
VmaRecordSettings::pFilePath
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1993
VmaStatInfo::allocationSizeMax
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2153
VmaPoolCreateInfo
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2780
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2590
VmaBudget::allocationBytes
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2195
VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2438
VmaDefragmentationContext
Represents an opaque object describing a started defragmentation process.
VmaDefragmentationInfo2::pAllocations
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3137
VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:2594
VmaDefragmentationInfo2::maxCpuAllocationsToMove
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3172
VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3198
VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2399