Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

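/*
Example (a sketch, not part of the library): to force recording off even on
Windows, define the macro before including this header:

    #define VMA_RECORDING_ENABLED 0
    #include "vk_mem_alloc.h"
*/
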
#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

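/*
Example (a minimal sketch): filling VmaVulkanFunctions manually when the
library is compiled with VMA_STATIC_VULKAN_FUNCTIONS defined to 0. Only a few
members are shown; in real code every member must be assigned the same way.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ... assign the remaining members analogously ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/
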
typedef enum VmaRecordFlagBits {
    // Enables flush after recording every function call, so the file stays
    // readable even if the application crashes.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

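/*
Example (a sketch): typical allocator lifetime. `physicalDevice` and `device`
are assumed to be valid handles created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
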
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

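/*
Example (a sketch): dumping allocator statistics as a JSON string. The string
must be released with vmaFreeStatsString().

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write statsString to a file or log ...
    vmaFreeStatsString(allocator, statsString);
*/
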
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

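/*
Example (a sketch): querying a suitable memory type for a uniform buffer that
should live in host-visible memory. `allocator` is assumed to already exist.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/
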
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

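/*
Example (a sketch): creating a custom pool for a memory type index found with
vmaFindMemoryTypeIndex*() above.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;
    poolCreateInfo.minBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
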
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)


typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

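/*
Example (a sketch): writing to a host-visible allocation. `allocation`,
`srcData`, and `srcDataSize` are assumed to come from the application. If the
memory type is not HOST_COHERENT, vmaFlushAllocation() is also needed.

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
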
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    VmaAllocation* pAllocations;
    VkBool32* pAllocationsChanged;
    uint32_t poolCount;
    VmaPool* pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

VkResult vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

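/*
Example (a sketch): CPU-side defragmentation of a set of existing allocations
using the begin/end API. `allocations` and `allocationCount` are assumed to be
provided by the application, with no buffers or images currently bound.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Afterwards, recreate buffers/images and rebind them at the new offsets.
*/
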
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

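/*
Example (a sketch): creating a GPU-only vertex buffer together with its memory
in one call. `vertexDataSize` is an application-provided value.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = vertexDataSize;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ... later:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
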
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/
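
/*
Example (a sketch): the typical pattern in exactly one .cpp file of the
application, overriding a configuration macro before the implementation is
compiled:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/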

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though std::shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
Library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif
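
/*
Example (a sketch): a custom VMA_ATOMIC_UINT32 replacement implementing the
subset listed above, e.g. for a platform without <atomic>. The PlatformAtomic*
calls are hypothetical placeholders for the platform's own primitives.

    class MyAtomicUint32
    {
    public:
        MyAtomicUint32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return PlatformAtomicLoad32(&m_Value); }
        void store(uint32_t desired) { PlatformAtomicStore32(&m_Value, desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
        { return PlatformAtomicCas32(&m_Value, &expected, desired); }
    private:
        volatile uint32_t m_Value;
    };
    #define VMA_ATOMIC_UINT32 MyAtomicUint32
*/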

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and the other one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

/*
Returns true if all pointers in the array are not-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded,
because the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 };

    m_ItemBlocks.push_back(newBlock);

    // Set up singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computation just to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

4656 template<typename T, typename AllocatorT>
4657 class VmaList
4658 {
4659  VMA_CLASS_NO_COPY(VmaList)
4660 public:
4661  class iterator
4662  {
4663  public:
4664  iterator() :
4665  m_pList(VMA_NULL),
4666  m_pItem(VMA_NULL)
4667  {
4668  }
4669 
4670  T& operator*() const
4671  {
4672  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4673  return m_pItem->Value;
4674  }
4675  T* operator->() const
4676  {
4677  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4678  return &m_pItem->Value;
4679  }
4680 
4681  iterator& operator++()
4682  {
4683  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4684  m_pItem = m_pItem->pNext;
4685  return *this;
4686  }
4687  iterator& operator--()
4688  {
4689  if(m_pItem != VMA_NULL)
4690  {
4691  m_pItem = m_pItem->pPrev;
4692  }
4693  else
4694  {
4695  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4696  m_pItem = m_pList->Back();
4697  }
4698  return *this;
4699  }
4700 
4701  iterator operator++(int)
4702  {
4703  iterator result = *this;
4704  ++*this;
4705  return result;
4706  }
4707  iterator operator--(int)
4708  {
4709  iterator result = *this;
4710  --*this;
4711  return result;
4712  }
4713 
4714  bool operator==(const iterator& rhs) const
4715  {
4716  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4717  return m_pItem == rhs.m_pItem;
4718  }
4719  bool operator!=(const iterator& rhs) const
4720  {
4721  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4722  return m_pItem != rhs.m_pItem;
4723  }
4724 
4725  private:
4726  VmaRawList<T>* m_pList;
4727  VmaListItem<T>* m_pItem;
4728 
4729  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4730  m_pList(pList),
4731  m_pItem(pItem)
4732  {
4733  }
4734 
4735  friend class VmaList<T, AllocatorT>;
4736  };
4737 
4738  class const_iterator
4739  {
4740  public:
4741  const_iterator() :
4742  m_pList(VMA_NULL),
4743  m_pItem(VMA_NULL)
4744  {
4745  }
4746 
4747  const_iterator(const iterator& src) :
4748  m_pList(src.m_pList),
4749  m_pItem(src.m_pItem)
4750  {
4751  }
4752 
4753  const T& operator*() const
4754  {
4755  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4756  return m_pItem->Value;
4757  }
4758  const T* operator->() const
4759  {
4760  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4761  return &m_pItem->Value;
4762  }
4763 
4764  const_iterator& operator++()
4765  {
4766  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4767  m_pItem = m_pItem->pNext;
4768  return *this;
4769  }
4770  const_iterator& operator--()
4771  {
4772  if(m_pItem != VMA_NULL)
4773  {
4774  m_pItem = m_pItem->pPrev;
4775  }
4776  else
4777  {
4778  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4779  m_pItem = m_pList->Back();
4780  }
4781  return *this;
4782  }
4783 
4784  const_iterator operator++(int)
4785  {
4786  const_iterator result = *this;
4787  ++*this;
4788  return result;
4789  }
4790  const_iterator operator--(int)
4791  {
4792  const_iterator result = *this;
4793  --*this;
4794  return result;
4795  }
4796 
4797  bool operator==(const const_iterator& rhs) const
4798  {
4799  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4800  return m_pItem == rhs.m_pItem;
4801  }
4802  bool operator!=(const const_iterator& rhs) const
4803  {
4804  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4805  return m_pItem != rhs.m_pItem;
4806  }
4807 
4808  private:
4809  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4810  m_pList(pList),
4811  m_pItem(pItem)
4812  {
4813  }
4814 
4815  const VmaRawList<T>* m_pList;
4816  const VmaListItem<T>* m_pItem;
4817 
4818  friend class VmaList<T, AllocatorT>;
4819  };
4820 
4821  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4822 
4823  bool empty() const { return m_RawList.IsEmpty(); }
4824  size_t size() const { return m_RawList.GetCount(); }
4825 
4826  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4827  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4828 
4829  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4830  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4831 
4832  void clear() { m_RawList.Clear(); }
4833  void push_back(const T& value) { m_RawList.PushBack(value); }
4834  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4835  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4836 
4837 private:
4838  VmaRawList<T> m_RawList;
4839 };
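
// VmaList mirrors a small subset of the std::list interface, so typical iteration
// reads the same. A usage sketch (hypothetical caller; assumes an existing
// VmaStlAllocator<int> named alloc):
//
//   VmaList< int, VmaStlAllocator<int> > list(alloc);
//   list.push_back(7);
//   typedef VmaList< int, VmaStlAllocator<int> >::iterator IterT;
//   for(IterT it = list.begin(); it != list.end(); ++it)
//   {
//       *it += 1; // mutate through the iterator, exactly as with std::list
//   }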
4840 
4841 #endif // #if VMA_USE_STL_LIST
4842 
4844 // class VmaMap
4845 
4846 // Unused in this version.
4847 #if 0
4848 
4849 #if VMA_USE_STL_UNORDERED_MAP
4850 
4851 #define VmaPair std::pair
4852 
4853 #define VMA_MAP_TYPE(KeyT, ValueT) \
4854  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4855 
4856 #else // #if VMA_USE_STL_UNORDERED_MAP
4857 
4858 template<typename T1, typename T2>
4859 struct VmaPair
4860 {
4861  T1 first;
4862  T2 second;
4863 
4864  VmaPair() : first(), second() { }
4865  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4866 };
4867 
4868 /* Class compatible with a subset of the interface of std::unordered_map.
4869 KeyT and ValueT must be POD because they are stored in VmaVector.
4870 */
4871 template<typename KeyT, typename ValueT>
4872 class VmaMap
4873 {
4874 public:
4875  typedef VmaPair<KeyT, ValueT> PairType;
4876  typedef PairType* iterator;
4877 
4878  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4879 
4880  iterator begin() { return m_Vector.begin(); }
4881  iterator end() { return m_Vector.end(); }
4882 
4883  void insert(const PairType& pair);
4884  iterator find(const KeyT& key);
4885  void erase(iterator it);
4886 
4887 private:
4888  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4889 };
4890 
4891 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4892 
4893 template<typename FirstT, typename SecondT>
4894 struct VmaPairFirstLess
4895 {
4896  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4897  {
4898  return lhs.first < rhs.first;
4899  }
4900  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4901  {
4902  return lhs.first < rhsFirst;
4903  }
4904 };
4905 
4906 template<typename KeyT, typename ValueT>
4907 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4908 {
4909  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4910  m_Vector.data(),
4911  m_Vector.data() + m_Vector.size(),
4912  pair,
4913  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4914  VmaVectorInsert(m_Vector, indexToInsert, pair);
4915 }
4916 
4917 template<typename KeyT, typename ValueT>
4918 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4919 {
4920  PairType* it = VmaBinaryFindFirstNotLess(
4921  m_Vector.data(),
4922  m_Vector.data() + m_Vector.size(),
4923  key,
4924  VmaPairFirstLess<KeyT, ValueT>());
4925  if((it != m_Vector.end()) && (it->first == key))
4926  {
4927  return it;
4928  }
4929  else
4930  {
4931  return m_Vector.end();
4932  }
4933 }
4934 
4935 template<typename KeyT, typename ValueT>
4936 void VmaMap<KeyT, ValueT>::erase(iterator it)
4937 {
4938  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4939 }
4940 
4941 #endif // #if VMA_USE_STL_UNORDERED_MAP
4942 
4943 #endif // #if 0
4944 
4946 
4947 class VmaDeviceMemoryBlock;
4948 
4949 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4950 
4951 struct VmaAllocation_T
4952 {
4953 private:
4954  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4955 
4956  enum FLAGS
4957  {
4958  FLAG_USER_DATA_STRING = 0x01,
4959  };
4960 
4961 public:
4962  enum ALLOCATION_TYPE
4963  {
4964  ALLOCATION_TYPE_NONE,
4965  ALLOCATION_TYPE_BLOCK,
4966  ALLOCATION_TYPE_DEDICATED,
4967  };
4968 
4969  /*
4970  This struct cannot have a constructor or destructor. It must be POD because it is
4971  allocated using VmaPoolAllocator.
4972  */
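 // A minimal sketch of the resulting two-phase lifetime (hypothetical caller;
 // assumes a VmaPoolAllocator<VmaAllocation_T> named allocationObjectAllocator):
 //
 //   VmaAllocation_T* const pAlloc = allocationObjectAllocator.Alloc(); // raw POD storage
 //   pAlloc->Ctor(currentFrameIndex, userDataString);                   // explicit init
 //   // ... use the allocation ...
 //   pAlloc->Dtor();                                                    // explicit cleanup
 //   allocationObjectAllocator.Free(pAlloc);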
4973 
4974  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4975  {
4976  m_Alignment = 1;
4977  m_Size = 0;
4978  m_pUserData = VMA_NULL;
4979  m_LastUseFrameIndex = currentFrameIndex;
4980  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4981  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4982  m_MapCount = 0;
4983  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4984 
4985 #if VMA_STATS_STRING_ENABLED
4986  m_CreationFrameIndex = currentFrameIndex;
4987  m_BufferImageUsage = 0;
4988 #endif
4989  }
4990 
4991  void Dtor()
4992  {
4993  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4994 
4995  // Check if owned string was freed.
4996  VMA_ASSERT(m_pUserData == VMA_NULL);
4997  }
4998 
4999  void InitBlockAllocation(
5000  VmaDeviceMemoryBlock* block,
5001  VkDeviceSize offset,
5002  VkDeviceSize alignment,
5003  VkDeviceSize size,
5004  VmaSuballocationType suballocationType,
5005  bool mapped,
5006  bool canBecomeLost)
5007  {
5008  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5009  VMA_ASSERT(block != VMA_NULL);
5010  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5011  m_Alignment = alignment;
5012  m_Size = size;
5013  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5014  m_SuballocationType = (uint8_t)suballocationType;
5015  m_BlockAllocation.m_Block = block;
5016  m_BlockAllocation.m_Offset = offset;
5017  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5018  }
5019 
5020  void InitLost()
5021  {
5022  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5023  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5024  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5025  m_BlockAllocation.m_Block = VMA_NULL;
5026  m_BlockAllocation.m_Offset = 0;
5027  m_BlockAllocation.m_CanBecomeLost = true;
5028  }
5029 
5030  void ChangeBlockAllocation(
5031  VmaAllocator hAllocator,
5032  VmaDeviceMemoryBlock* block,
5033  VkDeviceSize offset);
5034 
5035  void ChangeSize(VkDeviceSize newSize);
5036  void ChangeOffset(VkDeviceSize newOffset);
5037 
5038  // A non-null pMappedData means the allocation was created with the MAPPED flag (VMA_ALLOCATION_CREATE_MAPPED_BIT).
5039  void InitDedicatedAllocation(
5040  uint32_t memoryTypeIndex,
5041  VkDeviceMemory hMemory,
5042  VmaSuballocationType suballocationType,
5043  void* pMappedData,
5044  VkDeviceSize size)
5045  {
5046  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5047  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5048  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5049  m_Alignment = 0;
5050  m_Size = size;
5051  m_SuballocationType = (uint8_t)suballocationType;
5052  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5053  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5054  m_DedicatedAllocation.m_hMemory = hMemory;
5055  m_DedicatedAllocation.m_pMappedData = pMappedData;
5056  }
5057 
5058  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5059  VkDeviceSize GetAlignment() const { return m_Alignment; }
5060  VkDeviceSize GetSize() const { return m_Size; }
5061  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5062  void* GetUserData() const { return m_pUserData; }
5063  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5064  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5065 
5066  VmaDeviceMemoryBlock* GetBlock() const
5067  {
5068  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5069  return m_BlockAllocation.m_Block;
5070  }
5071  VkDeviceSize GetOffset() const;
5072  VkDeviceMemory GetMemory() const;
5073  uint32_t GetMemoryTypeIndex() const;
5074  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5075  void* GetMappedData() const;
5076  bool CanBecomeLost() const;
5077 
5078  uint32_t GetLastUseFrameIndex() const
5079  {
5080  return m_LastUseFrameIndex.load();
5081  }
5082  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5083  {
5084  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5085  }
5086  /*
5087  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5088  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5089  - Else, returns false.
5090 
5091  Asserts if hAllocation is already lost - you should not call this function then.
5092  Asserts if hAllocation was not created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
5093  */
5094  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
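 // Worked example with illustrative numbers: with frameInUseCount = 2 and
 // currentFrameIndex = 10, an allocation whose LastUseFrameIndex is 7 satisfies
 // 7 + 2 < 10, so MakeLost() makes it lost and returns true. One last used in
 // frame 8 survives, because 8 + 2 == 10 is not less than 10.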
5095 
5096  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5097  {
5098  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5099  outInfo.blockCount = 1;
5100  outInfo.allocationCount = 1;
5101  outInfo.unusedRangeCount = 0;
5102  outInfo.usedBytes = m_Size;
5103  outInfo.unusedBytes = 0;
5104  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5105  outInfo.unusedRangeSizeMin = UINT64_MAX;
5106  outInfo.unusedRangeSizeMax = 0;
5107  }
5108 
5109  void BlockAllocMap();
5110  void BlockAllocUnmap();
5111  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5112  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5113 
5114 #if VMA_STATS_STRING_ENABLED
5115  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5116  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5117 
5118  void InitBufferImageUsage(uint32_t bufferImageUsage)
5119  {
5120  VMA_ASSERT(m_BufferImageUsage == 0);
5121  m_BufferImageUsage = bufferImageUsage;
5122  }
5123 
5124  void PrintParameters(class VmaJsonWriter& json) const;
5125 #endif
5126 
5127 private:
5128  VkDeviceSize m_Alignment;
5129  VkDeviceSize m_Size;
5130  void* m_pUserData;
5131  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5132  uint8_t m_Type; // ALLOCATION_TYPE
5133  uint8_t m_SuballocationType; // VmaSuballocationType
5134  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5135  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5136  uint8_t m_MapCount;
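 // Example: m_MapCount == 0x82 means the allocation was created persistently
 // mapped (bit 0x80 set) and has additionally been mapped twice via
 // vmaMapMemory(), since 0x82 & 0x7F == 2.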
5137  uint8_t m_Flags; // enum FLAGS
5138 
5139  // Allocation out of VmaDeviceMemoryBlock.
5140  struct BlockAllocation
5141  {
5142  VmaDeviceMemoryBlock* m_Block;
5143  VkDeviceSize m_Offset;
5144  bool m_CanBecomeLost;
5145  };
5146 
5147  // Allocation for an object that has its own private VkDeviceMemory.
5148  struct DedicatedAllocation
5149  {
5150  uint32_t m_MemoryTypeIndex;
5151  VkDeviceMemory m_hMemory;
5152  void* m_pMappedData; // Not null means memory is mapped.
5153  };
5154 
5155  union
5156  {
5157  // Allocation out of VmaDeviceMemoryBlock.
5158  BlockAllocation m_BlockAllocation;
5159  // Allocation for an object that has its own private VkDeviceMemory.
5160  DedicatedAllocation m_DedicatedAllocation;
5161  };
5162 
5163 #if VMA_STATS_STRING_ENABLED
5164  uint32_t m_CreationFrameIndex;
5165  uint32_t m_BufferImageUsage; // 0 if unknown.
5166 #endif
5167 
5168  void FreeUserDataString(VmaAllocator hAllocator);
5169 };
5170 
5171 /*
5172 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
5173 allocation and returned as allocated memory, or free.
5174 */
5175 struct VmaSuballocation
5176 {
5177  VkDeviceSize offset;
5178  VkDeviceSize size;
5179  VmaAllocation hAllocation;
5180  VmaSuballocationType type;
5181 };
5182 
5183 // Comparator for offsets.
5184 struct VmaSuballocationOffsetLess
5185 {
5186  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5187  {
5188  return lhs.offset < rhs.offset;
5189  }
5190 };
5191 struct VmaSuballocationOffsetGreater
5192 {
5193  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5194  {
5195  return lhs.offset > rhs.offset;
5196  }
5197 };
5198 
5199 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5200 
5201 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
5202 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5203 
5204 enum class VmaAllocationRequestType
5205 {
5206  Normal,
5207  // Used by "Linear" algorithm.
5208  UpperAddress,
5209  EndOf1st,
5210  EndOf2nd,
5211 };
5212 
5213 /*
5214 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5215 
5216 If canMakeOtherLost was false:
5217 - item points to a FREE suballocation.
5218 - itemsToMakeLostCount is 0.
5219 
5220 If canMakeOtherLost was true:
5221 - item points to first of sequence of suballocations, which are either FREE,
5222  or point to VmaAllocations that can become lost.
5223 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5224  the requested allocation to succeed.
5225 */
5226 struct VmaAllocationRequest
5227 {
5228  VkDeviceSize offset;
5229  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5230  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5231  VmaSuballocationList::iterator item;
5232  size_t itemsToMakeLostCount;
5233  void* customData;
5234  VmaAllocationRequestType type;
5235 
5236  VkDeviceSize CalcCost() const
5237  {
5238  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5239  }
5240 };
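// Worked example with illustrative numbers: a request that overlaps 4096 bytes of
// allocations to be made lost (sumItemSize) and needs 2 of them made lost costs
// CalcCost() = 4096 + 2 * VMA_LOST_ALLOCATION_COST = 4096 + 2 * 1048576 = 2101248.
// A lower cost marks a cheaper place to satisfy the request.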
5241 
5242 /*
5243 Data structure used for bookkeeping of allocations and unused ranges of memory
5244 in a single VkDeviceMemory block.
5245 */
5246 class VmaBlockMetadata
5247 {
5248 public:
5249  VmaBlockMetadata(VmaAllocator hAllocator);
5250  virtual ~VmaBlockMetadata() { }
5251  virtual void Init(VkDeviceSize size) { m_Size = size; }
5252 
5253  // Validates all data structures inside this object. If not valid, returns false.
5254  virtual bool Validate() const = 0;
5255  VkDeviceSize GetSize() const { return m_Size; }
5256  virtual size_t GetAllocationCount() const = 0;
5257  virtual VkDeviceSize GetSumFreeSize() const = 0;
5258  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5259  // Returns true if this block is empty - contains only a single free suballocation.
5260  virtual bool IsEmpty() const = 0;
5261 
5262  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5263  // Shouldn't modify blockCount.
5264  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5265 
5266 #if VMA_STATS_STRING_ENABLED
5267  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5268 #endif
5269 
5270  // Tries to find a place for a suballocation with the given parameters inside this block.
5271  // On success, fills pAllocationRequest and returns true.
5272  // On failure, returns false.
5273  virtual bool CreateAllocationRequest(
5274  uint32_t currentFrameIndex,
5275  uint32_t frameInUseCount,
5276  VkDeviceSize bufferImageGranularity,
5277  VkDeviceSize allocSize,
5278  VkDeviceSize allocAlignment,
5279  bool upperAddress,
5280  VmaSuballocationType allocType,
5281  bool canMakeOtherLost,
5282  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5283  uint32_t strategy,
5284  VmaAllocationRequest* pAllocationRequest) = 0;
5285 
5286  virtual bool MakeRequestedAllocationsLost(
5287  uint32_t currentFrameIndex,
5288  uint32_t frameInUseCount,
5289  VmaAllocationRequest* pAllocationRequest) = 0;
5290 
5291  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5292 
5293  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5294 
5295  // Makes actual allocation based on request. Request must already be checked and valid.
5296  virtual void Alloc(
5297  const VmaAllocationRequest& request,
5298  VmaSuballocationType type,
5299  VkDeviceSize allocSize,
5300  VmaAllocation hAllocation) = 0;
5301 
5302  // Frees suballocation assigned to given memory region.
5303  virtual void Free(const VmaAllocation allocation) = 0;
5304  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5305 
5306  // Tries to resize (grow or shrink) space for given allocation, in place.
5307  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5308 
5309 protected:
5310  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5311 
5312 #if VMA_STATS_STRING_ENABLED
5313  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5314  VkDeviceSize unusedBytes,
5315  size_t allocationCount,
5316  size_t unusedRangeCount) const;
5317  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5318  VkDeviceSize offset,
5319  VmaAllocation hAllocation) const;
5320  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5321  VkDeviceSize offset,
5322  VkDeviceSize size) const;
5323  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5324 #endif
5325 
5326 private:
5327  VkDeviceSize m_Size;
5328  const VkAllocationCallbacks* m_pAllocationCallbacks;
5329 };
5330 
5331 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5332  VMA_ASSERT(0 && "Validation failed: " #cond); \
5333  return false; \
5334  } } while(false)
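// Typical use inside a Validate() implementation (a sketch, not verbatim library code):
//
//   VMA_VALIDATE(m_SumFreeSize <= GetSize());
//
// The do { ... } while(false) wrapper makes the macro expand to a single statement,
// so it remains safe inside unbraced if/else branches.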
5335 
5336 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5337 {
5338  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5339 public:
5340  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5341  virtual ~VmaBlockMetadata_Generic();
5342  virtual void Init(VkDeviceSize size);
5343 
5344  virtual bool Validate() const;
5345  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5346  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5347  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5348  virtual bool IsEmpty() const;
5349 
5350  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5351  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5352 
5353 #if VMA_STATS_STRING_ENABLED
5354  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5355 #endif
5356 
5357  virtual bool CreateAllocationRequest(
5358  uint32_t currentFrameIndex,
5359  uint32_t frameInUseCount,
5360  VkDeviceSize bufferImageGranularity,
5361  VkDeviceSize allocSize,
5362  VkDeviceSize allocAlignment,
5363  bool upperAddress,
5364  VmaSuballocationType allocType,
5365  bool canMakeOtherLost,
5366  uint32_t strategy,
5367  VmaAllocationRequest* pAllocationRequest);
5368 
5369  virtual bool MakeRequestedAllocationsLost(
5370  uint32_t currentFrameIndex,
5371  uint32_t frameInUseCount,
5372  VmaAllocationRequest* pAllocationRequest);
5373 
5374  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5375 
5376  virtual VkResult CheckCorruption(const void* pBlockData);
5377 
5378  virtual void Alloc(
5379  const VmaAllocationRequest& request,
5380  VmaSuballocationType type,
5381  VkDeviceSize allocSize,
5382  VmaAllocation hAllocation);
5383 
5384  virtual void Free(const VmaAllocation allocation);
5385  virtual void FreeAtOffset(VkDeviceSize offset);
5386 
5387  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5388 
5390  // For defragmentation
5391 
5392  bool IsBufferImageGranularityConflictPossible(
5393  VkDeviceSize bufferImageGranularity,
5394  VmaSuballocationType& inOutPrevSuballocType) const;
5395 
5396 private:
5397  friend class VmaDefragmentationAlgorithm_Generic;
5398  friend class VmaDefragmentationAlgorithm_Fast;
5399 
5400  uint32_t m_FreeCount;
5401  VkDeviceSize m_SumFreeSize;
5402  VmaSuballocationList m_Suballocations;
5403  // Suballocations that are free and have size greater than certain threshold.
5404  // Sorted by size, ascending.
5405  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5406 
5407  bool ValidateFreeSuballocationList() const;
5408 
5409  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
5410  // If yes, fills pOffset and returns true. If no, returns false.
5411  bool CheckAllocation(
5412  uint32_t currentFrameIndex,
5413  uint32_t frameInUseCount,
5414  VkDeviceSize bufferImageGranularity,
5415  VkDeviceSize allocSize,
5416  VkDeviceSize allocAlignment,
5417  VmaSuballocationType allocType,
5418  VmaSuballocationList::const_iterator suballocItem,
5419  bool canMakeOtherLost,
5420  VkDeviceSize* pOffset,
5421  size_t* itemsToMakeLostCount,
5422  VkDeviceSize* pSumFreeSize,
5423  VkDeviceSize* pSumItemSize) const;
5424  // Given a free suballocation, merges it with the following one, which must also be free.
5425  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5426  // Releases the given suballocation, making it free.
5427  // Merges it with adjacent free suballocations if applicable.
5428  // Returns an iterator to the new free suballocation at this place.
5429  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5430  // Given a free suballocation, inserts it into the sorted list
5431  // m_FreeSuballocationsBySize if it is suitable.
5432  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5433  // Given a free suballocation, removes it from the sorted list
5434  // m_FreeSuballocationsBySize if it is suitable.
5435  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5436 };
5437 
5438 /*
5439 Allocations and their references in internal data structure look like this:
5440 
5441 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5442 
5443  0 +-------+
5444  | |
5445  | |
5446  | |
5447  +-------+
5448  | Alloc | 1st[m_1stNullItemsBeginCount]
5449  +-------+
5450  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5451  +-------+
5452  | ... |
5453  +-------+
5454  | Alloc | 1st[1st.size() - 1]
5455  +-------+
5456  | |
5457  | |
5458  | |
5459 GetSize() +-------+
5460 
5461 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5462 
5463  0 +-------+
5464  | Alloc | 2nd[0]
5465  +-------+
5466  | Alloc | 2nd[1]
5467  +-------+
5468  | ... |
5469  +-------+
5470  | Alloc | 2nd[2nd.size() - 1]
5471  +-------+
5472  | |
5473  | |
5474  | |
5475  +-------+
5476  | Alloc | 1st[m_1stNullItemsBeginCount]
5477  +-------+
5478  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5479  +-------+
5480  | ... |
5481  +-------+
5482  | Alloc | 1st[1st.size() - 1]
5483  +-------+
5484  | |
5485 GetSize() +-------+
5486 
5487 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5488 
5489  0 +-------+
5490  | |
5491  | |
5492  | |
5493  +-------+
5494  | Alloc | 1st[m_1stNullItemsBeginCount]
5495  +-------+
5496  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5497  +-------+
5498  | ... |
5499  +-------+
5500  | Alloc | 1st[1st.size() - 1]
5501  +-------+
5502  | |
5503  | |
5504  | |
5505  +-------+
5506  | Alloc | 2nd[2nd.size() - 1]
5507  +-------+
5508  | ... |
5509  +-------+
5510  | Alloc | 2nd[1]
5511  +-------+
5512  | Alloc | 2nd[0]
5513 GetSize() +-------+
5514 
5515 */
5516 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5517 {
5518  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5519 public:
5520  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5521  virtual ~VmaBlockMetadata_Linear();
5522  virtual void Init(VkDeviceSize size);
5523 
5524  virtual bool Validate() const;
5525  virtual size_t GetAllocationCount() const;
5526  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5527  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5528  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5529 
5530  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5531  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5532 
5533 #if VMA_STATS_STRING_ENABLED
5534  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5535 #endif
5536 
5537  virtual bool CreateAllocationRequest(
5538  uint32_t currentFrameIndex,
5539  uint32_t frameInUseCount,
5540  VkDeviceSize bufferImageGranularity,
5541  VkDeviceSize allocSize,
5542  VkDeviceSize allocAlignment,
5543  bool upperAddress,
5544  VmaSuballocationType allocType,
5545  bool canMakeOtherLost,
5546  uint32_t strategy,
5547  VmaAllocationRequest* pAllocationRequest);
5548 
5549  virtual bool MakeRequestedAllocationsLost(
5550  uint32_t currentFrameIndex,
5551  uint32_t frameInUseCount,
5552  VmaAllocationRequest* pAllocationRequest);
5553 
5554  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5555 
5556  virtual VkResult CheckCorruption(const void* pBlockData);
5557 
5558  virtual void Alloc(
5559  const VmaAllocationRequest& request,
5560  VmaSuballocationType type,
5561  VkDeviceSize allocSize,
5562  VmaAllocation hAllocation);
5563 
5564  virtual void Free(const VmaAllocation allocation);
5565  virtual void FreeAtOffset(VkDeviceSize offset);
5566 
5567 private:
5568  /*
5569  There are two suballocation vectors, used in ping-pong way.
5570  The one with index m_1stVectorIndex is called 1st.
5571  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5572  2nd can be non-empty only when 1st is not empty.
5573  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5574  */
5575  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5576 
5577  enum SECOND_VECTOR_MODE
5578  {
5579  SECOND_VECTOR_EMPTY,
5580  /*
5581  Suballocations in 2nd vector are created later than the ones in 1st, but they
5582  all have smaller offsets.
5583  */
5584  SECOND_VECTOR_RING_BUFFER,
5585  /*
5586  Suballocations in 2nd vector are upper side of double stack.
5587  They all have offsets higher than those in 1st vector.
5588  Top of this stack means smaller offsets, but higher indices in this vector.
5589  */
5590  SECOND_VECTOR_DOUBLE_STACK,
5591  };
5592 
5593  VkDeviceSize m_SumFreeSize;
5594  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5595  uint32_t m_1stVectorIndex;
5596  SECOND_VECTOR_MODE m_2ndVectorMode;
5597 
5598  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5599  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5600  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5601  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
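 // Swapping the roles of the two vectors (e.g. once 1st becomes empty and 2nd takes
 // over) is a single XOR of the index - a sketch of the idea, not verbatim code:
 //
 //   m_1stVectorIndex ^= 1; // old 2nd becomes 1st, old 1st becomes 2nd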
5602 
5603  // Number of items in 1st vector with hAllocation = null at the beginning.
5604  size_t m_1stNullItemsBeginCount;
5605  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5606  size_t m_1stNullItemsMiddleCount;
5607  // Number of items in 2nd vector with hAllocation = null.
5608  size_t m_2ndNullItemsCount;
5609 
5610  bool ShouldCompact1st() const;
5611  void CleanupAfterFree();
5612 
5613  bool CreateAllocationRequest_LowerAddress(
5614  uint32_t currentFrameIndex,
5615  uint32_t frameInUseCount,
5616  VkDeviceSize bufferImageGranularity,
5617  VkDeviceSize allocSize,
5618  VkDeviceSize allocAlignment,
5619  VmaSuballocationType allocType,
5620  bool canMakeOtherLost,
5621  uint32_t strategy,
5622  VmaAllocationRequest* pAllocationRequest);
5623  bool CreateAllocationRequest_UpperAddress(
5624  uint32_t currentFrameIndex,
5625  uint32_t frameInUseCount,
5626  VkDeviceSize bufferImageGranularity,
5627  VkDeviceSize allocSize,
5628  VkDeviceSize allocAlignment,
5629  VmaSuballocationType allocType,
5630  bool canMakeOtherLost,
5631  uint32_t strategy,
5632  VmaAllocationRequest* pAllocationRequest);
5633 };
5634 
5635 /*
5636 - GetSize() is the original size of the allocated memory block.
5637 - m_UsableSize is this size aligned down to a power of two.
5638  All allocations and calculations happen relative to m_UsableSize.
5639 - GetUnusableSize() is the difference between them.
5640  It is reported as a separate, unused range, not available for allocations.
5641 
5642 Node at level 0 has size = m_UsableSize.
5643 Each subsequent level contains nodes half the size of the previous level.
5644 m_LevelCount is the maximum number of levels to use in the current object.
5645 */
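// Worked example with illustrative numbers: for a 100 MiB block, m_UsableSize is
// 64 MiB (the largest power of two <= 100 MiB) and GetUnusableSize() is 36 MiB.
// Level 0 then holds one 64 MiB node, level 1 holds 32 MiB nodes, level 2 holds
// 16 MiB nodes, and so on, matching LevelToNodeSize(level) == m_UsableSize >> level.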
5646 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5647 {
5648  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5649 public:
5650  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5651  virtual ~VmaBlockMetadata_Buddy();
5652  virtual void Init(VkDeviceSize size);
5653 
5654  virtual bool Validate() const;
5655  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5656  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5657  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5658  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5659 
5660  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5661  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5662 
5663 #if VMA_STATS_STRING_ENABLED
5664  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5665 #endif
5666 
5667  virtual bool CreateAllocationRequest(
5668  uint32_t currentFrameIndex,
5669  uint32_t frameInUseCount,
5670  VkDeviceSize bufferImageGranularity,
5671  VkDeviceSize allocSize,
5672  VkDeviceSize allocAlignment,
5673  bool upperAddress,
5674  VmaSuballocationType allocType,
5675  bool canMakeOtherLost,
5676  uint32_t strategy,
5677  VmaAllocationRequest* pAllocationRequest);
5678 
5679  virtual bool MakeRequestedAllocationsLost(
5680  uint32_t currentFrameIndex,
5681  uint32_t frameInUseCount,
5682  VmaAllocationRequest* pAllocationRequest);
5683 
5684  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5685 
5686  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5687 
5688  virtual void Alloc(
5689  const VmaAllocationRequest& request,
5690  VmaSuballocationType type,
5691  VkDeviceSize allocSize,
5692  VmaAllocation hAllocation);
5693 
5694  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5695  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5696 
5697 private:
5698  static const VkDeviceSize MIN_NODE_SIZE = 32;
5699  static const size_t MAX_LEVELS = 30;
5700 
5701  struct ValidationContext
5702  {
5703  size_t calculatedAllocationCount;
5704  size_t calculatedFreeCount;
5705  VkDeviceSize calculatedSumFreeSize;
5706 
5707  ValidationContext() :
5708  calculatedAllocationCount(0),
5709  calculatedFreeCount(0),
5710  calculatedSumFreeSize(0) { }
5711  };
5712 
5713  struct Node
5714  {
5715  VkDeviceSize offset;
5716  enum TYPE
5717  {
5718  TYPE_FREE,
5719  TYPE_ALLOCATION,
5720  TYPE_SPLIT,
5721  TYPE_COUNT
5722  } type;
5723  Node* parent;
5724  Node* buddy;
5725 
5726  union
5727  {
5728  struct
5729  {
5730  Node* prev;
5731  Node* next;
5732  } free;
5733  struct
5734  {
5735  VmaAllocation alloc;
5736  } allocation;
5737  struct
5738  {
5739  Node* leftChild;
5740  } split;
5741  };
5742  };
5743 
5744  // Size of the memory block aligned down to a power of two.
5745  VkDeviceSize m_UsableSize;
5746  uint32_t m_LevelCount;
5747 
5748  Node* m_Root;
5749  struct {
5750  Node* front;
5751  Node* back;
5752  } m_FreeList[MAX_LEVELS];
5753  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5754  size_t m_AllocationCount;
5755  // Number of nodes in the tree with type == TYPE_FREE.
5756  size_t m_FreeCount;
5757  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5758  VkDeviceSize m_SumFreeSize;
5759 
5760  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5761  void DeleteNode(Node* node);
5762  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5763  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5764  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5765  // alloc is passed just for validation; it can be null.
5766  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5767  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5768  // Adds node to the front of FreeList at given level.
5769  // node->type must be FREE.
5770  // node->free.prev, next can be undefined.
5771  void AddToFreeListFront(uint32_t level, Node* node);
5772  // Removes node from FreeList at given level.
5773  // node->type must be FREE.
5774  // node->free.prev, next stay untouched.
5775  void RemoveFromFreeList(uint32_t level, Node* node);
5776 
5777 #if VMA_STATS_STRING_ENABLED
5778  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5779 #endif
5780 };
5781 
5782 /*
5783 Represents a single block of device memory (`VkDeviceMemory`) with all the
5784 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
5785 
5786 Thread-safety: This class must be externally synchronized.
5787 */
5788 class VmaDeviceMemoryBlock
5789 {
5790  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5791 public:
5792  VmaBlockMetadata* m_pMetadata;
5793 
5794  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5795 
5796  ~VmaDeviceMemoryBlock()
5797  {
5798  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5799  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5800  }
5801 
5802  // Always call after construction.
5803  void Init(
5804  VmaAllocator hAllocator,
5805  VmaPool hParentPool,
5806  uint32_t newMemoryTypeIndex,
5807  VkDeviceMemory newMemory,
5808  VkDeviceSize newSize,
5809  uint32_t id,
5810  uint32_t algorithm);
5811  // Always call before destruction.
5812  void Destroy(VmaAllocator allocator);
5813 
5814  VmaPool GetParentPool() const { return m_hParentPool; }
5815  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5816  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5817  uint32_t GetId() const { return m_Id; }
5818  void* GetMappedData() const { return m_pMappedData; }
5819 
5820  // Validates all data structures inside this object. If not valid, returns false.
5821  bool Validate() const;
5822 
5823  VkResult CheckCorruption(VmaAllocator hAllocator);
5824 
5825  // ppData can be null.
5826  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5827  void Unmap(VmaAllocator hAllocator, uint32_t count);
5828 
5829  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5830  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5831 
5832  VkResult BindBufferMemory(
5833  const VmaAllocator hAllocator,
5834  const VmaAllocation hAllocation,
5835  VkBuffer hBuffer);
5836  VkResult BindImageMemory(
5837  const VmaAllocator hAllocator,
5838  const VmaAllocation hAllocation,
5839  VkImage hImage);
5840 
5841 private:
5842  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
5843  uint32_t m_MemoryTypeIndex;
5844  uint32_t m_Id;
5845  VkDeviceMemory m_hMemory;
5846 
5847  /*
5848  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5849  Also protects m_MapCount, m_pMappedData.
5850  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5851  */
5852  VMA_MUTEX m_Mutex;
5853  uint32_t m_MapCount;
5854  void* m_pMappedData;
5855 };
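// Map()/Unmap() are reference-counted per block via m_MapCount, so callers pass the
// number of references to add or release. A usage sketch (hypothetical caller;
// assumes a valid hAllocator and pBlock of a host-visible memory type):
//
//   void* pData = VMA_NULL;
//   if(pBlock->Map(hAllocator, 1, &pData) == VK_SUCCESS)
//   {
//       // pData points to the beginning of the whole block.
//       pBlock->Unmap(hAllocator, 1); // must balance the Map() count
//   }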
5856 
5857 struct VmaPointerLess
5858 {
5859  bool operator()(const void* lhs, const void* rhs) const
5860  {
5861  return lhs < rhs;
5862  }
5863 };
5864 
5865 struct VmaDefragmentationMove
5866 {
5867  size_t srcBlockIndex;
5868  size_t dstBlockIndex;
5869  VkDeviceSize srcOffset;
5870  VkDeviceSize dstOffset;
5871  VkDeviceSize size;
5872 };
5873 
5874 class VmaDefragmentationAlgorithm;
5875 
5876 /*
5877 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5878 Vulkan memory type.
5879 
5880 Synchronized internally with a mutex.
5881 */
5882 struct VmaBlockVector
5883 {
5884  VMA_CLASS_NO_COPY(VmaBlockVector)
5885 public:
5886  VmaBlockVector(
5887  VmaAllocator hAllocator,
5888  VmaPool hParentPool,
5889  uint32_t memoryTypeIndex,
5890  VkDeviceSize preferredBlockSize,
5891  size_t minBlockCount,
5892  size_t maxBlockCount,
5893  VkDeviceSize bufferImageGranularity,
5894  uint32_t frameInUseCount,
5895  bool isCustomPool,
5896  bool explicitBlockSize,
5897  uint32_t algorithm);
5898  ~VmaBlockVector();
5899 
5900  VkResult CreateMinBlocks();
5901 
5902  VmaPool GetParentPool() const { return m_hParentPool; }
5903  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5904  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5905  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5906  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5907  uint32_t GetAlgorithm() const { return m_Algorithm; }
5908 
5909  void GetPoolStats(VmaPoolStats* pStats);
5910 
5911  bool IsEmpty() const { return m_Blocks.empty(); }
5912  bool IsCorruptionDetectionEnabled() const;
5913 
5914  VkResult Allocate(
5915  uint32_t currentFrameIndex,
5916  VkDeviceSize size,
5917  VkDeviceSize alignment,
5918  const VmaAllocationCreateInfo& createInfo,
5919  VmaSuballocationType suballocType,
5920  size_t allocationCount,
5921  VmaAllocation* pAllocations);
5922 
5923  void Free(
5924  VmaAllocation hAllocation);
5925 
5926  // Adds statistics of this BlockVector to pStats.
5927  void AddStats(VmaStats* pStats);
5928 
5929 #if VMA_STATS_STRING_ENABLED
5930  void PrintDetailedMap(class VmaJsonWriter& json);
5931 #endif
5932 
5933  void MakePoolAllocationsLost(
5934  uint32_t currentFrameIndex,
5935  size_t* pLostAllocationCount);
5936  VkResult CheckCorruption();
5937 
5938  // Saves results in pCtx->res.
5939  void Defragment(
5940  class VmaBlockVectorDefragmentationContext* pCtx,
5941  VmaDefragmentationStats* pStats,
5942  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5943  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5944  VkCommandBuffer commandBuffer);
5945  void DefragmentationEnd(
5946  class VmaBlockVectorDefragmentationContext* pCtx,
5947  VmaDefragmentationStats* pStats);
5948 
5950  // To be used only while m_Mutex is locked. Used during defragmentation.
5951 
5952  size_t GetBlockCount() const { return m_Blocks.size(); }
5953  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5954  size_t CalcAllocationCount() const;
5955  bool IsBufferImageGranularityConflictPossible() const;
5956 
5957 private:
5958  friend class VmaDefragmentationAlgorithm_Generic;
5959 
5960  const VmaAllocator m_hAllocator;
5961  const VmaPool m_hParentPool;
5962  const uint32_t m_MemoryTypeIndex;
5963  const VkDeviceSize m_PreferredBlockSize;
5964  const size_t m_MinBlockCount;
5965  const size_t m_MaxBlockCount;
5966  const VkDeviceSize m_BufferImageGranularity;
5967  const uint32_t m_FrameInUseCount;
5968  const bool m_IsCustomPool;
5969  const bool m_ExplicitBlockSize;
5970  const uint32_t m_Algorithm;
5971  /* There can be at most one block that is completely empty - a
5972  hysteresis to avoid the pessimistic case of alternating creation and destruction
5973  of a VkDeviceMemory. */
5974  bool m_HasEmptyBlock;
5975  VMA_RW_MUTEX m_Mutex;
5976  // Incrementally sorted by sumFreeSize, ascending.
5977  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5978  uint32_t m_NextBlockId;
5979 
5980  VkDeviceSize CalcMaxBlockSize() const;
5981 
5982  // Finds and removes given block from vector.
5983  void Remove(VmaDeviceMemoryBlock* pBlock);
5984 
5985  // Performs single step in sorting m_Blocks. They may not be fully sorted
5986  // after this call.
5987  void IncrementallySortBlocks();
5988 
5989  VkResult AllocatePage(
5990  uint32_t currentFrameIndex,
5991  VkDeviceSize size,
5992  VkDeviceSize alignment,
5993  const VmaAllocationCreateInfo& createInfo,
5994  VmaSuballocationType suballocType,
5995  VmaAllocation* pAllocation);
5996 
5997  // To be used only without the VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag.
5998  VkResult AllocateFromBlock(
5999  VmaDeviceMemoryBlock* pBlock,
6000  uint32_t currentFrameIndex,
6001  VkDeviceSize size,
6002  VkDeviceSize alignment,
6003  VmaAllocationCreateFlags allocFlags,
6004  void* pUserData,
6005  VmaSuballocationType suballocType,
6006  uint32_t strategy,
6007  VmaAllocation* pAllocation);
6008 
6009  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6010 
6011  // Saves result to pCtx->res.
6012  void ApplyDefragmentationMovesCpu(
6013  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6014  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6015  // Saves result to pCtx->res.
6016  void ApplyDefragmentationMovesGpu(
6017  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6018  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6019  VkCommandBuffer commandBuffer);
6020 
6021  /*
6022  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6023  - updated with new data.
6024  */
6025  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6026 };
6027 
6028 struct VmaPool_T
6029 {
6030  VMA_CLASS_NO_COPY(VmaPool_T)
6031 public:
6032  VmaBlockVector m_BlockVector;
6033 
6034  VmaPool_T(
6035  VmaAllocator hAllocator,
6036  const VmaPoolCreateInfo& createInfo,
6037  VkDeviceSize preferredBlockSize);
6038  ~VmaPool_T();
6039 
6040  uint32_t GetId() const { return m_Id; }
6041  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6042 
6043 #if VMA_STATS_STRING_ENABLED
6044  //void PrintDetailedMap(class VmaStringBuilder& sb);
6045 #endif
6046 
6047 private:
6048  uint32_t m_Id;
6049 };
6050 
6051 /*
6052 Performs defragmentation:
6053 
6054 - Updates `pBlockVector->m_pMetadata`.
6055 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6056 - Does not move actual data, only returns requested moves as `moves`.
6057 */
6058 class VmaDefragmentationAlgorithm
6059 {
6060  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6061 public:
6062  VmaDefragmentationAlgorithm(
6063  VmaAllocator hAllocator,
6064  VmaBlockVector* pBlockVector,
6065  uint32_t currentFrameIndex) :
6066  m_hAllocator(hAllocator),
6067  m_pBlockVector(pBlockVector),
6068  m_CurrentFrameIndex(currentFrameIndex)
6069  {
6070  }
6071  virtual ~VmaDefragmentationAlgorithm()
6072  {
6073  }
6074 
6075  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6076  virtual void AddAll() = 0;
6077 
6078  virtual VkResult Defragment(
6079  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6080  VkDeviceSize maxBytesToMove,
6081  uint32_t maxAllocationsToMove) = 0;
6082 
6083  virtual VkDeviceSize GetBytesMoved() const = 0;
6084  virtual uint32_t GetAllocationsMoved() const = 0;
6085 
6086 protected:
6087  VmaAllocator const m_hAllocator;
6088  VmaBlockVector* const m_pBlockVector;
6089  const uint32_t m_CurrentFrameIndex;
6090 
6091  struct AllocationInfo
6092  {
6093  VmaAllocation m_hAllocation;
6094  VkBool32* m_pChanged;
6095 
6096  AllocationInfo() :
6097  m_hAllocation(VK_NULL_HANDLE),
6098  m_pChanged(VMA_NULL)
6099  {
6100  }
6101  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6102  m_hAllocation(hAlloc),
6103  m_pChanged(pChanged)
6104  {
6105  }
6106  };
6107 };
6108 
6109 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6110 {
6111  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6112 public:
6113  VmaDefragmentationAlgorithm_Generic(
6114  VmaAllocator hAllocator,
6115  VmaBlockVector* pBlockVector,
6116  uint32_t currentFrameIndex,
6117  bool overlappingMoveSupported);
6118  virtual ~VmaDefragmentationAlgorithm_Generic();
6119 
6120  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6121  virtual void AddAll() { m_AllAllocations = true; }
6122 
6123  virtual VkResult Defragment(
6124  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6125  VkDeviceSize maxBytesToMove,
6126  uint32_t maxAllocationsToMove);
6127 
6128  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6129  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6130 
6131 private:
6132  uint32_t m_AllocationCount;
6133  bool m_AllAllocations;
6134 
6135  VkDeviceSize m_BytesMoved;
6136  uint32_t m_AllocationsMoved;
6137 
6138  struct AllocationInfoSizeGreater
6139  {
6140  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6141  {
6142  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6143  }
6144  };
6145 
6146  struct AllocationInfoOffsetGreater
6147  {
6148  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6149  {
6150  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6151  }
6152  };
6153 
6154  struct BlockInfo
6155  {
6156  size_t m_OriginalBlockIndex;
6157  VmaDeviceMemoryBlock* m_pBlock;
6158  bool m_HasNonMovableAllocations;
6159  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6160 
6161  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6162  m_OriginalBlockIndex(SIZE_MAX),
6163  m_pBlock(VMA_NULL),
6164  m_HasNonMovableAllocations(true),
6165  m_Allocations(pAllocationCallbacks)
6166  {
6167  }
6168 
6169  void CalcHasNonMovableAllocations()
6170  {
6171  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6172  const size_t defragmentAllocCount = m_Allocations.size();
6173  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6174  }
6175 
6176  void SortAllocationsBySizeDescending()
6177  {
6178  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6179  }
6180 
6181  void SortAllocationsByOffsetDescending()
6182  {
6183  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6184  }
6185  };
6186 
6187  struct BlockPointerLess
6188  {
6189  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6190  {
6191  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6192  }
6193  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6194  {
6195  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6196  }
6197  };
6198 
6199  // 1. Blocks with some non-movable allocations go first.
6200  // 2. Blocks with smaller sumFreeSize go first.
6201  struct BlockInfoCompareMoveDestination
6202  {
6203  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6204  {
6205  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6206  {
6207  return true;
6208  }
6209  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6210  {
6211  return false;
6212  }
6213  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6214  {
6215  return true;
6216  }
6217  return false;
6218  }
6219  };
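 // Intended use (a sketch, assuming destination candidates are kept in m_Blocks):
 // sorting with this comparator fills the most constrained, most-full blocks first:
 //
 //   VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());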
6220 
6221  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6222  BlockInfoVector m_Blocks;
6223 
6224  VkResult DefragmentRound(
6225  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6226  VkDeviceSize maxBytesToMove,
6227  uint32_t maxAllocationsToMove);
6228 
6229  size_t CalcBlocksWithNonMovableCount() const;
6230 
6231  static bool MoveMakesSense(
6232  size_t dstBlockIndex, VkDeviceSize dstOffset,
6233  size_t srcBlockIndex, VkDeviceSize srcOffset);
6234 };
6235 
6236 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6237 {
6238  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6239 public:
6240  VmaDefragmentationAlgorithm_Fast(
6241  VmaAllocator hAllocator,
6242  VmaBlockVector* pBlockVector,
6243  uint32_t currentFrameIndex,
6244  bool overlappingMoveSupported);
6245  virtual ~VmaDefragmentationAlgorithm_Fast();
6246 
6247  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6248  virtual void AddAll() { m_AllAllocations = true; }
6249 
6250  virtual VkResult Defragment(
6251  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6252  VkDeviceSize maxBytesToMove,
6253  uint32_t maxAllocationsToMove);
6254 
6255  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6256  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6257 
6258 private:
6259  struct BlockInfo
6260  {
6261  size_t origBlockIndex;
6262  };
6263 
6264  class FreeSpaceDatabase
6265  {
6266  public:
6267  FreeSpaceDatabase()
6268  {
6269  FreeSpace s = {};
6270  s.blockInfoIndex = SIZE_MAX;
6271  for(size_t i = 0; i < MAX_COUNT; ++i)
6272  {
6273  m_FreeSpaces[i] = s;
6274  }
6275  }
6276 
6277  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6278  {
6279  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6280  {
6281  return;
6282  }
6283 
6284  // Find first invalid or the smallest structure.
6285  size_t bestIndex = SIZE_MAX;
6286  for(size_t i = 0; i < MAX_COUNT; ++i)
6287  {
6288  // Empty structure.
6289  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6290  {
6291  bestIndex = i;
6292  break;
6293  }
6294  if(m_FreeSpaces[i].size < size &&
6295  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6296  {
6297  bestIndex = i;
6298  }
6299  }
6300 
6301  if(bestIndex != SIZE_MAX)
6302  {
6303  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6304  m_FreeSpaces[bestIndex].offset = offset;
6305  m_FreeSpaces[bestIndex].size = size;
6306  }
6307  }
6308 
6309  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6310  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6311  {
6312  size_t bestIndex = SIZE_MAX;
6313  VkDeviceSize bestFreeSpaceAfter = 0;
6314  for(size_t i = 0; i < MAX_COUNT; ++i)
6315  {
6316  // Structure is valid.
6317  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6318  {
6319  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6320  // Allocation fits into this structure.
6321  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6322  {
6323  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6324  (dstOffset + size);
6325  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6326  {
6327  bestIndex = i;
6328  bestFreeSpaceAfter = freeSpaceAfter;
6329  }
6330  }
6331  }
6332  }
6333 
6334  if(bestIndex != SIZE_MAX)
6335  {
6336  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6337  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6338 
6339  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6340  {
6341  // Leave this structure for remaining empty space.
6342  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6343  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6344  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6345  }
6346  else
6347  {
6348  // This structure becomes invalid.
6349  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6350  }
6351 
6352  return true;
6353  }
6354 
6355  return false;
6356  }
6357 
6358  private:
6359  static const size_t MAX_COUNT = 4;
6360 
6361  struct FreeSpace
6362  {
6363  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6364  VkDeviceSize offset;
6365  VkDeviceSize size;
6366  } m_FreeSpaces[MAX_COUNT];
6367  };
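 // Worked example with illustrative numbers: after Register(0, /*offset*/ 256, /*size*/ 1024),
 // Fetch(/*alignment*/ 64, /*size*/ 512, ...) aligns offset 256 up to 256, verifies
 // 256 + 512 <= 256 + 1024, and returns blockInfoIndex 0 with dstOffset 256. The
 // remaining 512 bytes keep the entry alive as offset 768, size 512, provided
 // 512 >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER.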
6368 
6369  const bool m_OverlappingMoveSupported;
6370 
6371  uint32_t m_AllocationCount;
6372  bool m_AllAllocations;
6373 
6374  VkDeviceSize m_BytesMoved;
6375  uint32_t m_AllocationsMoved;
6376 
6377  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6378 
6379  void PreprocessMetadata();
6380  void PostprocessMetadata();
6381  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6382 };
6383 
6384 struct VmaBlockDefragmentationContext
6385 {
6386  enum BLOCK_FLAG
6387  {
6388  BLOCK_FLAG_USED = 0x00000001,
6389  };
6390  uint32_t flags;
6391  VkBuffer hBuffer;
6392 
6393  VmaBlockDefragmentationContext() :
6394  flags(0),
6395  hBuffer(VK_NULL_HANDLE)
6396  {
6397  }
6398 };
6399 
6400 class VmaBlockVectorDefragmentationContext
6401 {
6402  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6403 public:
6404  VkResult res;
6405  bool mutexLocked;
6406  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6407 
6408  VmaBlockVectorDefragmentationContext(
6409  VmaAllocator hAllocator,
6410  VmaPool hCustomPool, // Optional.
6411  VmaBlockVector* pBlockVector,
6412  uint32_t currFrameIndex,
6413  uint32_t flags);
6414  ~VmaBlockVectorDefragmentationContext();
6415 
6416  VmaPool GetCustomPool() const { return m_hCustomPool; }
6417  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6418  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6419 
6420  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6421  void AddAll() { m_AllAllocations = true; }
6422 
6423  void Begin(bool overlappingMoveSupported);
6424 
6425 private:
6426  const VmaAllocator m_hAllocator;
6427  // Null if not from custom pool.
6428  const VmaPool m_hCustomPool;
6429  // Redundant, kept for convenience to avoid fetching it from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6430  VmaBlockVector* const m_pBlockVector;
6431  const uint32_t m_CurrFrameIndex;
6432  const uint32_t m_AlgorithmFlags;
6433  // Owner of this object.
6434  VmaDefragmentationAlgorithm* m_pAlgorithm;
6435 
6436  struct AllocInfo
6437  {
6438  VmaAllocation hAlloc;
6439  VkBool32* pChanged;
6440  };
6441  // Used between constructor and Begin.
6442  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6443  bool m_AllAllocations;
6444 };
6445 
6446 struct VmaDefragmentationContext_T
6447 {
6448 private:
6449  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6450 public:
6451  VmaDefragmentationContext_T(
6452  VmaAllocator hAllocator,
6453  uint32_t currFrameIndex,
6454  uint32_t flags,
6455  VmaDefragmentationStats* pStats);
6456  ~VmaDefragmentationContext_T();
6457 
6458  void AddPools(uint32_t poolCount, VmaPool* pPools);
6459  void AddAllocations(
6460  uint32_t allocationCount,
6461  VmaAllocation* pAllocations,
6462  VkBool32* pAllocationsChanged);
6463 
6464  /*
6465  Returns:
6466  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6467  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6468  - Negative value if an error occurred; the object can be destroyed immediately. (A usage sketch of the public API follows this struct.)
6469  */
6470  VkResult Defragment(
6471  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6472  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6473  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6474 
6475 private:
6476  const VmaAllocator m_hAllocator;
6477  const uint32_t m_CurrFrameIndex;
6478  const uint32_t m_Flags;
6479  VmaDefragmentationStats* const m_pStats;
6480  // Owner of these objects.
6481  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6482  // Owner of these objects.
6483  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6484 };
6485 
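
// [Editorial sketch, not part of the library.] A minimal sketch of the public API
// that drives this context, matching the VK_SUCCESS / VK_NOT_READY contract of
// Defragment() documented above. Assumes a valid `allocator` and an array of
// allocations; commandBuffer is left null, so only CPU-side moves are performed.
#if 0 // Illustrative only.
static void ExampleDefragmentCpu(VmaAllocator allocator, VmaAllocation* allocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // VK_NOT_READY would mean GPU work was recorded into defragInfo.commandBuffer
    // and must complete first; with no command buffer, CPU moves finish here.
    if(res >= 0)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
    }
    // Buffers/images bound to moved allocations must be recreated/rebound by the caller.
}
#endif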
6486 #if VMA_RECORDING_ENABLED
6487 
6488 class VmaRecorder
6489 {
6490 public:
6491  VmaRecorder();
6492  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6493  void WriteConfiguration(
6494  const VkPhysicalDeviceProperties& devProps,
6495  const VkPhysicalDeviceMemoryProperties& memProps,
6496  bool dedicatedAllocationExtensionEnabled);
6497  ~VmaRecorder();
6498 
6499  void RecordCreateAllocator(uint32_t frameIndex);
6500  void RecordDestroyAllocator(uint32_t frameIndex);
6501  void RecordCreatePool(uint32_t frameIndex,
6502  const VmaPoolCreateInfo& createInfo,
6503  VmaPool pool);
6504  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6505  void RecordAllocateMemory(uint32_t frameIndex,
6506  const VkMemoryRequirements& vkMemReq,
6507  const VmaAllocationCreateInfo& createInfo,
6508  VmaAllocation allocation);
6509  void RecordAllocateMemoryPages(uint32_t frameIndex,
6510  const VkMemoryRequirements& vkMemReq,
6511  const VmaAllocationCreateInfo& createInfo,
6512  uint64_t allocationCount,
6513  const VmaAllocation* pAllocations);
6514  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6515  const VkMemoryRequirements& vkMemReq,
6516  bool requiresDedicatedAllocation,
6517  bool prefersDedicatedAllocation,
6518  const VmaAllocationCreateInfo& createInfo,
6519  VmaAllocation allocation);
6520  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6521  const VkMemoryRequirements& vkMemReq,
6522  bool requiresDedicatedAllocation,
6523  bool prefersDedicatedAllocation,
6524  const VmaAllocationCreateInfo& createInfo,
6525  VmaAllocation allocation);
6526  void RecordFreeMemory(uint32_t frameIndex,
6527  VmaAllocation allocation);
6528  void RecordFreeMemoryPages(uint32_t frameIndex,
6529  uint64_t allocationCount,
6530  const VmaAllocation* pAllocations);
6531  void RecordResizeAllocation(
6532  uint32_t frameIndex,
6533  VmaAllocation allocation,
6534  VkDeviceSize newSize);
6535  void RecordSetAllocationUserData(uint32_t frameIndex,
6536  VmaAllocation allocation,
6537  const void* pUserData);
6538  void RecordCreateLostAllocation(uint32_t frameIndex,
6539  VmaAllocation allocation);
6540  void RecordMapMemory(uint32_t frameIndex,
6541  VmaAllocation allocation);
6542  void RecordUnmapMemory(uint32_t frameIndex,
6543  VmaAllocation allocation);
6544  void RecordFlushAllocation(uint32_t frameIndex,
6545  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6546  void RecordInvalidateAllocation(uint32_t frameIndex,
6547  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6548  void RecordCreateBuffer(uint32_t frameIndex,
6549  const VkBufferCreateInfo& bufCreateInfo,
6550  const VmaAllocationCreateInfo& allocCreateInfo,
6551  VmaAllocation allocation);
6552  void RecordCreateImage(uint32_t frameIndex,
6553  const VkImageCreateInfo& imageCreateInfo,
6554  const VmaAllocationCreateInfo& allocCreateInfo,
6555  VmaAllocation allocation);
6556  void RecordDestroyBuffer(uint32_t frameIndex,
6557  VmaAllocation allocation);
6558  void RecordDestroyImage(uint32_t frameIndex,
6559  VmaAllocation allocation);
6560  void RecordTouchAllocation(uint32_t frameIndex,
6561  VmaAllocation allocation);
6562  void RecordGetAllocationInfo(uint32_t frameIndex,
6563  VmaAllocation allocation);
6564  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6565  VmaPool pool);
6566  void RecordDefragmentationBegin(uint32_t frameIndex,
6567  const VmaDefragmentationInfo2& info,
6568  VmaDefragmentationContext ctx);
6569  void RecordDefragmentationEnd(uint32_t frameIndex,
6570  VmaDefragmentationContext ctx);
6571 
6572 private:
6573  struct CallParams
6574  {
6575  uint32_t threadId;
6576  double time;
6577  };
6578 
6579  class UserDataString
6580  {
6581  public:
6582  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6583  const char* GetString() const { return m_Str; }
6584 
6585  private:
6586  char m_PtrStr[17];
6587  const char* m_Str;
6588  };
6589 
6590  bool m_UseMutex;
6591  VmaRecordFlags m_Flags;
6592  FILE* m_File;
6593  VMA_MUTEX m_FileMutex;
6594  int64_t m_Freq;
6595  int64_t m_StartCounter;
6596 
6597  void GetBasicParams(CallParams& outParams);
6598 
6599  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6600  template<typename T>
6601  void PrintPointerList(uint64_t count, const T* pItems)
6602  {
6603  if(count)
6604  {
6605  fprintf(m_File, "%p", pItems[0]);
6606  for(uint64_t i = 1; i < count; ++i)
6607  {
6608  fprintf(m_File, " %p", pItems[i]);
6609  }
6610  }
6611  }
6612 
6613  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6614  void Flush();
6615 };
6616 
6617 #endif // #if VMA_RECORDING_ENABLED
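
// [Editorial sketch, not part of the library.] Recording is enabled when the
// allocator is created, via VmaAllocatorCreateInfo::pRecordSettings; the recorder
// above then serializes every call into the given file. Assumes valid device handles.
#if 0 // Illustrative only.
static VkResult ExampleCreateAllocatorWithRecording(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Keeps the file usable after a crash.
    recordSettings.pFilePath = "vma_recording.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings; // Requires VMA_RECORDING_ENABLED == 1.
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}
#endif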
6618 
6619 /*
6620 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6621 */
6622 class VmaAllocationObjectAllocator
6623 {
6624  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6625 public:
6626  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6627 
6628  VmaAllocation Allocate();
6629  void Free(VmaAllocation hAlloc);
6630 
6631 private:
6632  VMA_MUTEX m_Mutex;
6633  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6634 };
6635 
6636 // Main allocator object.
6637 struct VmaAllocator_T
6638 {
6639  VMA_CLASS_NO_COPY(VmaAllocator_T)
6640 public:
6641  bool m_UseMutex;
6642  bool m_UseKhrDedicatedAllocation;
6643  VkDevice m_hDevice;
6644  bool m_AllocationCallbacksSpecified;
6645  VkAllocationCallbacks m_AllocationCallbacks;
6646  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6647  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6648 
6649  // Remaining number of bytes available under the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6650  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6651  VMA_MUTEX m_HeapSizeLimitMutex;
6652 
6653  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6654  VkPhysicalDeviceMemoryProperties m_MemProps;
6655 
6656  // Default pools.
6657  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6658 
6659  // Each vector is sorted by memory (handle value).
6660  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6661  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6662  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6663 
6664  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6665  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6666  ~VmaAllocator_T();
6667 
6668  const VkAllocationCallbacks* GetAllocationCallbacks() const
6669  {
6670  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6671  }
6672  const VmaVulkanFunctions& GetVulkanFunctions() const
6673  {
6674  return m_VulkanFunctions;
6675  }
6676 
6677  VkDeviceSize GetBufferImageGranularity() const
6678  {
6679  return VMA_MAX(
6680  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6681  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6682  }
6683 
6684  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6685  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6686 
6687  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6688  {
6689  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6690  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6691  }
6692  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6693  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6694  {
6695  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6696  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6697  }
6698  // Minimum alignment for all allocations in specific memory type. (A map/flush sketch follows this struct.)
6699  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6700  {
6701  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6702  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6703  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6704  }
6705 
6706  bool IsIntegratedGpu() const
6707  {
6708  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6709  }
6710 
6711 #if VMA_RECORDING_ENABLED
6712  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6713 #endif
6714 
6715  void GetBufferMemoryRequirements(
6716  VkBuffer hBuffer,
6717  VkMemoryRequirements& memReq,
6718  bool& requiresDedicatedAllocation,
6719  bool& prefersDedicatedAllocation) const;
6720  void GetImageMemoryRequirements(
6721  VkImage hImage,
6722  VkMemoryRequirements& memReq,
6723  bool& requiresDedicatedAllocation,
6724  bool& prefersDedicatedAllocation) const;
6725 
6726  // Main allocation function.
6727  VkResult AllocateMemory(
6728  const VkMemoryRequirements& vkMemReq,
6729  bool requiresDedicatedAllocation,
6730  bool prefersDedicatedAllocation,
6731  VkBuffer dedicatedBuffer,
6732  VkImage dedicatedImage,
6733  const VmaAllocationCreateInfo& createInfo,
6734  VmaSuballocationType suballocType,
6735  size_t allocationCount,
6736  VmaAllocation* pAllocations);
6737 
6738  // Main deallocation function.
6739  void FreeMemory(
6740  size_t allocationCount,
6741  const VmaAllocation* pAllocations);
6742 
6743  VkResult ResizeAllocation(
6744  const VmaAllocation alloc,
6745  VkDeviceSize newSize);
6746 
6747  void CalculateStats(VmaStats* pStats);
6748 
6749 #if VMA_STATS_STRING_ENABLED
6750  void PrintDetailedMap(class VmaJsonWriter& json);
6751 #endif
6752 
6753  VkResult DefragmentationBegin(
6754  const VmaDefragmentationInfo2& info,
6755  VmaDefragmentationStats* pStats,
6756  VmaDefragmentationContext* pContext);
6757  VkResult DefragmentationEnd(
6758  VmaDefragmentationContext context);
6759 
6760  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6761  bool TouchAllocation(VmaAllocation hAllocation);
6762 
6763  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6764  void DestroyPool(VmaPool pool);
6765  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6766 
6767  void SetCurrentFrameIndex(uint32_t frameIndex);
6768  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6769 
6770  void MakePoolAllocationsLost(
6771  VmaPool hPool,
6772  size_t* pLostAllocationCount);
6773  VkResult CheckPoolCorruption(VmaPool hPool);
6774  VkResult CheckCorruption(uint32_t memoryTypeBits);
6775 
6776  void CreateLostAllocation(VmaAllocation* pAllocation);
6777 
6778  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6779  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6780 
6781  VkResult Map(VmaAllocation hAllocation, void** ppData);
6782  void Unmap(VmaAllocation hAllocation);
6783 
6784  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6785  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6786 
6787  void FlushOrInvalidateAllocation(
6788  VmaAllocation hAllocation,
6789  VkDeviceSize offset, VkDeviceSize size,
6790  VMA_CACHE_OPERATION op);
6791 
6792  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6793 
6794  /*
6795  Returns a bit mask of memory types that can support defragmentation on GPU,
6796  i.e. those that allow creation of the buffer required for copy operations.
6797  */
6798  uint32_t GetGpuDefragmentationMemoryTypeBits();
6799 
6800 private:
6801  VkDeviceSize m_PreferredLargeHeapBlockSize;
6802 
6803  VkPhysicalDevice m_PhysicalDevice;
6804  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6805  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
6806 
6807  VMA_RW_MUTEX m_PoolsMutex;
6808  // Protected by m_PoolsMutex. Sorted by pointer value.
6809  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6810  uint32_t m_NextPoolId;
6811 
6812  VmaVulkanFunctions m_VulkanFunctions;
6813 
6814 #if VMA_RECORDING_ENABLED
6815  VmaRecorder* m_pRecorder;
6816 #endif
6817 
6818  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6819 
6820  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6821 
6822  VkResult AllocateMemoryOfType(
6823  VkDeviceSize size,
6824  VkDeviceSize alignment,
6825  bool dedicatedAllocation,
6826  VkBuffer dedicatedBuffer,
6827  VkImage dedicatedImage,
6828  const VmaAllocationCreateInfo& createInfo,
6829  uint32_t memTypeIndex,
6830  VmaSuballocationType suballocType,
6831  size_t allocationCount,
6832  VmaAllocation* pAllocations);
6833 
6834  // Helper function only to be used inside AllocateDedicatedMemory.
6835  VkResult AllocateDedicatedMemoryPage(
6836  VkDeviceSize size,
6837  VmaSuballocationType suballocType,
6838  uint32_t memTypeIndex,
6839  const VkMemoryAllocateInfo& allocInfo,
6840  bool map,
6841  bool isUserDataString,
6842  void* pUserData,
6843  VmaAllocation* pAllocation);
6844 
6845  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6846  VkResult AllocateDedicatedMemory(
6847  VkDeviceSize size,
6848  VmaSuballocationType suballocType,
6849  uint32_t memTypeIndex,
6850  bool map,
6851  bool isUserDataString,
6852  void* pUserData,
6853  VkBuffer dedicatedBuffer,
6854  VkImage dedicatedImage,
6855  size_t allocationCount,
6856  VmaAllocation* pAllocations);
6857 
6858  // Frees the given allocation, which must have been created as dedicated memory, and unregisters it.
6859  void FreeDedicatedMemory(VmaAllocation allocation);
6860 
6861  /*
6862  Calculates and returns a bit mask of memory types that can support defragmentation
6863  on GPU, i.e. those that allow creation of the buffer required for copy operations.
6864  */
6865  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
6866 };
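
// [Editorial sketch, not part of the library.] How the non-coherent logic above is
// typically exercised from the public API: for memory types where
// IsMemoryTypeNonCoherent() is true, host writes must be flushed, and the library
// rounds the range to nonCoherentAtomSize internally. Assumes `alloc` is host-visible.
#if 0 // Illustrative only.
static void ExampleWriteHostVisible(
    VmaAllocator allocator, VmaAllocation alloc, const void* data, size_t dataSize)
{
    void* pMapped = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &pMapped) == VK_SUCCESS)
    {
        memcpy(pMapped, data, dataSize);
        // No-op for HOST_COHERENT memory types; required for the non-coherent ones.
        vmaFlushAllocation(allocator, alloc, 0, dataSize);
        vmaUnmapMemory(allocator, alloc);
    }
}
#endif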
6867 
6868 ////////////////////////////////////////////////////////////////////////////////
6869 // Memory allocation #2 after VmaAllocator_T definition
6870 
6871 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6872 {
6873  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6874 }
6875 
6876 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6877 {
6878  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6879 }
6880 
6881 template<typename T>
6882 static T* VmaAllocate(VmaAllocator hAllocator)
6883 {
6884  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6885 }
6886 
6887 template<typename T>
6888 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6889 {
6890  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6891 }
6892 
6893 template<typename T>
6894 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6895 {
6896  if(ptr != VMA_NULL)
6897  {
6898  ptr->~T();
6899  VmaFree(hAllocator, ptr);
6900  }
6901 }
6902 
6903 template<typename T>
6904 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6905 {
6906  if(ptr != VMA_NULL)
6907  {
6908  for(size_t i = count; i--; )
6909  ptr[i].~T();
6910  VmaFree(hAllocator, ptr);
6911  }
6912 }
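
// [Editorial note] These templates pair with the vma_new / vma_new_array macros
// defined earlier in this file: memory comes from the user-provided
// VkAllocationCallbacks, objects are constructed/destroyed in place, and
// vma_delete_array destroys elements in reverse order of construction.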
6913 
6914 ////////////////////////////////////////////////////////////////////////////////
6915 // VmaStringBuilder
6916 
6917 #if VMA_STATS_STRING_ENABLED
6918 
6919 class VmaStringBuilder
6920 {
6921 public:
6922  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6923  size_t GetLength() const { return m_Data.size(); }
6924  const char* GetData() const { return m_Data.data(); }
6925 
6926  void Add(char ch) { m_Data.push_back(ch); }
6927  void Add(const char* pStr);
6928  void AddNewLine() { Add('\n'); }
6929  void AddNumber(uint32_t num);
6930  void AddNumber(uint64_t num);
6931  void AddPointer(const void* ptr);
6932 
6933 private:
6934  VmaVector< char, VmaStlAllocator<char> > m_Data;
6935 };
6936 
6937 void VmaStringBuilder::Add(const char* pStr)
6938 {
6939  const size_t strLen = strlen(pStr);
6940  if(strLen > 0)
6941  {
6942  const size_t oldCount = m_Data.size();
6943  m_Data.resize(oldCount + strLen);
6944  memcpy(m_Data.data() + oldCount, pStr, strLen);
6945  }
6946 }
6947 
6948 void VmaStringBuilder::AddNumber(uint32_t num)
6949 {
6950  char buf[11];
6951  VmaUint32ToStr(buf, sizeof(buf), num);
6952  Add(buf);
6953 }
6954 
6955 void VmaStringBuilder::AddNumber(uint64_t num)
6956 {
6957  char buf[21];
6958  VmaUint64ToStr(buf, sizeof(buf), num);
6959  Add(buf);
6960 }
6961 
6962 void VmaStringBuilder::AddPointer(const void* ptr)
6963 {
6964  char buf[21];
6965  VmaPtrToStr(buf, sizeof(buf), ptr);
6966  Add(buf);
6967 }
6968 
6969 #endif // #if VMA_STATS_STRING_ENABLED
6970 
6971 ////////////////////////////////////////////////////////////////////////////////
6972 // VmaJsonWriter
6973 
6974 #if VMA_STATS_STRING_ENABLED
6975 
6976 class VmaJsonWriter
6977 {
6978  VMA_CLASS_NO_COPY(VmaJsonWriter)
6979 public:
6980  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6981  ~VmaJsonWriter();
6982 
6983  void BeginObject(bool singleLine = false);
6984  void EndObject();
6985 
6986  void BeginArray(bool singleLine = false);
6987  void EndArray();
6988 
6989  void WriteString(const char* pStr);
6990  void BeginString(const char* pStr = VMA_NULL);
6991  void ContinueString(const char* pStr);
6992  void ContinueString(uint32_t n);
6993  void ContinueString(uint64_t n);
6994  void ContinueString_Pointer(const void* ptr);
6995  void EndString(const char* pStr = VMA_NULL);
6996 
6997  void WriteNumber(uint32_t n);
6998  void WriteNumber(uint64_t n);
6999  void WriteBool(bool b);
7000  void WriteNull();
7001 
7002 private:
7003  static const char* const INDENT;
7004 
7005  enum COLLECTION_TYPE
7006  {
7007  COLLECTION_TYPE_OBJECT,
7008  COLLECTION_TYPE_ARRAY,
7009  };
7010  struct StackItem
7011  {
7012  COLLECTION_TYPE type;
7013  uint32_t valueCount;
7014  bool singleLineMode;
7015  };
7016 
7017  VmaStringBuilder& m_SB;
7018  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7019  bool m_InsideString;
7020 
7021  void BeginValue(bool isString);
7022  void WriteIndent(bool oneLess = false);
7023 };
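
// [Editorial sketch, not part of the library.] Inside an object, strings alternate
// as key/value through BeginValue()'s parity check below; the writer inserts
// ": " and ", " separators automatically. For example:
#if 0 // Illustrative only.
static void ExampleJson(VmaJsonWriter& json)
{
    json.BeginObject();
    json.WriteString("Size"); // Even valueCount inside an object: must be a string key.
    json.WriteNumber(256u);   // Odd valueCount: written as the value, after ": ".
    json.EndObject();
    // Produces:
    // {
    //  "Size": 256
    // }
}
#endif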
7024 
7025 const char* const VmaJsonWriter::INDENT = " ";
7026 
7027 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7028  m_SB(sb),
7029  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7030  m_InsideString(false)
7031 {
7032 }
7033 
7034 VmaJsonWriter::~VmaJsonWriter()
7035 {
7036  VMA_ASSERT(!m_InsideString);
7037  VMA_ASSERT(m_Stack.empty());
7038 }
7039 
7040 void VmaJsonWriter::BeginObject(bool singleLine)
7041 {
7042  VMA_ASSERT(!m_InsideString);
7043 
7044  BeginValue(false);
7045  m_SB.Add('{');
7046 
7047  StackItem item;
7048  item.type = COLLECTION_TYPE_OBJECT;
7049  item.valueCount = 0;
7050  item.singleLineMode = singleLine;
7051  m_Stack.push_back(item);
7052 }
7053 
7054 void VmaJsonWriter::EndObject()
7055 {
7056  VMA_ASSERT(!m_InsideString);
7057 
7058  WriteIndent(true);
7059  m_SB.Add('}');
7060 
7061  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7062  m_Stack.pop_back();
7063 }
7064 
7065 void VmaJsonWriter::BeginArray(bool singleLine)
7066 {
7067  VMA_ASSERT(!m_InsideString);
7068 
7069  BeginValue(false);
7070  m_SB.Add('[');
7071 
7072  StackItem item;
7073  item.type = COLLECTION_TYPE_ARRAY;
7074  item.valueCount = 0;
7075  item.singleLineMode = singleLine;
7076  m_Stack.push_back(item);
7077 }
7078 
7079 void VmaJsonWriter::EndArray()
7080 {
7081  VMA_ASSERT(!m_InsideString);
7082 
7083  WriteIndent(true);
7084  m_SB.Add(']');
7085 
7086  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7087  m_Stack.pop_back();
7088 }
7089 
7090 void VmaJsonWriter::WriteString(const char* pStr)
7091 {
7092  BeginString(pStr);
7093  EndString();
7094 }
7095 
7096 void VmaJsonWriter::BeginString(const char* pStr)
7097 {
7098  VMA_ASSERT(!m_InsideString);
7099 
7100  BeginValue(true);
7101  m_SB.Add('"');
7102  m_InsideString = true;
7103  if(pStr != VMA_NULL && pStr[0] != '\0')
7104  {
7105  ContinueString(pStr);
7106  }
7107 }
7108 
7109 void VmaJsonWriter::ContinueString(const char* pStr)
7110 {
7111  VMA_ASSERT(m_InsideString);
7112 
7113  const size_t strLen = strlen(pStr);
7114  for(size_t i = 0; i < strLen; ++i)
7115  {
7116  char ch = pStr[i];
7117  if(ch == '\\')
7118  {
7119  m_SB.Add("\\\\");
7120  }
7121  else if(ch == '"')
7122  {
7123  m_SB.Add("\\\"");
7124  }
7125  else if(ch >= 32)
7126  {
7127  m_SB.Add(ch);
7128  }
7129  else switch(ch)
7130  {
7131  case '\b':
7132  m_SB.Add("\\b");
7133  break;
7134  case '\f':
7135  m_SB.Add("\\f");
7136  break;
7137  case '\n':
7138  m_SB.Add("\\n");
7139  break;
7140  case '\r':
7141  m_SB.Add("\\r");
7142  break;
7143  case '\t':
7144  m_SB.Add("\\t");
7145  break;
7146  default:
7147  VMA_ASSERT(0 && "Character not currently supported.");
7148  break;
7149  }
7150  }
7151 }
7152 
7153 void VmaJsonWriter::ContinueString(uint32_t n)
7154 {
7155  VMA_ASSERT(m_InsideString);
7156  m_SB.AddNumber(n);
7157 }
7158 
7159 void VmaJsonWriter::ContinueString(uint64_t n)
7160 {
7161  VMA_ASSERT(m_InsideString);
7162  m_SB.AddNumber(n);
7163 }
7164 
7165 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7166 {
7167  VMA_ASSERT(m_InsideString);
7168  m_SB.AddPointer(ptr);
7169 }
7170 
7171 void VmaJsonWriter::EndString(const char* pStr)
7172 {
7173  VMA_ASSERT(m_InsideString);
7174  if(pStr != VMA_NULL && pStr[0] != '\0')
7175  {
7176  ContinueString(pStr);
7177  }
7178  m_SB.Add('"');
7179  m_InsideString = false;
7180 }
7181 
7182 void VmaJsonWriter::WriteNumber(uint32_t n)
7183 {
7184  VMA_ASSERT(!m_InsideString);
7185  BeginValue(false);
7186  m_SB.AddNumber(n);
7187 }
7188 
7189 void VmaJsonWriter::WriteNumber(uint64_t n)
7190 {
7191  VMA_ASSERT(!m_InsideString);
7192  BeginValue(false);
7193  m_SB.AddNumber(n);
7194 }
7195 
7196 void VmaJsonWriter::WriteBool(bool b)
7197 {
7198  VMA_ASSERT(!m_InsideString);
7199  BeginValue(false);
7200  m_SB.Add(b ? "true" : "false");
7201 }
7202 
7203 void VmaJsonWriter::WriteNull()
7204 {
7205  VMA_ASSERT(!m_InsideString);
7206  BeginValue(false);
7207  m_SB.Add("null");
7208 }
7209 
7210 void VmaJsonWriter::BeginValue(bool isString)
7211 {
7212  if(!m_Stack.empty())
7213  {
7214  StackItem& currItem = m_Stack.back();
7215  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7216  currItem.valueCount % 2 == 0)
7217  {
7218  VMA_ASSERT(isString);
7219  }
7220 
7221  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7222  currItem.valueCount % 2 != 0)
7223  {
7224  m_SB.Add(": ");
7225  }
7226  else if(currItem.valueCount > 0)
7227  {
7228  m_SB.Add(", ");
7229  WriteIndent();
7230  }
7231  else
7232  {
7233  WriteIndent();
7234  }
7235  ++currItem.valueCount;
7236  }
7237 }
7238 
7239 void VmaJsonWriter::WriteIndent(bool oneLess)
7240 {
7241  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7242  {
7243  m_SB.AddNewLine();
7244 
7245  size_t count = m_Stack.size();
7246  if(count > 0 && oneLess)
7247  {
7248  --count;
7249  }
7250  for(size_t i = 0; i < count; ++i)
7251  {
7252  m_SB.Add(INDENT);
7253  }
7254  }
7255 }
7256 
7257 #endif // #if VMA_STATS_STRING_ENABLED
7258 
7259 ////////////////////////////////////////////////////////////////////////////////
7260 
7261 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7262 {
7263  if(IsUserDataString())
7264  {
7265  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7266 
7267  FreeUserDataString(hAllocator);
7268 
7269  if(pUserData != VMA_NULL)
7270  {
7271  const char* const newStrSrc = (char*)pUserData;
7272  const size_t newStrLen = strlen(newStrSrc);
7273  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7274  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7275  m_pUserData = newStrDst;
7276  }
7277  }
7278  else
7279  {
7280  m_pUserData = pUserData;
7281  }
7282 }
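
// [Editorial sketch, not part of the library.] The string branch above is taken for
// allocations created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; the
// public entry point is vmaSetAllocationUserData(). The string literal here is an
// arbitrary example name.
#if 0 // Illustrative only.
static void ExampleUserData(VmaAllocator allocator, VmaAllocation alloc)
{
    // With the COPY_STRING flag set at creation time, the string is copied and owned
    // by the allocation; otherwise the raw pointer is stored verbatim.
    vmaSetAllocationUserData(allocator, alloc, (void*)"Texture: hero_diffuse");
}
#endif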
7283 
7284 void VmaAllocation_T::ChangeBlockAllocation(
7285  VmaAllocator hAllocator,
7286  VmaDeviceMemoryBlock* block,
7287  VkDeviceSize offset)
7288 {
7289  VMA_ASSERT(block != VMA_NULL);
7290  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7291 
7292  // Move mapping reference counter from old block to new block.
7293  if(block != m_BlockAllocation.m_Block)
7294  {
7295  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7296  if(IsPersistentMap())
7297  ++mapRefCount;
7298  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7299  block->Map(hAllocator, mapRefCount, VMA_NULL);
7300  }
7301 
7302  m_BlockAllocation.m_Block = block;
7303  m_BlockAllocation.m_Offset = offset;
7304 }
7305 
7306 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7307 {
7308  VMA_ASSERT(newSize > 0);
7309  m_Size = newSize;
7310 }
7311 
7312 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7313 {
7314  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7315  m_BlockAllocation.m_Offset = newOffset;
7316 }
7317 
7318 VkDeviceSize VmaAllocation_T::GetOffset() const
7319 {
7320  switch(m_Type)
7321  {
7322  case ALLOCATION_TYPE_BLOCK:
7323  return m_BlockAllocation.m_Offset;
7324  case ALLOCATION_TYPE_DEDICATED:
7325  return 0;
7326  default:
7327  VMA_ASSERT(0);
7328  return 0;
7329  }
7330 }
7331 
7332 VkDeviceMemory VmaAllocation_T::GetMemory() const
7333 {
7334  switch(m_Type)
7335  {
7336  case ALLOCATION_TYPE_BLOCK:
7337  return m_BlockAllocation.m_Block->GetDeviceMemory();
7338  case ALLOCATION_TYPE_DEDICATED:
7339  return m_DedicatedAllocation.m_hMemory;
7340  default:
7341  VMA_ASSERT(0);
7342  return VK_NULL_HANDLE;
7343  }
7344 }
7345 
7346 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7347 {
7348  switch(m_Type)
7349  {
7350  case ALLOCATION_TYPE_BLOCK:
7351  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7352  case ALLOCATION_TYPE_DEDICATED:
7353  return m_DedicatedAllocation.m_MemoryTypeIndex;
7354  default:
7355  VMA_ASSERT(0);
7356  return UINT32_MAX;
7357  }
7358 }
7359 
7360 void* VmaAllocation_T::GetMappedData() const
7361 {
7362  switch(m_Type)
7363  {
7364  case ALLOCATION_TYPE_BLOCK:
7365  if(m_MapCount != 0)
7366  {
7367  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7368  VMA_ASSERT(pBlockData != VMA_NULL);
7369  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7370  }
7371  else
7372  {
7373  return VMA_NULL;
7374  }
7375  break;
7376  case ALLOCATION_TYPE_DEDICATED:
7377  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7378  return m_DedicatedAllocation.m_pMappedData;
7379  default:
7380  VMA_ASSERT(0);
7381  return VMA_NULL;
7382  }
7383 }
7384 
7385 bool VmaAllocation_T::CanBecomeLost() const
7386 {
7387  switch(m_Type)
7388  {
7389  case ALLOCATION_TYPE_BLOCK:
7390  return m_BlockAllocation.m_CanBecomeLost;
7391  case ALLOCATION_TYPE_DEDICATED:
7392  return false;
7393  default:
7394  VMA_ASSERT(0);
7395  return false;
7396  }
7397 }
7398 
7399 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7400 {
7401  VMA_ASSERT(CanBecomeLost());
7402 
7403  /*
7404  Warning: This is a carefully designed algorithm.
7405  Do not modify unless you really know what you're doing :)
7406  */
7407  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7408  for(;;)
7409  {
7410  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7411  {
7412  VMA_ASSERT(0);
7413  return false;
7414  }
7415  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7416  {
7417  return false;
7418  }
7419  else // Last use time earlier than current time.
7420  {
7421  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7422  {
7423  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7424  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7425  return true;
7426  }
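            // [Editorial note] On failure, the compare-exchange refreshes
            // localLastUseFrameIndex with the current value, so the loop
            // re-evaluates the conditions above before retrying.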
7427  }
7428  }
7429 }
7430 
7431 #if VMA_STATS_STRING_ENABLED
7432 
7433 // Entries correspond to values of enum VmaSuballocationType.
7434 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7435  "FREE",
7436  "UNKNOWN",
7437  "BUFFER",
7438  "IMAGE_UNKNOWN",
7439  "IMAGE_LINEAR",
7440  "IMAGE_OPTIMAL",
7441 };
7442 
7443 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7444 {
7445  json.WriteString("Type");
7446  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7447 
7448  json.WriteString("Size");
7449  json.WriteNumber(m_Size);
7450 
7451  if(m_pUserData != VMA_NULL)
7452  {
7453  json.WriteString("UserData");
7454  if(IsUserDataString())
7455  {
7456  json.WriteString((const char*)m_pUserData);
7457  }
7458  else
7459  {
7460  json.BeginString();
7461  json.ContinueString_Pointer(m_pUserData);
7462  json.EndString();
7463  }
7464  }
7465 
7466  json.WriteString("CreationFrameIndex");
7467  json.WriteNumber(m_CreationFrameIndex);
7468 
7469  json.WriteString("LastUseFrameIndex");
7470  json.WriteNumber(GetLastUseFrameIndex());
7471 
7472  if(m_BufferImageUsage != 0)
7473  {
7474  json.WriteString("Usage");
7475  json.WriteNumber(m_BufferImageUsage);
7476  }
7477 }
7478 
7479 #endif
7480 
7481 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7482 {
7483  VMA_ASSERT(IsUserDataString());
7484  if(m_pUserData != VMA_NULL)
7485  {
7486  char* const oldStr = (char*)m_pUserData;
7487  const size_t oldStrLen = strlen(oldStr);
7488  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7489  m_pUserData = VMA_NULL;
7490  }
7491 }
7492 
7493 void VmaAllocation_T::BlockAllocMap()
7494 {
7495  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7496 
7497  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7498  {
7499  ++m_MapCount;
7500  }
7501  else
7502  {
7503  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7504  }
7505 }
7506 
7507 void VmaAllocation_T::BlockAllocUnmap()
7508 {
7509  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7510 
7511  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7512  {
7513  --m_MapCount;
7514  }
7515  else
7516  {
7517  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7518  }
7519 }
7520 
7521 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7522 {
7523  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7524 
7525  if(m_MapCount != 0)
7526  {
7527  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7528  {
7529  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7530  *ppData = m_DedicatedAllocation.m_pMappedData;
7531  ++m_MapCount;
7532  return VK_SUCCESS;
7533  }
7534  else
7535  {
7536  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7537  return VK_ERROR_MEMORY_MAP_FAILED;
7538  }
7539  }
7540  else
7541  {
7542  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7543  hAllocator->m_hDevice,
7544  m_DedicatedAllocation.m_hMemory,
7545  0, // offset
7546  VK_WHOLE_SIZE,
7547  0, // flags
7548  ppData);
7549  if(result == VK_SUCCESS)
7550  {
7551  m_DedicatedAllocation.m_pMappedData = *ppData;
7552  m_MapCount = 1;
7553  }
7554  return result;
7555  }
7556 }
7557 
7558 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7559 {
7560  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7561 
7562  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7563  {
7564  --m_MapCount;
7565  if(m_MapCount == 0)
7566  {
7567  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7568  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7569  hAllocator->m_hDevice,
7570  m_DedicatedAllocation.m_hMemory);
7571  }
7572  }
7573  else
7574  {
7575  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7576  }
7577 }
7578 
7579 #if VMA_STATS_STRING_ENABLED
7580 
7581 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7582 {
7583  json.BeginObject();
7584 
7585  json.WriteString("Blocks");
7586  json.WriteNumber(stat.blockCount);
7587 
7588  json.WriteString("Allocations");
7589  json.WriteNumber(stat.allocationCount);
7590 
7591  json.WriteString("UnusedRanges");
7592  json.WriteNumber(stat.unusedRangeCount);
7593 
7594  json.WriteString("UsedBytes");
7595  json.WriteNumber(stat.usedBytes);
7596 
7597  json.WriteString("UnusedBytes");
7598  json.WriteNumber(stat.unusedBytes);
7599 
7600  if(stat.allocationCount > 1)
7601  {
7602  json.WriteString("AllocationSize");
7603  json.BeginObject(true);
7604  json.WriteString("Min");
7605  json.WriteNumber(stat.allocationSizeMin);
7606  json.WriteString("Avg");
7607  json.WriteNumber(stat.allocationSizeAvg);
7608  json.WriteString("Max");
7609  json.WriteNumber(stat.allocationSizeMax);
7610  json.EndObject();
7611  }
7612 
7613  if(stat.unusedRangeCount > 1)
7614  {
7615  json.WriteString("UnusedRangeSize");
7616  json.BeginObject(true);
7617  json.WriteString("Min");
7618  json.WriteNumber(stat.unusedRangeSizeMin);
7619  json.WriteString("Avg");
7620  json.WriteNumber(stat.unusedRangeSizeAvg);
7621  json.WriteString("Max");
7622  json.WriteNumber(stat.unusedRangeSizeMax);
7623  json.EndObject();
7624  }
7625 
7626  json.EndObject();
7627 }
7628 
7629 #endif // #if VMA_STATS_STRING_ENABLED
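
// [Editorial sketch, not part of the library.] VmaPrintStatInfo() feeds the public
// JSON statistics dump. Typical use:
#if 0 // Illustrative only.
static void ExampleDumpStats(VmaAllocator allocator)
{
    char* pStatsString = VMA_NULL;
    vmaBuildStatsString(allocator, &pStatsString, VK_TRUE /* detailedMap */);
    // ... write pStatsString to a log or file ...
    vmaFreeStatsString(allocator, pStatsString);
}
#endif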
7630 
7631 struct VmaSuballocationItemSizeLess
7632 {
7633  bool operator()(
7634  const VmaSuballocationList::iterator lhs,
7635  const VmaSuballocationList::iterator rhs) const
7636  {
7637  return lhs->size < rhs->size;
7638  }
7639  bool operator()(
7640  const VmaSuballocationList::iterator lhs,
7641  VkDeviceSize rhsSize) const
7642  {
7643  return lhs->size < rhsSize;
7644  }
7645 };
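
// [Editorial note] The second operator() overload above lets the binary search in
// CreateAllocationRequest() below (VmaBinaryFindFirstNotLess) compare stored list
// iterators directly against a raw VkDeviceSize key, without building a dummy
// suballocation to search for.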
7646 
7647 
7648 ////////////////////////////////////////////////////////////////////////////////
7649 // class VmaBlockMetadata
7650 
7651 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7652  m_Size(0),
7653  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7654 {
7655 }
7656 
7657 #if VMA_STATS_STRING_ENABLED
7658 
7659 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7660  VkDeviceSize unusedBytes,
7661  size_t allocationCount,
7662  size_t unusedRangeCount) const
7663 {
7664  json.BeginObject();
7665 
7666  json.WriteString("TotalBytes");
7667  json.WriteNumber(GetSize());
7668 
7669  json.WriteString("UnusedBytes");
7670  json.WriteNumber(unusedBytes);
7671 
7672  json.WriteString("Allocations");
7673  json.WriteNumber((uint64_t)allocationCount);
7674 
7675  json.WriteString("UnusedRanges");
7676  json.WriteNumber((uint64_t)unusedRangeCount);
7677 
7678  json.WriteString("Suballocations");
7679  json.BeginArray();
7680 }
7681 
7682 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7683  VkDeviceSize offset,
7684  VmaAllocation hAllocation) const
7685 {
7686  json.BeginObject(true);
7687 
7688  json.WriteString("Offset");
7689  json.WriteNumber(offset);
7690 
7691  hAllocation->PrintParameters(json);
7692 
7693  json.EndObject();
7694 }
7695 
7696 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7697  VkDeviceSize offset,
7698  VkDeviceSize size) const
7699 {
7700  json.BeginObject(true);
7701 
7702  json.WriteString("Offset");
7703  json.WriteNumber(offset);
7704 
7705  json.WriteString("Type");
7706  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7707 
7708  json.WriteString("Size");
7709  json.WriteNumber(size);
7710 
7711  json.EndObject();
7712 }
7713 
7714 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7715 {
7716  json.EndArray();
7717  json.EndObject();
7718 }
7719 
7720 #endif // #if VMA_STATS_STRING_ENABLED
7721 
7722 ////////////////////////////////////////////////////////////////////////////////
7723 // class VmaBlockMetadata_Generic
7724 
7725 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7726  VmaBlockMetadata(hAllocator),
7727  m_FreeCount(0),
7728  m_SumFreeSize(0),
7729  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7730  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7731 {
7732 }
7733 
7734 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7735 {
7736 }
7737 
7738 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7739 {
7740  VmaBlockMetadata::Init(size);
7741 
7742  m_FreeCount = 1;
7743  m_SumFreeSize = size;
7744 
7745  VmaSuballocation suballoc = {};
7746  suballoc.offset = 0;
7747  suballoc.size = size;
7748  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7749  suballoc.hAllocation = VK_NULL_HANDLE;
7750 
7751  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7752  m_Suballocations.push_back(suballoc);
7753  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7754  --suballocItem;
7755  m_FreeSuballocationsBySize.push_back(suballocItem);
7756 }
7757 
7758 bool VmaBlockMetadata_Generic::Validate() const
7759 {
7760  VMA_VALIDATE(!m_Suballocations.empty());
7761 
7762  // Expected offset of new suballocation as calculated from previous ones.
7763  VkDeviceSize calculatedOffset = 0;
7764  // Expected number of free suballocations as calculated from traversing their list.
7765  uint32_t calculatedFreeCount = 0;
7766  // Expected sum size of free suballocations as calculated from traversing their list.
7767  VkDeviceSize calculatedSumFreeSize = 0;
7768  // Expected number of free suballocations that should be registered in
7769  // m_FreeSuballocationsBySize calculated from traversing their list.
7770  size_t freeSuballocationsToRegister = 0;
7771  // True if previous visited suballocation was free.
7772  bool prevFree = false;
7773 
7774  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7775  suballocItem != m_Suballocations.cend();
7776  ++suballocItem)
7777  {
7778  const VmaSuballocation& subAlloc = *suballocItem;
7779 
7780  // Actual offset of this suballocation doesn't match expected one.
7781  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7782 
7783  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7784  // Two adjacent free suballocations are invalid. They should be merged.
7785  VMA_VALIDATE(!prevFree || !currFree);
7786 
7787  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7788 
7789  if(currFree)
7790  {
7791  calculatedSumFreeSize += subAlloc.size;
7792  ++calculatedFreeCount;
7793  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7794  {
7795  ++freeSuballocationsToRegister;
7796  }
7797 
7798  // Margin required between allocations - every free space must be at least that large.
7799  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7800  }
7801  else
7802  {
7803  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7804  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7805 
7806  // Margin required between allocations - previous allocation must be free.
7807  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7808  }
7809 
7810  calculatedOffset += subAlloc.size;
7811  prevFree = currFree;
7812  }
7813 
7814  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7815  // match expected one.
7816  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7817 
7818  VkDeviceSize lastSize = 0;
7819  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7820  {
7821  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7822 
7823  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7824  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7825  // They must be sorted by size ascending.
7826  VMA_VALIDATE(suballocItem->size >= lastSize);
7827 
7828  lastSize = suballocItem->size;
7829  }
7830 
7831  // Check if totals match calculated values.
7832  VMA_VALIDATE(ValidateFreeSuballocationList());
7833  VMA_VALIDATE(calculatedOffset == GetSize());
7834  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7835  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7836 
7837  return true;
7838 }
7839 
7840 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7841 {
7842  if(!m_FreeSuballocationsBySize.empty())
7843  {
7844  return m_FreeSuballocationsBySize.back()->size;
7845  }
7846  else
7847  {
7848  return 0;
7849  }
7850 }
7851 
7852 bool VmaBlockMetadata_Generic::IsEmpty() const
7853 {
7854  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7855 }
7856 
7857 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7858 {
7859  outInfo.blockCount = 1;
7860 
7861  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7862  outInfo.allocationCount = rangeCount - m_FreeCount;
7863  outInfo.unusedRangeCount = m_FreeCount;
7864 
7865  outInfo.unusedBytes = m_SumFreeSize;
7866  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7867 
7868  outInfo.allocationSizeMin = UINT64_MAX;
7869  outInfo.allocationSizeMax = 0;
7870  outInfo.unusedRangeSizeMin = UINT64_MAX;
7871  outInfo.unusedRangeSizeMax = 0;
7872 
7873  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7874  suballocItem != m_Suballocations.cend();
7875  ++suballocItem)
7876  {
7877  const VmaSuballocation& suballoc = *suballocItem;
7878  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7879  {
7880  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7881  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7882  }
7883  else
7884  {
7885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7886  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7887  }
7888  }
7889 }
7890 
7891 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7892 {
7893  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7894 
7895  inoutStats.size += GetSize();
7896  inoutStats.unusedSize += m_SumFreeSize;
7897  inoutStats.allocationCount += rangeCount - m_FreeCount;
7898  inoutStats.unusedRangeCount += m_FreeCount;
7899  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7900 }
7901 
7902 #if VMA_STATS_STRING_ENABLED
7903 
7904 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7905 {
7906  PrintDetailedMap_Begin(json,
7907  m_SumFreeSize, // unusedBytes
7908  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7909  m_FreeCount); // unusedRangeCount
7910 
7911  size_t i = 0;
7912  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7913  suballocItem != m_Suballocations.cend();
7914  ++suballocItem, ++i)
7915  {
7916  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7917  {
7918  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7919  }
7920  else
7921  {
7922  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7923  }
7924  }
7925 
7926  PrintDetailedMap_End(json);
7927 }
7928 
7929 #endif // #if VMA_STATS_STRING_ENABLED
7930 
7931 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7932  uint32_t currentFrameIndex,
7933  uint32_t frameInUseCount,
7934  VkDeviceSize bufferImageGranularity,
7935  VkDeviceSize allocSize,
7936  VkDeviceSize allocAlignment,
7937  bool upperAddress,
7938  VmaSuballocationType allocType,
7939  bool canMakeOtherLost,
7940  uint32_t strategy,
7941  VmaAllocationRequest* pAllocationRequest)
7942 {
7943  VMA_ASSERT(allocSize > 0);
7944  VMA_ASSERT(!upperAddress);
7945  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7946  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7947  VMA_HEAVY_ASSERT(Validate());
7948 
7949  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7950 
7951  // There is not enough total free space in this block to fulfill the request: Early return.
7952  if(canMakeOtherLost == false &&
7953  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7954  {
7955  return false;
7956  }
7957 
7958  // New algorithm, efficiently searching freeSuballocationsBySize.
7959  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7960  if(freeSuballocCount > 0)
7961  {
7962  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7963  {
7964  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7965  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7966  m_FreeSuballocationsBySize.data(),
7967  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7968  allocSize + 2 * VMA_DEBUG_MARGIN,
7969  VmaSuballocationItemSizeLess());
7970  size_t index = it - m_FreeSuballocationsBySize.data();
7971  for(; index < freeSuballocCount; ++index)
7972  {
7973  if(CheckAllocation(
7974  currentFrameIndex,
7975  frameInUseCount,
7976  bufferImageGranularity,
7977  allocSize,
7978  allocAlignment,
7979  allocType,
7980  m_FreeSuballocationsBySize[index],
7981  false, // canMakeOtherLost
7982  &pAllocationRequest->offset,
7983  &pAllocationRequest->itemsToMakeLostCount,
7984  &pAllocationRequest->sumFreeSize,
7985  &pAllocationRequest->sumItemSize))
7986  {
7987  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7988  return true;
7989  }
7990  }
7991  }
7992  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7993  {
7994  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7995  it != m_Suballocations.end();
7996  ++it)
7997  {
7998  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7999  currentFrameIndex,
8000  frameInUseCount,
8001  bufferImageGranularity,
8002  allocSize,
8003  allocAlignment,
8004  allocType,
8005  it,
8006  false, // canMakeOtherLost
8007  &pAllocationRequest->offset,
8008  &pAllocationRequest->itemsToMakeLostCount,
8009  &pAllocationRequest->sumFreeSize,
8010  &pAllocationRequest->sumItemSize))
8011  {
8012  pAllocationRequest->item = it;
8013  return true;
8014  }
8015  }
8016  }
8017  else // WORST_FIT, FIRST_FIT
8018  {
8019  // Search starting from biggest suballocations.
8020  for(size_t index = freeSuballocCount; index--; )
8021  {
8022  if(CheckAllocation(
8023  currentFrameIndex,
8024  frameInUseCount,
8025  bufferImageGranularity,
8026  allocSize,
8027  allocAlignment,
8028  allocType,
8029  m_FreeSuballocationsBySize[index],
8030  false, // canMakeOtherLost
8031  &pAllocationRequest->offset,
8032  &pAllocationRequest->itemsToMakeLostCount,
8033  &pAllocationRequest->sumFreeSize,
8034  &pAllocationRequest->sumItemSize))
8035  {
8036  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8037  return true;
8038  }
8039  }
8040  }
8041  }
8042 
8043  if(canMakeOtherLost)
8044  {
8045  // Brute-force algorithm. TODO: Come up with something better.
8046 
8047  bool found = false;
8048  VmaAllocationRequest tmpAllocRequest = {};
8049  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8050  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8051  suballocIt != m_Suballocations.end();
8052  ++suballocIt)
8053  {
8054  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8055  suballocIt->hAllocation->CanBecomeLost())
8056  {
8057  if(CheckAllocation(
8058  currentFrameIndex,
8059  frameInUseCount,
8060  bufferImageGranularity,
8061  allocSize,
8062  allocAlignment,
8063  allocType,
8064  suballocIt,
8065  canMakeOtherLost,
8066  &tmpAllocRequest.offset,
8067  &tmpAllocRequest.itemsToMakeLostCount,
8068  &tmpAllocRequest.sumFreeSize,
8069  &tmpAllocRequest.sumItemSize))
8070  {
8071  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8072  {
8073  *pAllocationRequest = tmpAllocRequest;
8074  pAllocationRequest->item = suballocIt;
8075  break;
8076  }
8077  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8078  {
8079  *pAllocationRequest = tmpAllocRequest;
8080  pAllocationRequest->item = suballocIt;
8081  found = true;
8082  }
8083  }
8084  }
8085  }
8086 
8087  return found;
8088  }
8089 
8090  return false;
8091 }
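
// [Editorial sketch, not part of the library.] The strategy branches above are
// selected through the public VMA_ALLOCATION_CREATE_STRATEGY_* flags. For example,
// requesting the best-fit search over m_FreeSuballocationsBySize when creating a buffer:
#if 0 // Illustrative only.
static VkResult ExampleBestFitBuffer(
    VmaAllocator allocator, const VkBufferCreateInfo* pBufCreateInfo,
    VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    return vmaCreateBuffer(allocator, pBufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}
#endif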
8092 
8093 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8094  uint32_t currentFrameIndex,
8095  uint32_t frameInUseCount,
8096  VmaAllocationRequest* pAllocationRequest)
8097 {
8098  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8099 
8100  while(pAllocationRequest->itemsToMakeLostCount > 0)
8101  {
8102  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8103  {
8104  ++pAllocationRequest->item;
8105  }
8106  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8107  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8108  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8109  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8110  {
8111  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8112  --pAllocationRequest->itemsToMakeLostCount;
8113  }
8114  else
8115  {
8116  return false;
8117  }
8118  }
8119 
8120  VMA_HEAVY_ASSERT(Validate());
8121  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8122  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8123 
8124  return true;
8125 }
8126 
8127 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8128 {
8129  uint32_t lostAllocationCount = 0;
8130  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8131  it != m_Suballocations.end();
8132  ++it)
8133  {
8134  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8135  it->hAllocation->CanBecomeLost() &&
8136  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8137  {
8138  it = FreeSuballocation(it);
8139  ++lostAllocationCount;
8140  }
8141  }
8142  return lostAllocationCount;
8143 }
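
// [Editorial sketch, not part of the library.] Lost allocations are driven from the
// application each frame, roughly as below; it requires allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
#if 0 // Illustrative only.
static void ExamplePerFrame(VmaAllocator allocator, VmaAllocation alloc, uint32_t frameIndex)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation became lost: destroy the old resource and recreate it.
    }
}
#endif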
8144 
8145 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8146 {
8147  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8148  it != m_Suballocations.end();
8149  ++it)
8150  {
8151  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8152  {
8153  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8154  {
8155  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8156  return VK_ERROR_VALIDATION_FAILED_EXT;
8157  }
8158  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8159  {
8160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8161  return VK_ERROR_VALIDATION_FAILED_EXT;
8162  }
8163  }
8164  }
8165 
8166  return VK_SUCCESS;
8167 }
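
// [Editorial sketch, not part of the library.] The magic-value checks above only
// find anything when VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION is
// defined before including this file. The public entry point:
#if 0 // Illustrative only.
static void ExampleCheckCorruption(VmaAllocator allocator)
{
    // UINT32_MAX = check every memory type that supports corruption detection.
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_ERROR_FEATURE_NOT_PRESENT is returned when detection is not enabled.
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
}
#endif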
8168 
8169 void VmaBlockMetadata_Generic::Alloc(
8170  const VmaAllocationRequest& request,
8171  VmaSuballocationType type,
8172  VkDeviceSize allocSize,
8173  VmaAllocation hAllocation)
8174 {
8175  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8176  VMA_ASSERT(request.item != m_Suballocations.end());
8177  VmaSuballocation& suballoc = *request.item;
8178  // Given suballocation is a free block.
8179  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8180  // Given offset is inside this suballocation.
8181  VMA_ASSERT(request.offset >= suballoc.offset);
8182  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8183  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8184  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8185 
8186  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8187  // it to become used.
8188  UnregisterFreeSuballocation(request.item);
8189 
8190  suballoc.offset = request.offset;
8191  suballoc.size = allocSize;
8192  suballoc.type = type;
8193  suballoc.hAllocation = hAllocation;
8194 
8195  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8196  if(paddingEnd)
8197  {
8198  VmaSuballocation paddingSuballoc = {};
8199  paddingSuballoc.offset = request.offset + allocSize;
8200  paddingSuballoc.size = paddingEnd;
8201  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8202  VmaSuballocationList::iterator next = request.item;
8203  ++next;
8204  const VmaSuballocationList::iterator paddingEndItem =
8205  m_Suballocations.insert(next, paddingSuballoc);
8206  RegisterFreeSuballocation(paddingEndItem);
8207  }
8208 
8209  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8210  if(paddingBegin)
8211  {
8212  VmaSuballocation paddingSuballoc = {};
8213  paddingSuballoc.offset = request.offset - paddingBegin;
8214  paddingSuballoc.size = paddingBegin;
8215  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8216  const VmaSuballocationList::iterator paddingBeginItem =
8217  m_Suballocations.insert(request.item, paddingSuballoc);
8218  RegisterFreeSuballocation(paddingBeginItem);
8219  }
8220 
8221  // Update totals.
8222  m_FreeCount = m_FreeCount - 1;
8223  if(paddingBegin > 0)
8224  {
8225  ++m_FreeCount;
8226  }
8227  if(paddingEnd > 0)
8228  {
8229  ++m_FreeCount;
8230  }
8231  m_SumFreeSize -= allocSize;
8232 }
8233 
8234 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8235 {
8236  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8237  suballocItem != m_Suballocations.end();
8238  ++suballocItem)
8239  {
8240  VmaSuballocation& suballoc = *suballocItem;
8241  if(suballoc.hAllocation == allocation)
8242  {
8243  FreeSuballocation(suballocItem);
8244  VMA_HEAVY_ASSERT(Validate());
8245  return;
8246  }
8247  }
8248  VMA_ASSERT(0 && "Not found!");
8249 }
8250 
8251 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8252 {
8253  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8254  suballocItem != m_Suballocations.end();
8255  ++suballocItem)
8256  {
8257  VmaSuballocation& suballoc = *suballocItem;
8258  if(suballoc.offset == offset)
8259  {
8260  FreeSuballocation(suballocItem);
8261  return;
8262  }
8263  }
8264  VMA_ASSERT(0 && "Not found!");
8265 }
8266 
8267 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8268 {
8269  typedef VmaSuballocationList::iterator iter_type;
8270  for(iter_type suballocItem = m_Suballocations.begin();
8271  suballocItem != m_Suballocations.end();
8272  ++suballocItem)
8273  {
8274  VmaSuballocation& suballoc = *suballocItem;
8275  if(suballoc.hAllocation == alloc)
8276  {
8277  iter_type nextItem = suballocItem;
8278  ++nextItem;
8279 
8280  // Should have been ensured at a higher level.
8281  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8282 
8283  // Shrinking.
8284  if(newSize < alloc->GetSize())
8285  {
8286  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8287 
8288  // There is next item.
8289  if(nextItem != m_Suballocations.end())
8290  {
8291  // Next item is free.
8292  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8293  {
8294  // Grow this next item backward.
8295  UnregisterFreeSuballocation(nextItem);
8296  nextItem->offset -= sizeDiff;
8297  nextItem->size += sizeDiff;
8298  RegisterFreeSuballocation(nextItem);
8299  }
8300  // Next item is not free.
8301  else
8302  {
8303  // Create free item after current one.
8304  VmaSuballocation newFreeSuballoc;
8305  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8306  newFreeSuballoc.offset = suballoc.offset + newSize;
8307  newFreeSuballoc.size = sizeDiff;
8308  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8309  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8310  RegisterFreeSuballocation(newFreeSuballocIt);
8311 
8312  ++m_FreeCount;
8313  }
8314  }
8315  // This is the last item.
8316  else
8317  {
8318  // Create free item at the end.
8319  VmaSuballocation newFreeSuballoc;
8320  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8321  newFreeSuballoc.offset = suballoc.offset + newSize;
8322  newFreeSuballoc.size = sizeDiff;
8323  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8324  m_Suballocations.push_back(newFreeSuballoc);
8325 
8326  iter_type newFreeSuballocIt = m_Suballocations.end();
8327  RegisterFreeSuballocation(--newFreeSuballocIt);
8328 
8329  ++m_FreeCount;
8330  }
8331 
8332  suballoc.size = newSize;
8333  m_SumFreeSize += sizeDiff;
8334  }
8335  // Growing.
8336  else
8337  {
8338  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8339 
8340  // There is next item.
8341  if(nextItem != m_Suballocations.end())
8342  {
8343  // Next item is free.
8344  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8345  {
8346  // There is not enough free space, including margin.
8347  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8348  {
8349  return false;
8350  }
8351 
8352  // There is more free space than required.
8353  if(nextItem->size > sizeDiff)
8354  {
8355  // Move and shrink this next item.
8356  UnregisterFreeSuballocation(nextItem);
8357  nextItem->offset += sizeDiff;
8358  nextItem->size -= sizeDiff;
8359  RegisterFreeSuballocation(nextItem);
8360  }
8361  // There is exactly the amount of free space required.
8362  else
8363  {
8364  // Remove this next free item.
8365  UnregisterFreeSuballocation(nextItem);
8366  m_Suballocations.erase(nextItem);
8367  --m_FreeCount;
8368  }
8369  }
8370  // Next item is not free - there is no space to grow.
8371  else
8372  {
8373  return false;
8374  }
8375  }
8376  // This is the last item - there is no space to grow.
8377  else
8378  {
8379  return false;
8380  }
8381 
8382  suballoc.size = newSize;
8383  m_SumFreeSize -= sizeDiff;
8384  }
8385 
8386  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
8387  return true;
8388  }
8389  }
8390  VMA_ASSERT(0 && "Not found!");
8391  return false;
8392 }
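/*
A minimal sketch of the resize strategy above, on a simplified model: a flat
vector of {offset, size, free} ranges instead of VmaSuballocationList, with
VMA_DEBUG_MARGIN handling omitted. All names here (Range, tryResize) are
illustrative only, not part of the VMA API:

    #include <cstdint>
    #include <vector>

    struct Range { uint64_t offset; uint64_t size; bool free; };

    // Shrink: hand the freed tail to a free right neighbor, or create one.
    // Grow: consume exactly the needed bytes from a free right neighbor, or fail.
    inline bool tryResize(std::vector<Range>& v, size_t i, uint64_t newSize)
    {
        const uint64_t oldSize = v[i].size;
        const bool hasNext = (i + 1 < v.size());
        if(newSize < oldSize) // shrinking
        {
            const uint64_t diff = oldSize - newSize;
            if(hasNext && v[i + 1].free)
            {
                v[i + 1].offset -= diff; // grow the free neighbor backward
                v[i + 1].size   += diff;
            }
            else
            {
                const Range freed = { v[i].offset + newSize, diff, true };
                v.insert(v.begin() + i + 1, freed);
            }
            v[i].size = newSize;
            return true;
        }
        const uint64_t diff = newSize - oldSize; // growing
        if(!hasNext || !v[i + 1].free || v[i + 1].size < diff)
        {
            return false; // no free space directly after this allocation
        }
        if(v[i + 1].size > diff)
        {
            v[i + 1].offset += diff; // shrink the free neighbor from the front
            v[i + 1].size   -= diff;
        }
        else
        {
            v.erase(v.begin() + i + 1); // the free neighbor is consumed exactly
        }
        v[i].size = newSize;
        return true;
    }
*/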
8393 
8394 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8395 {
8396  VkDeviceSize lastSize = 0;
8397  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8398  {
8399  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8400 
8401  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8402  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8403  VMA_VALIDATE(it->size >= lastSize);
8404  lastSize = it->size;
8405  }
8406  return true;
8407 }
8408 
8409 bool VmaBlockMetadata_Generic::CheckAllocation(
8410  uint32_t currentFrameIndex,
8411  uint32_t frameInUseCount,
8412  VkDeviceSize bufferImageGranularity,
8413  VkDeviceSize allocSize,
8414  VkDeviceSize allocAlignment,
8415  VmaSuballocationType allocType,
8416  VmaSuballocationList::const_iterator suballocItem,
8417  bool canMakeOtherLost,
8418  VkDeviceSize* pOffset,
8419  size_t* itemsToMakeLostCount,
8420  VkDeviceSize* pSumFreeSize,
8421  VkDeviceSize* pSumItemSize) const
8422 {
8423  VMA_ASSERT(allocSize > 0);
8424  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8425  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8426  VMA_ASSERT(pOffset != VMA_NULL);
8427 
8428  *itemsToMakeLostCount = 0;
8429  *pSumFreeSize = 0;
8430  *pSumItemSize = 0;
8431 
8432  if(canMakeOtherLost)
8433  {
8434  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8435  {
8436  *pSumFreeSize = suballocItem->size;
8437  }
8438  else
8439  {
8440  if(suballocItem->hAllocation->CanBecomeLost() &&
8441  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8442  {
8443  ++*itemsToMakeLostCount;
8444  *pSumItemSize = suballocItem->size;
8445  }
8446  else
8447  {
8448  return false;
8449  }
8450  }
8451 
8452  // Remaining size is too small for this request: Early return.
8453  if(GetSize() - suballocItem->offset < allocSize)
8454  {
8455  return false;
8456  }
8457 
8458  // Start from offset equal to beginning of this suballocation.
8459  *pOffset = suballocItem->offset;
8460 
8461  // Apply VMA_DEBUG_MARGIN at the beginning.
8462  if(VMA_DEBUG_MARGIN > 0)
8463  {
8464  *pOffset += VMA_DEBUG_MARGIN;
8465  }
8466 
8467  // Apply alignment.
8468  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8469 
8470  // Check previous suballocations for BufferImageGranularity conflicts.
8471  // Make bigger alignment if necessary.
8472  if(bufferImageGranularity > 1)
8473  {
8474  bool bufferImageGranularityConflict = false;
8475  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8476  while(prevSuballocItem != m_Suballocations.cbegin())
8477  {
8478  --prevSuballocItem;
8479  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8480  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8481  {
8482  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8483  {
8484  bufferImageGranularityConflict = true;
8485  break;
8486  }
8487  }
8488  else
8489  // Already on previous page.
8490  break;
8491  }
8492  if(bufferImageGranularityConflict)
8493  {
8494  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8495  }
8496  }
8497 
8498  // Now that we have final *pOffset, check if we are past suballocItem.
8499  // If yes, return false - this function should be called for another suballocItem as starting point.
8500  if(*pOffset >= suballocItem->offset + suballocItem->size)
8501  {
8502  return false;
8503  }
8504 
8505  // Calculate padding at the beginning based on current offset.
8506  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8507 
8508  // Calculate required margin at the end.
8509  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8510 
8511  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8512  // Another early return check.
8513  if(suballocItem->offset + totalSize > GetSize())
8514  {
8515  return false;
8516  }
8517 
8518  // Advance lastSuballocItem until desired size is reached.
8519  // Update itemsToMakeLostCount.
8520  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8521  if(totalSize > suballocItem->size)
8522  {
8523  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8524  while(remainingSize > 0)
8525  {
8526  ++lastSuballocItem;
8527  if(lastSuballocItem == m_Suballocations.cend())
8528  {
8529  return false;
8530  }
8531  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8532  {
8533  *pSumFreeSize += lastSuballocItem->size;
8534  }
8535  else
8536  {
8537  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8538  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8539  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8540  {
8541  ++*itemsToMakeLostCount;
8542  *pSumItemSize += lastSuballocItem->size;
8543  }
8544  else
8545  {
8546  return false;
8547  }
8548  }
8549  remainingSize = (lastSuballocItem->size < remainingSize) ?
8550  remainingSize - lastSuballocItem->size : 0;
8551  }
8552  }
8553 
8554  // Check next suballocations for BufferImageGranularity conflicts.
8555  // If conflict exists, we must mark more allocations lost or fail.
8556  if(bufferImageGranularity > 1)
8557  {
8558  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8559  ++nextSuballocItem;
8560  while(nextSuballocItem != m_Suballocations.cend())
8561  {
8562  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8563  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8564  {
8565  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8566  {
8567  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8568  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8569  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8570  {
8571  ++*itemsToMakeLostCount;
8572  }
8573  else
8574  {
8575  return false;
8576  }
8577  }
8578  }
8579  else
8580  {
8581  // Already on next page.
8582  break;
8583  }
8584  ++nextSuballocItem;
8585  }
8586  }
8587  }
8588  else
8589  {
8590  const VmaSuballocation& suballoc = *suballocItem;
8591  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8592 
8593  *pSumFreeSize = suballoc.size;
8594 
8595  // Size of this suballocation is too small for this request: Early return.
8596  if(suballoc.size < allocSize)
8597  {
8598  return false;
8599  }
8600 
8601  // Start from offset equal to beginning of this suballocation.
8602  *pOffset = suballoc.offset;
8603 
8604  // Apply VMA_DEBUG_MARGIN at the beginning.
8605  if(VMA_DEBUG_MARGIN > 0)
8606  {
8607  *pOffset += VMA_DEBUG_MARGIN;
8608  }
8609 
8610  // Apply alignment.
8611  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8612 
8613  // Check previous suballocations for BufferImageGranularity conflicts.
8614  // Make bigger alignment if necessary.
8615  if(bufferImageGranularity > 1)
8616  {
8617  bool bufferImageGranularityConflict = false;
8618  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8619  while(prevSuballocItem != m_Suballocations.cbegin())
8620  {
8621  --prevSuballocItem;
8622  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8623  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8624  {
8625  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8626  {
8627  bufferImageGranularityConflict = true;
8628  break;
8629  }
8630  }
8631  else
8632  // Already on previous page.
8633  break;
8634  }
8635  if(bufferImageGranularityConflict)
8636  {
8637  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8638  }
8639  }
8640 
8641  // Calculate padding at the beginning based on current offset.
8642  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8643 
8644  // Calculate required margin at the end.
8645  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8646 
8647  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8648  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8649  {
8650  return false;
8651  }
8652 
8653  // Check next suballocations for BufferImageGranularity conflicts.
8654  // If conflict exists, allocation cannot be made here.
8655  if(bufferImageGranularity > 1)
8656  {
8657  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8658  ++nextSuballocItem;
8659  while(nextSuballocItem != m_Suballocations.cend())
8660  {
8661  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8662  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8663  {
8664  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8665  {
8666  return false;
8667  }
8668  }
8669  else
8670  {
8671  // Already on next page.
8672  break;
8673  }
8674  ++nextSuballocItem;
8675  }
8676  }
8677  }
8678 
8679  // All tests passed: Success. pOffset is already filled.
8680  return true;
8681 }
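/*
The offset math used by CheckAllocation, condensed into a standalone sketch.
Given a free range [rangeOffset, rangeOffset + rangeSize), decide where a
request (allocSize, allocAlignment) would land with a debug margin on both
sides. `alignUp` mirrors VmaAlignUp; the other names are illustrative and
assume allocAlignment > 0:

    #include <cstdint>

    inline uint64_t alignUp(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

    // Returns true and fills *pOffset if the request fits with both margins.
    inline bool fitsInFreeRange(
        uint64_t rangeOffset, uint64_t rangeSize,
        uint64_t allocSize, uint64_t allocAlignment, uint64_t debugMargin,
        uint64_t* pOffset)
    {
        const uint64_t offset = alignUp(rangeOffset + debugMargin, allocAlignment);
        const uint64_t paddingBegin = offset - rangeOffset;
        // Request plus margins on both sides must fit inside the free range.
        if(paddingBegin + allocSize + debugMargin > rangeSize)
        {
            return false;
        }
        *pOffset = offset;
        return true;
    }

For example, rangeOffset = 100, rangeSize = 200, allocSize = 120,
allocAlignment = 64, debugMargin = 0 gives offset = 128, paddingBegin = 28,
and 28 + 120 + 0 <= 200, so the request succeeds at offset 128.
*/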
8682 
8683 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8684 {
8685  VMA_ASSERT(item != m_Suballocations.end());
8686  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8687 
8688  VmaSuballocationList::iterator nextItem = item;
8689  ++nextItem;
8690  VMA_ASSERT(nextItem != m_Suballocations.end());
8691  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8692 
8693  item->size += nextItem->size;
8694  --m_FreeCount;
8695  m_Suballocations.erase(nextItem);
8696 }
8697 
8698 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8699 {
8700  // Change this suballocation to be marked as free.
8701  VmaSuballocation& suballoc = *suballocItem;
8702  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8703  suballoc.hAllocation = VK_NULL_HANDLE;
8704 
8705  // Update totals.
8706  ++m_FreeCount;
8707  m_SumFreeSize += suballoc.size;
8708 
8709  // Merge with previous and/or next suballocation if it's also free.
8710  bool mergeWithNext = false;
8711  bool mergeWithPrev = false;
8712 
8713  VmaSuballocationList::iterator nextItem = suballocItem;
8714  ++nextItem;
8715  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8716  {
8717  mergeWithNext = true;
8718  }
8719 
8720  VmaSuballocationList::iterator prevItem = suballocItem;
8721  if(suballocItem != m_Suballocations.begin())
8722  {
8723  --prevItem;
8724  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8725  {
8726  mergeWithPrev = true;
8727  }
8728  }
8729 
8730  if(mergeWithNext)
8731  {
8732  UnregisterFreeSuballocation(nextItem);
8733  MergeFreeWithNext(suballocItem);
8734  }
8735 
8736  if(mergeWithPrev)
8737  {
8738  UnregisterFreeSuballocation(prevItem);
8739  MergeFreeWithNext(prevItem);
8740  RegisterFreeSuballocation(prevItem);
8741  return prevItem;
8742  }
8743  else
8744  {
8745  RegisterFreeSuballocation(suballocItem);
8746  return suballocItem;
8747  }
8748 }
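/*
A minimal sketch of the coalescing rule applied in FreeSuballocation, using
std::list as a stand-in for VmaSuballocationList. Names (Node, coalesce) are
illustrative, not part of the VMA API:

    #include <cstdint>
    #include <iterator>
    #include <list>

    struct Node { uint64_t offset; uint64_t size; bool free; };
    using List = std::list<Node>;

    // Mark *it free, then merge it with a free successor and/or predecessor.
    // Returns the iterator of the resulting (possibly merged) free node.
    inline List::iterator coalesce(List& l, List::iterator it)
    {
        it->free = true;
        List::iterator next = std::next(it);
        if(next != l.end() && next->free)
        {
            it->size += next->size;     // absorb the free successor
            l.erase(next);
        }
        if(it != l.begin())
        {
            List::iterator prev = std::prev(it);
            if(prev->free)
            {
                prev->size += it->size; // let the free predecessor absorb us
                l.erase(it);
                return prev;
            }
        }
        return it;
    }

Merging in both directions preserves the invariant that no two free
suballocations are ever adjacent, which the asserts in MergeFreeWithNext
rely on.
*/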
8749 
8750 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8751 {
8752  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8753  VMA_ASSERT(item->size > 0);
8754 
8755  // You may want to enable this validation at the beginning or at the end of
8756  // this function, depending on what you want to check.
8757  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8758 
8759  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8760  {
8761  if(m_FreeSuballocationsBySize.empty())
8762  {
8763  m_FreeSuballocationsBySize.push_back(item);
8764  }
8765  else
8766  {
8767  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8768  }
8769  }
8770 
8771  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8772 }
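/*
A sketch of the same size-sorted registration using the standard library,
assuming a pointer-based registry sorted ascending by size (FreeRange and
registerFree are illustrative names):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct FreeRange { uint64_t offset; uint64_t size; };

    // Keep `bySize` sorted ascending by size, so best-fit lookups and
    // unregistration can use binary search (cf. VmaVectorInsertSorted).
    inline void registerFree(std::vector<FreeRange*>& bySize, FreeRange* r)
    {
        const auto pos = std::lower_bound(
            bySize.begin(), bySize.end(), r,
            [](const FreeRange* a, const FreeRange* b) { return a->size < b->size; });
        bySize.insert(pos, r);
    }
*/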
8773 
8774 
8775 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8776 {
8777  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8778  VMA_ASSERT(item->size > 0);
8779 
8780  // You may want to enable this validation at the beginning or at the end of
8781  // this function, depending on what you want to check.
8782  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8783 
8784  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8785  {
8786  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8787  m_FreeSuballocationsBySize.data(),
8788  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8789  item,
8790  VmaSuballocationItemSizeLess());
8791  for(size_t index = it - m_FreeSuballocationsBySize.data();
8792  index < m_FreeSuballocationsBySize.size();
8793  ++index)
8794  {
8795  if(m_FreeSuballocationsBySize[index] == item)
8796  {
8797  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8798  return;
8799  }
8800  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8801  }
8802  VMA_ASSERT(0 && "Not found.");
8803  }
8804 
8805  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8806 }
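/*
Continuing the sketch above: unregistration must cope with many free ranges of
equal size, so a binary search locates the first entry of that size and a
short linear scan compares identities, exactly as the loop above does:

    inline void unregisterFree(std::vector<FreeRange*>& bySize, FreeRange* r)
    {
        auto it = std::lower_bound(
            bySize.begin(), bySize.end(), r,
            [](const FreeRange* a, const FreeRange* b) { return a->size < b->size; });
        // Scan forward through the run of equal sizes for this exact entry.
        for(; it != bySize.end() && (*it)->size == r->size; ++it)
        {
            if(*it == r)
            {
                bySize.erase(it);
                return;
            }
        }
        // Falling through here means the registry and the suballocation
        // list are out of sync - the VMA_ASSERT(0 && "Not found.") case.
    }
*/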
8807 
8808 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8809  VkDeviceSize bufferImageGranularity,
8810  VmaSuballocationType& inOutPrevSuballocType) const
8811 {
8812  if(bufferImageGranularity == 1 || IsEmpty())
8813  {
8814  return false;
8815  }
8816 
8817  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8818  bool typeConflictFound = false;
8819  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8820  it != m_Suballocations.cend();
8821  ++it)
8822  {
8823  const VmaSuballocationType suballocType = it->type;
8824  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8825  {
8826  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8827  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8828  {
8829  typeConflictFound = true;
8830  }
8831  inOutPrevSuballocType = suballocType;
8832  }
8833  }
8834 
8835  return typeConflictFound || minAlignment >= bufferImageGranularity;
8836 }
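/*
The granularity logic above and in CheckAllocation rests on a "same page"
test: two resources can only conflict if their memory ranges may share a page
of bufferImageGranularity bytes. A sketch equivalent to VmaBlocksOnSamePage,
assuming a power-of-2 page size and resource A placed below resource B:

    #include <cstdint>

    inline bool blocksOnSamePage(
        uint64_t resourceAOffset, uint64_t resourceASize,
        uint64_t resourceBOffset, uint64_t pageSize)
    {
        // Page containing A's last byte vs. page containing B's first byte.
        const uint64_t resourceAEndPage   = (resourceAOffset + resourceASize - 1) & ~(pageSize - 1);
        const uint64_t resourceBStartPage = resourceBOffset & ~(pageSize - 1);
        return resourceAEndPage == resourceBStartPage;
    }

For example, with pageSize = 4096, a resource ending at byte 5000 and one
starting at offset 6000 both touch page 1 (bytes 4096..8191), so a
linear/non-linear pair at those offsets would conflict.
*/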
8837 
8838 ////////////////////////////////////////////////////////////////////////////////
8839 // class VmaBlockMetadata_Linear
8840 
8841 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8842  VmaBlockMetadata(hAllocator),
8843  m_SumFreeSize(0),
8844  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8845  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8846  m_1stVectorIndex(0),
8847  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8848  m_1stNullItemsBeginCount(0),
8849  m_1stNullItemsMiddleCount(0),
8850  m_2ndNullItemsCount(0)
8851 {
8852 }
8853 
8854 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8855 {
8856 }
8857 
8858 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8859 {
8860  VmaBlockMetadata::Init(size);
8861  m_SumFreeSize = size;
8862 }
8863 
8864 bool VmaBlockMetadata_Linear::Validate() const
8865 {
8866  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8867  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8868 
8869  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8870  VMA_VALIDATE(!suballocations1st.empty() ||
8871  suballocations2nd.empty() ||
8872  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8873 
8874  if(!suballocations1st.empty())
8875  {
8876  // A null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
8877  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8878  // Null item at the end should be just pop_back().
8879  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8880  }
8881  if(!suballocations2nd.empty())
8882  {
8883  // Null item at the end should be just pop_back().
8884  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8885  }
8886 
8887  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8888  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8889 
8890  VkDeviceSize sumUsedSize = 0;
8891  const size_t suballoc1stCount = suballocations1st.size();
8892  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8893 
8894  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8895  {
8896  const size_t suballoc2ndCount = suballocations2nd.size();
8897  size_t nullItem2ndCount = 0;
8898  for(size_t i = 0; i < suballoc2ndCount; ++i)
8899  {
8900  const VmaSuballocation& suballoc = suballocations2nd[i];
8901  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8902 
8903  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8904  VMA_VALIDATE(suballoc.offset >= offset);
8905 
8906  if(!currFree)
8907  {
8908  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8909  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8910  sumUsedSize += suballoc.size;
8911  }
8912  else
8913  {
8914  ++nullItem2ndCount;
8915  }
8916 
8917  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8918  }
8919 
8920  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8921  }
8922 
8923  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8924  {
8925  const VmaSuballocation& suballoc = suballocations1st[i];
8926  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8927  suballoc.hAllocation == VK_NULL_HANDLE);
8928  }
8929 
8930  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8931 
8932  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8933  {
8934  const VmaSuballocation& suballoc = suballocations1st[i];
8935  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8936 
8937  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8938  VMA_VALIDATE(suballoc.offset >= offset);
8939  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8940 
8941  if(!currFree)
8942  {
8943  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8944  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8945  sumUsedSize += suballoc.size;
8946  }
8947  else
8948  {
8949  ++nullItem1stCount;
8950  }
8951 
8952  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8953  }
8954  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8955 
8956  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8957  {
8958  const size_t suballoc2ndCount = suballocations2nd.size();
8959  size_t nullItem2ndCount = 0;
8960  for(size_t i = suballoc2ndCount; i--; )
8961  {
8962  const VmaSuballocation& suballoc = suballocations2nd[i];
8963  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8964 
8965  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8966  VMA_VALIDATE(suballoc.offset >= offset);
8967 
8968  if(!currFree)
8969  {
8970  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8971  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8972  sumUsedSize += suballoc.size;
8973  }
8974  else
8975  {
8976  ++nullItem2ndCount;
8977  }
8978 
8979  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8980  }
8981 
8982  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8983  }
8984 
8985  VMA_VALIDATE(offset <= GetSize());
8986  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8987 
8988  return true;
8989 }
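/*
A concrete picture of the invariants checked above, for a 100-byte block in
SECOND_VECTOR_RING_BUFFER mode (offsets illustrative):

    suballocations2nd: { offset 0, size 10 }, { offset 12, size 8 }    // wrapped part
    ... free gap ...
    suballocations1st: { offset 60, size 20 }, { offset 85, size 10 }

Offsets must be strictly increasing (respecting the debug margin) within 2nd
and then within 1st, the 2nd vector must end below the 1st vector's first
live item, every freed (null) item must be counted in m_2ndNullItemsCount or
m_1stNullItems*Count, and m_SumFreeSize must equal GetSize() minus the sum of
live sizes - here 100 - (10 + 8 + 20 + 10) = 52.
*/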
8990 
8991 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8992 {
8993  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8994  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8995 }
8996 
8997 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8998 {
8999  const VkDeviceSize size = GetSize();
9000 
9001  /*
9002  We don't consider gaps inside allocation vectors with freed allocations because
9003  they are not suitable for reuse in a linear allocator. We consider only space that
9004  is available for new allocations.
9005  */
9006  if(IsEmpty())
9007  {
9008  return size;
9009  }
9010 
9011  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9012 
9013  switch(m_2ndVectorMode)
9014  {
9015  case SECOND_VECTOR_EMPTY:
9016  /*
9017  Available space is after end of 1st, as well as before beginning of 1st (which
9018  would make it a ring buffer).
9019  */
9020  {
9021  const size_t suballocations1stCount = suballocations1st.size();
9022  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9023  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9024  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9025  return VMA_MAX(
9026  firstSuballoc.offset,
9027  size - (lastSuballoc.offset + lastSuballoc.size));
9028  }
9029  break;
9030 
9031  case SECOND_VECTOR_RING_BUFFER:
9032  /*
9033  Available space is only between end of 2nd and beginning of 1st.
9034  */
9035  {
9036  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9037  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9038  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9039  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9040  }
9041  break;
9042 
9043  case SECOND_VECTOR_DOUBLE_STACK:
9044  /*
9045  Available space is only between end of 1st and top of 2nd.
9046  */
9047  {
9048  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9049  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9050  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9051  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9052  }
9053  break;
9054 
9055  default:
9056  VMA_ASSERT(0);
9057  return 0;
9058  }
9059 }
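/*
Worked example for the three cases above, with GetSize() == 100:

    SECOND_VECTOR_EMPTY:        1st occupies [20, 70)
                                -> VMA_MAX(20, 100 - 70) = 30
    SECOND_VECTOR_RING_BUFFER:  2nd ends at 15, 1st begins at 40
                                -> 40 - 15 = 25
    SECOND_VECTOR_DOUBLE_STACK: 1st ends at 30, 2nd top begins at 80
                                -> 80 - 30 = 50

Only the single contiguous region where the linear algorithm can place new
allocations is reported; gaps left by freed items inside the vectors are
deliberately ignored, as the comment above explains.
*/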
9060 
9061 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9062 {
9063  const VkDeviceSize size = GetSize();
9064  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9065  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9066  const size_t suballoc1stCount = suballocations1st.size();
9067  const size_t suballoc2ndCount = suballocations2nd.size();
9068 
9069  outInfo.blockCount = 1;
9070  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9071  outInfo.unusedRangeCount = 0;
9072  outInfo.usedBytes = 0;
9073  outInfo.allocationSizeMin = UINT64_MAX;
9074  outInfo.allocationSizeMax = 0;
9075  outInfo.unusedRangeSizeMin = UINT64_MAX;
9076  outInfo.unusedRangeSizeMax = 0;
9077 
9078  VkDeviceSize lastOffset = 0;
9079 
9080  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9081  {
9082  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9083  size_t nextAlloc2ndIndex = 0;
9084  while(lastOffset < freeSpace2ndTo1stEnd)
9085  {
9086  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9087  while(nextAlloc2ndIndex < suballoc2ndCount &&
9088  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9089  {
9090  ++nextAlloc2ndIndex;
9091  }
9092 
9093  // Found non-null allocation.
9094  if(nextAlloc2ndIndex < suballoc2ndCount)
9095  {
9096  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9097 
9098  // 1. Process free space before this allocation.
9099  if(lastOffset < suballoc.offset)
9100  {
9101  // There is free space from lastOffset to suballoc.offset.
9102  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9103  ++outInfo.unusedRangeCount;
9104  outInfo.unusedBytes += unusedRangeSize;
9105  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9106  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9107  }
9108 
9109  // 2. Process this allocation.
9110  // There is allocation with suballoc.offset, suballoc.size.
9111  outInfo.usedBytes += suballoc.size;
9112  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9113  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9114 
9115  // 3. Prepare for next iteration.
9116  lastOffset = suballoc.offset + suballoc.size;
9117  ++nextAlloc2ndIndex;
9118  }
9119  // We are at the end.
9120  else
9121  {
9122  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9123  if(lastOffset < freeSpace2ndTo1stEnd)
9124  {
9125  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9126  ++outInfo.unusedRangeCount;
9127  outInfo.unusedBytes += unusedRangeSize;
9128  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9129  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9130  }
9131 
9132  // End of loop.
9133  lastOffset = freeSpace2ndTo1stEnd;
9134  }
9135  }
9136  }
9137 
9138  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9139  const VkDeviceSize freeSpace1stTo2ndEnd =
9140  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9141  while(lastOffset < freeSpace1stTo2ndEnd)
9142  {
9143  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9144  while(nextAlloc1stIndex < suballoc1stCount &&
9145  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9146  {
9147  ++nextAlloc1stIndex;
9148  }
9149 
9150  // Found non-null allocation.
9151  if(nextAlloc1stIndex < suballoc1stCount)
9152  {
9153  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9154 
9155  // 1. Process free space before this allocation.
9156  if(lastOffset < suballoc.offset)
9157  {
9158  // There is free space from lastOffset to suballoc.offset.
9159  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9160  ++outInfo.unusedRangeCount;
9161  outInfo.unusedBytes += unusedRangeSize;
9162  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9163  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9164  }
9165 
9166  // 2. Process this allocation.
9167  // There is allocation with suballoc.offset, suballoc.size.
9168  outInfo.usedBytes += suballoc.size;
9169  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9170  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9171 
9172  // 3. Prepare for next iteration.
9173  lastOffset = suballoc.offset + suballoc.size;
9174  ++nextAlloc1stIndex;
9175  }
9176  // We are at the end.
9177  else
9178  {
9179  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9180  if(lastOffset < freeSpace1stTo2ndEnd)
9181  {
9182  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9183  ++outInfo.unusedRangeCount;
9184  outInfo.unusedBytes += unusedRangeSize;
9185  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9186  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9187  }
9188 
9189  // End of loop.
9190  lastOffset = freeSpace1stTo2ndEnd;
9191  }
9192  }
9193 
9194  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9195  {
9196  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9197  while(lastOffset < size)
9198  {
9199  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9200  while(nextAlloc2ndIndex != SIZE_MAX &&
9201  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9202  {
9203  --nextAlloc2ndIndex;
9204  }
9205 
9206  // Found non-null allocation.
9207  if(nextAlloc2ndIndex != SIZE_MAX)
9208  {
9209  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9210 
9211  // 1. Process free space before this allocation.
9212  if(lastOffset < suballoc.offset)
9213  {
9214  // There is free space from lastOffset to suballoc.offset.
9215  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9216  ++outInfo.unusedRangeCount;
9217  outInfo.unusedBytes += unusedRangeSize;
9218  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9219  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9220  }
9221 
9222  // 2. Process this allocation.
9223  // There is allocation with suballoc.offset, suballoc.size.
9224  outInfo.usedBytes += suballoc.size;
9225  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9226  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9227 
9228  // 3. Prepare for next iteration.
9229  lastOffset = suballoc.offset + suballoc.size;
9230  --nextAlloc2ndIndex;
9231  }
9232  // We are at the end.
9233  else
9234  {
9235  // There is free space from lastOffset to size.
9236  if(lastOffset < size)
9237  {
9238  const VkDeviceSize unusedRangeSize = size - lastOffset;
9239  ++outInfo.unusedRangeCount;
9240  outInfo.unusedBytes += unusedRangeSize;
9241  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9242  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9243  }
9244 
9245  // End of loop.
9246  lastOffset = size;
9247  }
9248  }
9249  }
9250 
9251  outInfo.unusedBytes = size - outInfo.usedBytes;
9252 }
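/*
The accumulation pattern used above, reduced to a sketch: seed each minimum
with the largest representable value and each maximum with 0, then fold every
range in. Names (RangeStats, addRange) are illustrative:

    #include <algorithm>
    #include <cstdint>

    struct RangeStats
    {
        uint64_t count   = 0;
        uint64_t bytes   = 0;
        uint64_t sizeMin = UINT64_MAX; // identity element for min
        uint64_t sizeMax = 0;          // identity element for max
    };

    inline void addRange(RangeStats& s, uint64_t size)
    {
        ++s.count;
        s.bytes   += size;
        s.sizeMin  = std::min(s.sizeMin, size);
        s.sizeMax  = std::max(s.sizeMax, size);
    }

Using min where max belongs (or vice versa) silently pins the statistic to its
seed value, which is why the VMA_MIN/VMA_MAX pairs above must not be mixed up.
*/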
9253 
9254 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9255 {
9256  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9257  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9258  const VkDeviceSize size = GetSize();
9259  const size_t suballoc1stCount = suballocations1st.size();
9260  const size_t suballoc2ndCount = suballocations2nd.size();
9261 
9262  inoutStats.size += size;
9263 
9264  VkDeviceSize lastOffset = 0;
9265 
9266  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9267  {
9268  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9269  size_t nextAlloc2ndIndex = 0;
9270  while(lastOffset < freeSpace2ndTo1stEnd)
9271  {
9272  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9273  while(nextAlloc2ndIndex < suballoc2ndCount &&
9274  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9275  {
9276  ++nextAlloc2ndIndex;
9277  }
9278 
9279  // Found non-null allocation.
9280  if(nextAlloc2ndIndex < suballoc2ndCount)
9281  {
9282  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9283 
9284  // 1. Process free space before this allocation.
9285  if(lastOffset < suballoc.offset)
9286  {
9287  // There is free space from lastOffset to suballoc.offset.
9288  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9289  inoutStats.unusedSize += unusedRangeSize;
9290  ++inoutStats.unusedRangeCount;
9291  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9292  }
9293 
9294  // 2. Process this allocation.
9295  // There is allocation with suballoc.offset, suballoc.size.
9296  ++inoutStats.allocationCount;
9297 
9298  // 3. Prepare for next iteration.
9299  lastOffset = suballoc.offset + suballoc.size;
9300  ++nextAlloc2ndIndex;
9301  }
9302  // We are at the end.
9303  else
9304  {
9305  if(lastOffset < freeSpace2ndTo1stEnd)
9306  {
9307  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9308  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9309  inoutStats.unusedSize += unusedRangeSize;
9310  ++inoutStats.unusedRangeCount;
9311  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9312  }
9313 
9314  // End of loop.
9315  lastOffset = freeSpace2ndTo1stEnd;
9316  }
9317  }
9318  }
9319 
9320  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9321  const VkDeviceSize freeSpace1stTo2ndEnd =
9322  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9323  while(lastOffset < freeSpace1stTo2ndEnd)
9324  {
9325  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9326  while(nextAlloc1stIndex < suballoc1stCount &&
9327  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9328  {
9329  ++nextAlloc1stIndex;
9330  }
9331 
9332  // Found non-null allocation.
9333  if(nextAlloc1stIndex < suballoc1stCount)
9334  {
9335  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9336 
9337  // 1. Process free space before this allocation.
9338  if(lastOffset < suballoc.offset)
9339  {
9340  // There is free space from lastOffset to suballoc.offset.
9341  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9342  inoutStats.unusedSize += unusedRangeSize;
9343  ++inoutStats.unusedRangeCount;
9344  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9345  }
9346 
9347  // 2. Process this allocation.
9348  // There is allocation with suballoc.offset, suballoc.size.
9349  ++inoutStats.allocationCount;
9350 
9351  // 3. Prepare for next iteration.
9352  lastOffset = suballoc.offset + suballoc.size;
9353  ++nextAlloc1stIndex;
9354  }
9355  // We are at the end.
9356  else
9357  {
9358  if(lastOffset < freeSpace1stTo2ndEnd)
9359  {
9360  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9361  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9362  inoutStats.unusedSize += unusedRangeSize;
9363  ++inoutStats.unusedRangeCount;
9364  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9365  }
9366 
9367  // End of loop.
9368  lastOffset = freeSpace1stTo2ndEnd;
9369  }
9370  }
9371 
9372  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9373  {
9374  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9375  while(lastOffset < size)
9376  {
9377  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9378  while(nextAlloc2ndIndex != SIZE_MAX &&
9379  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9380  {
9381  --nextAlloc2ndIndex;
9382  }
9383 
9384  // Found non-null allocation.
9385  if(nextAlloc2ndIndex != SIZE_MAX)
9386  {
9387  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9388 
9389  // 1. Process free space before this allocation.
9390  if(lastOffset < suballoc.offset)
9391  {
9392  // There is free space from lastOffset to suballoc.offset.
9393  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9394  inoutStats.unusedSize += unusedRangeSize;
9395  ++inoutStats.unusedRangeCount;
9396  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9397  }
9398 
9399  // 2. Process this allocation.
9400  // There is allocation with suballoc.offset, suballoc.size.
9401  ++inoutStats.allocationCount;
9402 
9403  // 3. Prepare for next iteration.
9404  lastOffset = suballoc.offset + suballoc.size;
9405  --nextAlloc2ndIndex;
9406  }
9407  // We are at the end.
9408  else
9409  {
9410  if(lastOffset < size)
9411  {
9412  // There is free space from lastOffset to size.
9413  const VkDeviceSize unusedRangeSize = size - lastOffset;
9414  inoutStats.unusedSize += unusedRangeSize;
9415  ++inoutStats.unusedRangeCount;
9416  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9417  }
9418 
9419  // End of loop.
9420  lastOffset = size;
9421  }
9422  }
9423  }
9424 }
9425 
9426 #if VMA_STATS_STRING_ENABLED
9427 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9428 {
9429  const VkDeviceSize size = GetSize();
9430  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9431  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9432  const size_t suballoc1stCount = suballocations1st.size();
9433  const size_t suballoc2ndCount = suballocations2nd.size();
9434 
9435  // FIRST PASS
9436 
9437  size_t unusedRangeCount = 0;
9438  VkDeviceSize usedBytes = 0;
9439 
9440  VkDeviceSize lastOffset = 0;
9441 
9442  size_t alloc2ndCount = 0;
9443  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9444  {
9445  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9446  size_t nextAlloc2ndIndex = 0;
9447  while(lastOffset < freeSpace2ndTo1stEnd)
9448  {
9449  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9450  while(nextAlloc2ndIndex < suballoc2ndCount &&
9451  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9452  {
9453  ++nextAlloc2ndIndex;
9454  }
9455 
9456  // Found non-null allocation.
9457  if(nextAlloc2ndIndex < suballoc2ndCount)
9458  {
9459  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9460 
9461  // 1. Process free space before this allocation.
9462  if(lastOffset < suballoc.offset)
9463  {
9464  // There is free space from lastOffset to suballoc.offset.
9465  ++unusedRangeCount;
9466  }
9467 
9468  // 2. Process this allocation.
9469  // There is allocation with suballoc.offset, suballoc.size.
9470  ++alloc2ndCount;
9471  usedBytes += suballoc.size;
9472 
9473  // 3. Prepare for next iteration.
9474  lastOffset = suballoc.offset + suballoc.size;
9475  ++nextAlloc2ndIndex;
9476  }
9477  // We are at the end.
9478  else
9479  {
9480  if(lastOffset < freeSpace2ndTo1stEnd)
9481  {
9482  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9483  ++unusedRangeCount;
9484  }
9485 
9486  // End of loop.
9487  lastOffset = freeSpace2ndTo1stEnd;
9488  }
9489  }
9490  }
9491 
9492  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9493  size_t alloc1stCount = 0;
9494  const VkDeviceSize freeSpace1stTo2ndEnd =
9495  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9496  while(lastOffset < freeSpace1stTo2ndEnd)
9497  {
9498  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9499  while(nextAlloc1stIndex < suballoc1stCount &&
9500  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9501  {
9502  ++nextAlloc1stIndex;
9503  }
9504 
9505  // Found non-null allocation.
9506  if(nextAlloc1stIndex < suballoc1stCount)
9507  {
9508  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9509 
9510  // 1. Process free space before this allocation.
9511  if(lastOffset < suballoc.offset)
9512  {
9513  // There is free space from lastOffset to suballoc.offset.
9514  ++unusedRangeCount;
9515  }
9516 
9517  // 2. Process this allocation.
9518  // There is allocation with suballoc.offset, suballoc.size.
9519  ++alloc1stCount;
9520  usedBytes += suballoc.size;
9521 
9522  // 3. Prepare for next iteration.
9523  lastOffset = suballoc.offset + suballoc.size;
9524  ++nextAlloc1stIndex;
9525  }
9526  // We are at the end.
9527  else
9528  {
9529  if(lastOffset < freeSpace1stTo2ndEnd)
9530  {
9531  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9532  ++unusedRangeCount;
9533  }
9534 
9535  // End of loop.
9536  lastOffset = freeSpace1stTo2ndEnd;
9537  }
9538  }
9539 
9540  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9541  {
9542  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9543  while(lastOffset < size)
9544  {
9545  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9546  while(nextAlloc2ndIndex != SIZE_MAX &&
9547  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9548  {
9549  --nextAlloc2ndIndex;
9550  }
9551 
9552  // Found non-null allocation.
9553  if(nextAlloc2ndIndex != SIZE_MAX)
9554  {
9555  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9556 
9557  // 1. Process free space before this allocation.
9558  if(lastOffset < suballoc.offset)
9559  {
9560  // There is free space from lastOffset to suballoc.offset.
9561  ++unusedRangeCount;
9562  }
9563 
9564  // 2. Process this allocation.
9565  // There is allocation with suballoc.offset, suballoc.size.
9566  ++alloc2ndCount;
9567  usedBytes += suballoc.size;
9568 
9569  // 3. Prepare for next iteration.
9570  lastOffset = suballoc.offset + suballoc.size;
9571  --nextAlloc2ndIndex;
9572  }
9573  // We are at the end.
9574  else
9575  {
9576  if(lastOffset < size)
9577  {
9578  // There is free space from lastOffset to size.
9579  ++unusedRangeCount;
9580  }
9581 
9582  // End of loop.
9583  lastOffset = size;
9584  }
9585  }
9586  }
9587 
9588  const VkDeviceSize unusedBytes = size - usedBytes;
9589  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9590 
9591  // SECOND PASS
9592  lastOffset = 0;
9593 
9594  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9595  {
9596  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9597  size_t nextAlloc2ndIndex = 0;
9598  while(lastOffset < freeSpace2ndTo1stEnd)
9599  {
9600  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9601  while(nextAlloc2ndIndex < suballoc2ndCount &&
9602  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9603  {
9604  ++nextAlloc2ndIndex;
9605  }
9606 
9607  // Found non-null allocation.
9608  if(nextAlloc2ndIndex < suballoc2ndCount)
9609  {
9610  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9611 
9612  // 1. Process free space before this allocation.
9613  if(lastOffset < suballoc.offset)
9614  {
9615  // There is free space from lastOffset to suballoc.offset.
9616  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9617  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9618  }
9619 
9620  // 2. Process this allocation.
9621  // There is allocation with suballoc.offset, suballoc.size.
9622  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9623 
9624  // 3. Prepare for next iteration.
9625  lastOffset = suballoc.offset + suballoc.size;
9626  ++nextAlloc2ndIndex;
9627  }
9628  // We are at the end.
9629  else
9630  {
9631  if(lastOffset < freeSpace2ndTo1stEnd)
9632  {
9633  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9634  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9635  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9636  }
9637 
9638  // End of loop.
9639  lastOffset = freeSpace2ndTo1stEnd;
9640  }
9641  }
9642  }
9643 
9644  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9645  while(lastOffset < freeSpace1stTo2ndEnd)
9646  {
9647  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9648  while(nextAlloc1stIndex < suballoc1stCount &&
9649  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9650  {
9651  ++nextAlloc1stIndex;
9652  }
9653 
9654  // Found non-null allocation.
9655  if(nextAlloc1stIndex < suballoc1stCount)
9656  {
9657  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9658 
9659  // 1. Process free space before this allocation.
9660  if(lastOffset < suballoc.offset)
9661  {
9662  // There is free space from lastOffset to suballoc.offset.
9663  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9664  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9665  }
9666 
9667  // 2. Process this allocation.
9668  // There is allocation with suballoc.offset, suballoc.size.
9669  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9670 
9671  // 3. Prepare for next iteration.
9672  lastOffset = suballoc.offset + suballoc.size;
9673  ++nextAlloc1stIndex;
9674  }
9675  // We are at the end.
9676  else
9677  {
9678  if(lastOffset < freeSpace1stTo2ndEnd)
9679  {
9680  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9681  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9682  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9683  }
9684 
9685  // End of loop.
9686  lastOffset = freeSpace1stTo2ndEnd;
9687  }
9688  }
9689 
9690  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9691  {
9692  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9693  while(lastOffset < size)
9694  {
9695  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9696  while(nextAlloc2ndIndex != SIZE_MAX &&
9697  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9698  {
9699  --nextAlloc2ndIndex;
9700  }
9701 
9702  // Found non-null allocation.
9703  if(nextAlloc2ndIndex != SIZE_MAX)
9704  {
9705  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9706 
9707  // 1. Process free space before this allocation.
9708  if(lastOffset < suballoc.offset)
9709  {
9710  // There is free space from lastOffset to suballoc.offset.
9711  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9712  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9713  }
9714 
9715  // 2. Process this allocation.
9716  // There is allocation with suballoc.offset, suballoc.size.
9717  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9718 
9719  // 3. Prepare for next iteration.
9720  lastOffset = suballoc.offset + suballoc.size;
9721  --nextAlloc2ndIndex;
9722  }
9723  // We are at the end.
9724  else
9725  {
9726  if(lastOffset < size)
9727  {
9728  // There is free space from lastOffset to size.
9729  const VkDeviceSize unusedRangeSize = size - lastOffset;
9730  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9731  }
9732 
9733  // End of loop.
9734  lastOffset = size;
9735  }
9736  }
9737  }
9738 
9739  PrintDetailedMap_End(json);
9740 }
9741 #endif // #if VMA_STATS_STRING_ENABLED
9742 
9743 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9744  uint32_t currentFrameIndex,
9745  uint32_t frameInUseCount,
9746  VkDeviceSize bufferImageGranularity,
9747  VkDeviceSize allocSize,
9748  VkDeviceSize allocAlignment,
9749  bool upperAddress,
9750  VmaSuballocationType allocType,
9751  bool canMakeOtherLost,
9752  uint32_t strategy,
9753  VmaAllocationRequest* pAllocationRequest)
9754 {
9755  VMA_ASSERT(allocSize > 0);
9756  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9757  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9758  VMA_HEAVY_ASSERT(Validate());
9759  return upperAddress ?
9760  CreateAllocationRequest_UpperAddress(
9761  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9762  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9763  CreateAllocationRequest_LowerAddress(
9764  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9765  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9766 }
9767 
9768 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9769  uint32_t currentFrameIndex,
9770  uint32_t frameInUseCount,
9771  VkDeviceSize bufferImageGranularity,
9772  VkDeviceSize allocSize,
9773  VkDeviceSize allocAlignment,
9774  VmaSuballocationType allocType,
9775  bool canMakeOtherLost,
9776  uint32_t strategy,
9777  VmaAllocationRequest* pAllocationRequest)
9778 {
9779  const VkDeviceSize size = GetSize();
9780  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9781  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9782 
9783  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9784  {
9785  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9786  return false;
9787  }
9788 
9789  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9790  if(allocSize > size)
9791  {
9792  return false;
9793  }
9794  VkDeviceSize resultBaseOffset = size - allocSize;
9795  if(!suballocations2nd.empty())
9796  {
9797  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9798  resultBaseOffset = lastSuballoc.offset - allocSize;
9799  if(allocSize > lastSuballoc.offset)
9800  {
9801  return false;
9802  }
9803  }
9804 
9805  // Start from offset equal to end of free space.
9806  VkDeviceSize resultOffset = resultBaseOffset;
9807 
9808  // Apply VMA_DEBUG_MARGIN at the end.
9809  if(VMA_DEBUG_MARGIN > 0)
9810  {
9811  if(resultOffset < VMA_DEBUG_MARGIN)
9812  {
9813  return false;
9814  }
9815  resultOffset -= VMA_DEBUG_MARGIN;
9816  }
9817 
9818  // Apply alignment.
9819  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9820 
9821  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9822  // Make bigger alignment if necessary.
9823  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9824  {
9825  bool bufferImageGranularityConflict = false;
9826  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9827  {
9828  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9829  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9830  {
9831  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9832  {
9833  bufferImageGranularityConflict = true;
9834  break;
9835  }
9836  }
9837  else
9838  // Already on previous page.
9839  break;
9840  }
9841  if(bufferImageGranularityConflict)
9842  {
9843  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9844  }
9845  }
9846 
9847  // There is enough free space.
9848  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9849  suballocations1st.back().offset + suballocations1st.back().size :
9850  0;
9851  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9852  {
9853  // Check previous suballocations for BufferImageGranularity conflicts.
9854  // If conflict exists, allocation cannot be made here.
9855  if(bufferImageGranularity > 1)
9856  {
9857  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9858  {
9859  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9860  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9861  {
9862  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9863  {
9864  return false;
9865  }
9866  }
9867  else
9868  {
9869  // Already on next page.
9870  break;
9871  }
9872  }
9873  }
9874 
9875  // All tests passed: Success.
9876  pAllocationRequest->offset = resultOffset;
9877  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9878  pAllocationRequest->sumItemSize = 0;
9879  // pAllocationRequest->item unused.
9880  pAllocationRequest->itemsToMakeLostCount = 0;
9881  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9882  return true;
9883  }
9884 
9885  return false;
9886 }
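/*
Upper-address allocation places the request at the top of the free region and
aligns downward, mirroring the math above. A sketch with the margin reserved
before aligning (alignDown mirrors VmaAlignDown; the other names are
illustrative):

    #include <cstdint>

    inline uint64_t alignDown(uint64_t v, uint64_t a) { return v / a * a; }

    // freeBegin is the end of the 1st vector; freeEnd is one past the highest
    // free byte. Returns false if the request (with margins) cannot fit.
    inline bool placeAtUpperAddress(
        uint64_t freeBegin, uint64_t freeEnd,
        uint64_t allocSize, uint64_t allocAlignment, uint64_t debugMargin,
        uint64_t* pOffset)
    {
        if(freeEnd < allocSize + debugMargin)
        {
            return false; // would underflow: request cannot fit at all
        }
        const uint64_t offset =
            alignDown(freeEnd - allocSize - debugMargin, allocAlignment);
        if(offset < freeBegin + debugMargin)
        {
            return false; // would collide with the end of the 1st vector
        }
        *pOffset = offset;
        return true;
    }
*/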
9887 
9888 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9889  uint32_t currentFrameIndex,
9890  uint32_t frameInUseCount,
9891  VkDeviceSize bufferImageGranularity,
9892  VkDeviceSize allocSize,
9893  VkDeviceSize allocAlignment,
9894  VmaSuballocationType allocType,
9895  bool canMakeOtherLost,
9896  uint32_t strategy,
9897  VmaAllocationRequest* pAllocationRequest)
9898 {
9899  const VkDeviceSize size = GetSize();
9900  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9901  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9902 
9903  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9904  {
9905  // Try to allocate at the end of 1st vector.
9906 
9907  VkDeviceSize resultBaseOffset = 0;
9908  if(!suballocations1st.empty())
9909  {
9910  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9911  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9912  }
9913 
9914  // Start from offset equal to beginning of free space.
9915  VkDeviceSize resultOffset = resultBaseOffset;
9916 
9917  // Apply VMA_DEBUG_MARGIN at the beginning.
9918  if(VMA_DEBUG_MARGIN > 0)
9919  {
9920  resultOffset += VMA_DEBUG_MARGIN;
9921  }
9922 
9923  // Apply alignment.
9924  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9925 
9926  // Check previous suballocations for BufferImageGranularity conflicts.
9927  // Make bigger alignment if necessary.
9928  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9929  {
9930  bool bufferImageGranularityConflict = false;
9931  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9932  {
9933  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9934  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9935  {
9936  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9937  {
9938  bufferImageGranularityConflict = true;
9939  break;
9940  }
9941  }
9942  else
9943  // Already on previous page.
9944  break;
9945  }
9946  if(bufferImageGranularityConflict)
9947  {
9948  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9949  }
9950  }
9951 
9952  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9953  suballocations2nd.back().offset : size;
9954 
9955  // There is enough free space at the end after alignment.
9956  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9957  {
9958  // Check next suballocations for BufferImageGranularity conflicts.
9959  // If conflict exists, allocation cannot be made here.
9960  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9961  {
9962  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9963  {
9964  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9965  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9966  {
9967  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9968  {
9969  return false;
9970  }
9971  }
9972  else
9973  {
9974  // Already on previous page.
9975  break;
9976  }
9977  }
9978  }
9979 
9980  // All tests passed: Success.
9981  pAllocationRequest->offset = resultOffset;
9982  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9983  pAllocationRequest->sumItemSize = 0;
9984  // pAllocationRequest->item, customData unused.
9985  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9986  pAllocationRequest->itemsToMakeLostCount = 0;
9987  return true;
9988  }
9989  }
9990 
9991  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9992  // beginning of 1st vector as the end of free space.
9993  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9994  {
9995  VMA_ASSERT(!suballocations1st.empty());
9996 
9997  VkDeviceSize resultBaseOffset = 0;
9998  if(!suballocations2nd.empty())
9999  {
10000  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10001  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10002  }
10003 
10004  // Start from offset equal to beginning of free space.
10005  VkDeviceSize resultOffset = resultBaseOffset;
10006 
10007  // Apply VMA_DEBUG_MARGIN at the beginning.
10008  if(VMA_DEBUG_MARGIN > 0)
10009  {
10010  resultOffset += VMA_DEBUG_MARGIN;
10011  }
10012 
10013  // Apply alignment.
10014  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10015 
10016  // Check previous suballocations for BufferImageGranularity conflicts.
10017  // Make bigger alignment if necessary.
10018  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10019  {
10020  bool bufferImageGranularityConflict = false;
10021  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10022  {
10023  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10024  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10025  {
10026  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10027  {
10028  bufferImageGranularityConflict = true;
10029  break;
10030  }
10031  }
10032  else
10033  // Already on previous page.
10034  break;
10035  }
10036  if(bufferImageGranularityConflict)
10037  {
10038  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10039  }
10040  }
10041 
10042  pAllocationRequest->itemsToMakeLostCount = 0;
10043  pAllocationRequest->sumItemSize = 0;
10044  size_t index1st = m_1stNullItemsBeginCount;
10045 
10046  if(canMakeOtherLost)
10047  {
10048  while(index1st < suballocations1st.size() &&
10049  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10050  {
10051  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10052  const VmaSuballocation& suballoc = suballocations1st[index1st];
10053  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10054  {
10055  // No problem.
10056  }
10057  else
10058  {
10059  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10060  if(suballoc.hAllocation->CanBecomeLost() &&
10061  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10062  {
10063  ++pAllocationRequest->itemsToMakeLostCount;
10064  pAllocationRequest->sumItemSize += suballoc.size;
10065  }
10066  else
10067  {
10068  return false;
10069  }
10070  }
10071  ++index1st;
10072  }
10073 
10074  // Check next suballocations for BufferImageGranularity conflicts.
10075  // If conflict exists, we must mark more allocations lost or fail.
10076  if(bufferImageGranularity > 1)
10077  {
10078  while(index1st < suballocations1st.size())
10079  {
10080  const VmaSuballocation& suballoc = suballocations1st[index1st];
10081  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10082  {
10083  if(suballoc.hAllocation != VK_NULL_HANDLE)
10084  {
10085  // Conservatively not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type): any allocation on the same page must be made lost.
10086  if(suballoc.hAllocation->CanBecomeLost() &&
10087  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10088  {
10089  ++pAllocationRequest->itemsToMakeLostCount;
10090  pAllocationRequest->sumItemSize += suballoc.size;
10091  }
10092  else
10093  {
10094  return false;
10095  }
10096  }
10097  }
10098  else
10099  {
10100  // Already on next page.
10101  break;
10102  }
10103  ++index1st;
10104  }
10105  }
10106 
10107  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10108  if(index1st == suballocations1st.size() &&
10109  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10110  {
10111  // TODO: Wrapping the request around to the beginning of the 2nd vector is not implemented here yet, so the allocation fails in this case. This is a known limitation.
10112  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10113  }
10114  }
10115 
10116  // There is enough free space at the end after alignment.
10117  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10118  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10119  {
10120  // Check next suballocations for BufferImageGranularity conflicts.
10121  // If conflict exists, allocation cannot be made here.
10122  if(bufferImageGranularity > 1)
10123  {
10124  for(size_t nextSuballocIndex = index1st;
10125  nextSuballocIndex < suballocations1st.size();
10126  nextSuballocIndex++)
10127  {
10128  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10129  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10130  {
10131  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10132  {
10133  return false;
10134  }
10135  }
10136  else
10137  {
10138  // Already on next page.
10139  break;
10140  }
10141  }
10142  }
10143 
10144  // All tests passed: Success.
10145  pAllocationRequest->offset = resultOffset;
10146  pAllocationRequest->sumFreeSize =
10147  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10148  - resultBaseOffset
10149  - pAllocationRequest->sumItemSize;
10150  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10151  // pAllocationRequest->item, customData unused.
10152  return true;
10153  }
10154  }
10155 
10156  return false;
10157 }
10158 
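// Added commentary (not part of the original source): a sketch of the layout
// that CreateAllocationRequest() above navigates. The linear metadata keeps two
// vectors of suballocations sorted by offset. In ring-buffer mode the block
// looks like this:
//
//   | 2nd: xxxxxx |      free      | 1st: yyyy zzzzzz |
//   offset 0                                          GetSize()
//
// New requests are first tried at the end of 1st (EndOf1st); when they no
// longer fit before GetSize(), the request wraps around to low offsets and
// grows 2nd (EndOf2nd), with the first still-live item of 1st acting as the
// end of the free space. In double-stack mode 2nd instead grows downward from
// GetSize() toward 1st.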
10159 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10160  uint32_t currentFrameIndex,
10161  uint32_t frameInUseCount,
10162  VmaAllocationRequest* pAllocationRequest)
10163 {
10164  if(pAllocationRequest->itemsToMakeLostCount == 0)
10165  {
10166  return true;
10167  }
10168 
10169  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10170 
10171  // We always start from 1st.
10172  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10173  size_t index = m_1stNullItemsBeginCount;
10174  size_t madeLostCount = 0;
10175  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10176  {
10177  if(index == suballocations->size())
10178  {
10179  index = 0;
10180  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10181  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10182  {
10183  suballocations = &AccessSuballocations2nd();
10184  }
10185  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10186  // suballocations continues pointing at AccessSuballocations1st().
10187  VMA_ASSERT(!suballocations->empty());
10188  }
10189  VmaSuballocation& suballoc = (*suballocations)[index];
10190  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10191  {
10192  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10193  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10194  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10195  {
10196  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10197  suballoc.hAllocation = VK_NULL_HANDLE;
10198  m_SumFreeSize += suballoc.size;
10199  if(suballocations == &AccessSuballocations1st())
10200  {
10201  ++m_1stNullItemsMiddleCount;
10202  }
10203  else
10204  {
10205  ++m_2ndNullItemsCount;
10206  }
10207  ++madeLostCount;
10208  }
10209  else
10210  {
10211  return false;
10212  }
10213  }
10214  ++index;
10215  }
10216 
10217  CleanupAfterFree();
10218  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10219 
10220  return true;
10221 }
10222 
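// Illustrative usage sketch (added commentary, not part of the library): how a
// caller is expected to drive the lost-allocation machinery implemented above.
// ExampleLostAllocations and its parameters are hypothetical; the functions and
// flags used are the public API declared earlier in this file. Guarded with
// #if 0 so it does not participate in compilation.
#if 0
static void ExampleLostAllocations(
    VmaAllocator allocator,
    uint32_t frameIndex,
    const VkBufferCreateInfo* pBufCreateInfo)
{
    // Advance the frame index so that GetLastUseFrameIndex() + frameInUseCount
    // comparisons against the current frame can mark old allocations as lost.
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, pBufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);

    // Each frame, before using the allocation: touch it. VK_FALSE means some
    // later allocation made it lost via MakeRequestedAllocationsLost() /
    // MakeAllocationsLost() above and its contents are gone - the buffer must
    // be recreated (and eventually destroyed with vmaDestroyBuffer()).
    if(res == VK_SUCCESS && vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Recreate buf/alloc here.
    }
}
#endif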
10223 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10224 {
10225  uint32_t lostAllocationCount = 0;
10226 
10227  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10228  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10229  {
10230  VmaSuballocation& suballoc = suballocations1st[i];
10231  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10232  suballoc.hAllocation->CanBecomeLost() &&
10233  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10234  {
10235  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10236  suballoc.hAllocation = VK_NULL_HANDLE;
10237  ++m_1stNullItemsMiddleCount;
10238  m_SumFreeSize += suballoc.size;
10239  ++lostAllocationCount;
10240  }
10241  }
10242 
10243  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10244  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10245  {
10246  VmaSuballocation& suballoc = suballocations2nd[i];
10247  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10248  suballoc.hAllocation->CanBecomeLost() &&
10249  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10250  {
10251  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10252  suballoc.hAllocation = VK_NULL_HANDLE;
10253  ++m_2ndNullItemsCount;
10254  m_SumFreeSize += suballoc.size;
10255  ++lostAllocationCount;
10256  }
10257  }
10258 
10259  if(lostAllocationCount)
10260  {
10261  CleanupAfterFree();
10262  }
10263 
10264  return lostAllocationCount;
10265 }
10266 
10267 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10268 {
10269  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10270  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10271  {
10272  const VmaSuballocation& suballoc = suballocations1st[i];
10273  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10274  {
10275  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10276  {
10277  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10278  return VK_ERROR_VALIDATION_FAILED_EXT;
10279  }
10280  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10281  {
10282  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10283  return VK_ERROR_VALIDATION_FAILED_EXT;
10284  }
10285  }
10286  }
10287 
10288  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10289  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10290  {
10291  const VmaSuballocation& suballoc = suballocations2nd[i];
10292  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10293  {
10294  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10295  {
10296  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10297  return VK_ERROR_VALIDATION_FAILED_EXT;
10298  }
10299  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10300  {
10301  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10302  return VK_ERROR_VALIDATION_FAILED_EXT;
10303  }
10304  }
10305  }
10306 
10307  return VK_SUCCESS;
10308 }
10309 
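// Added commentary (not part of the original source): with VMA_DEBUG_MARGIN > 0
// and VMA_DEBUG_DETECT_CORRUPTION enabled, every allocation is surrounded by a
// margin filled with VMA_CORRUPTION_DETECTION_MAGIC_VALUE:
//
//   | ... | margin | allocation | margin | ... |
//                  ^ suballoc.offset
//
// CheckCorruption() above validates the magic pattern filling the margin
// directly before suballoc.offset and directly after suballoc.offset +
// suballoc.size, so an out-of-bounds write on either side is detected.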
10310 void VmaBlockMetadata_Linear::Alloc(
10311  const VmaAllocationRequest& request,
10312  VmaSuballocationType type,
10313  VkDeviceSize allocSize,
10314  VmaAllocation hAllocation)
10315 {
10316  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10317 
10318  switch(request.type)
10319  {
10320  case VmaAllocationRequestType::UpperAddress:
10321  {
10322  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10323  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10324  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10325  suballocations2nd.push_back(newSuballoc);
10326  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10327  }
10328  break;
10329  case VmaAllocationRequestType::EndOf1st:
10330  {
10331  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10332 
10333  VMA_ASSERT(suballocations1st.empty() ||
10334  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10335  // Check if it fits before the end of the block.
10336  VMA_ASSERT(request.offset + allocSize <= GetSize());
10337 
10338  suballocations1st.push_back(newSuballoc);
10339  }
10340  break;
10341  case VmaAllocationRequestType::EndOf2nd:
10342  {
10343  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10344  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10345  VMA_ASSERT(!suballocations1st.empty() &&
10346  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10347  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10348 
10349  switch(m_2ndVectorMode)
10350  {
10351  case SECOND_VECTOR_EMPTY:
10352  // First allocation from second part ring buffer.
10353  VMA_ASSERT(suballocations2nd.empty());
10354  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10355  break;
10356  case SECOND_VECTOR_RING_BUFFER:
10357  // 2-part ring buffer is already started.
10358  VMA_ASSERT(!suballocations2nd.empty());
10359  break;
10360  case SECOND_VECTOR_DOUBLE_STACK:
10361  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10362  break;
10363  default:
10364  VMA_ASSERT(0);
10365  }
10366 
10367  suballocations2nd.push_back(newSuballoc);
10368  }
10369  break;
10370  default:
10371  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10372  }
10373 
10374  m_SumFreeSize -= newSuballoc.size;
10375 }
10376 
10377 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10378 {
10379  FreeAtOffset(allocation->GetOffset());
10380 }
10381 
10382 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10383 {
10384  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10385  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10386 
10387  if(!suballocations1st.empty())
10388  {
10389  // If it is the first remaining allocation in 1st vector: mark it as empty at the beginning.
10390  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10391  if(firstSuballoc.offset == offset)
10392  {
10393  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10394  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10395  m_SumFreeSize += firstSuballoc.size;
10396  ++m_1stNullItemsBeginCount;
10397  CleanupAfterFree();
10398  return;
10399  }
10400  }
10401 
10402  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10403  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10404  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10405  {
10406  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10407  if(lastSuballoc.offset == offset)
10408  {
10409  m_SumFreeSize += lastSuballoc.size;
10410  suballocations2nd.pop_back();
10411  CleanupAfterFree();
10412  return;
10413  }
10414  }
10415  // Last allocation in 1st vector.
10416  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10417  {
10418  VmaSuballocation& lastSuballoc = suballocations1st.back();
10419  if(lastSuballoc.offset == offset)
10420  {
10421  m_SumFreeSize += lastSuballoc.size;
10422  suballocations1st.pop_back();
10423  CleanupAfterFree();
10424  return;
10425  }
10426  }
10427 
10428  // Item from the middle of 1st vector.
10429  {
10430  VmaSuballocation refSuballoc;
10431  refSuballoc.offset = offset;
10432  // The rest of the members intentionally stay uninitialized for better performance.
10433  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10434  suballocations1st.begin() + m_1stNullItemsBeginCount,
10435  suballocations1st.end(),
10436  refSuballoc);
10437  if(it != suballocations1st.end())
10438  {
10439  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10440  it->hAllocation = VK_NULL_HANDLE;
10441  ++m_1stNullItemsMiddleCount;
10442  m_SumFreeSize += it->size;
10443  CleanupAfterFree();
10444  return;
10445  }
10446  }
10447 
10448  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10449  {
10450  // Item from the middle of 2nd vector.
10451  VmaSuballocation refSuballoc;
10452  refSuballoc.offset = offset;
10453  // The rest of the members intentionally stay uninitialized for better performance.
10454  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10455  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10456  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10457  if(it != suballocations2nd.end())
10458  {
10459  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10460  it->hAllocation = VK_NULL_HANDLE;
10461  ++m_2ndNullItemsCount;
10462  m_SumFreeSize += it->size;
10463  CleanupAfterFree();
10464  return;
10465  }
10466  }
10467 
10468  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10469 }
10470 
10471 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10472 {
10473  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10474  const size_t suballocCount = AccessSuballocations1st().size();
10475  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10476 }
10477 
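// Added commentary (not part of the original source): the condition above
// rearranges to nullItemCount >= 1.5 * liveItemCount. Worked example: with 40
// suballocations of which 25 are null, 25 * 2 = 50 >= (40 - 25) * 3 = 45, so
// the 1st vector gets compacted; the suballocCount > 32 guard avoids churn on
// small vectors.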
10478 void VmaBlockMetadata_Linear::CleanupAfterFree()
10479 {
10480  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10481  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10482 
10483  if(IsEmpty())
10484  {
10485  suballocations1st.clear();
10486  suballocations2nd.clear();
10487  m_1stNullItemsBeginCount = 0;
10488  m_1stNullItemsMiddleCount = 0;
10489  m_2ndNullItemsCount = 0;
10490  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10491  }
10492  else
10493  {
10494  const size_t suballoc1stCount = suballocations1st.size();
10495  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10496  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10497 
10498  // Find more null items at the beginning of 1st vector.
10499  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10500  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10501  {
10502  ++m_1stNullItemsBeginCount;
10503  --m_1stNullItemsMiddleCount;
10504  }
10505 
10506  // Find more null items at the end of 1st vector.
10507  while(m_1stNullItemsMiddleCount > 0 &&
10508  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10509  {
10510  --m_1stNullItemsMiddleCount;
10511  suballocations1st.pop_back();
10512  }
10513 
10514  // Find more null items at the end of 2nd vector.
10515  while(m_2ndNullItemsCount > 0 &&
10516  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10517  {
10518  --m_2ndNullItemsCount;
10519  suballocations2nd.pop_back();
10520  }
10521 
10522  // Find more null items at the beginning of 2nd vector.
10523  while(m_2ndNullItemsCount > 0 &&
10524  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10525  {
10526  --m_2ndNullItemsCount;
10527  suballocations2nd.remove(0);
10528  }
10529 
10530  if(ShouldCompact1st())
10531  {
10532  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10533  size_t srcIndex = m_1stNullItemsBeginCount;
10534  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10535  {
10536  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10537  {
10538  ++srcIndex;
10539  }
10540  if(dstIndex != srcIndex)
10541  {
10542  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10543  }
10544  ++srcIndex;
10545  }
10546  suballocations1st.resize(nonNullItemCount);
10547  m_1stNullItemsBeginCount = 0;
10548  m_1stNullItemsMiddleCount = 0;
10549  }
10550 
10551  // 2nd vector became empty.
10552  if(suballocations2nd.empty())
10553  {
10554  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10555  }
10556 
10557  // 1st vector became empty.
10558  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10559  {
10560  suballocations1st.clear();
10561  m_1stNullItemsBeginCount = 0;
10562 
10563  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10564  {
10565  // Swap 1st with 2nd. Now 2nd is empty.
10566  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10567  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10568  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10569  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10570  {
10571  ++m_1stNullItemsBeginCount;
10572  --m_1stNullItemsMiddleCount;
10573  }
10574  m_2ndNullItemsCount = 0;
10575  m_1stVectorIndex ^= 1;
10576  }
10577  }
10578  }
10579 
10580  VMA_HEAVY_ASSERT(Validate());
10581 }
10582 
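// Added commentary (not part of the original source): CleanupAfterFree() never
// erases freed items from the middle of the vectors immediately. Instead they
// become "null items" that are only counted (m_1stNullItemsBeginCount,
// m_1stNullItemsMiddleCount, m_2ndNullItemsCount) and trimmed from the ends,
// with a full compaction of 1st deferred until ShouldCompact1st() fires. This
// keeps the common free-at-either-end path cheap, which is the usage pattern
// the linear algorithm is designed for.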
10583 
10584 ////////////////////////////////////////////////////////////////////////////////
10585 // class VmaBlockMetadata_Buddy
10586 
10587 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10588  VmaBlockMetadata(hAllocator),
10589  m_Root(VMA_NULL),
10590  m_AllocationCount(0),
10591  m_FreeCount(1),
10592  m_SumFreeSize(0)
10593 {
10594  memset(m_FreeList, 0, sizeof(m_FreeList));
10595 }
10596 
10597 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10598 {
10599  DeleteNode(m_Root);
10600 }
10601 
10602 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10603 {
10604  VmaBlockMetadata::Init(size);
10605 
10606  m_UsableSize = VmaPrevPow2(size);
10607  m_SumFreeSize = m_UsableSize;
10608 
10609  // Calculate m_LevelCount.
10610  m_LevelCount = 1;
10611  while(m_LevelCount < MAX_LEVELS &&
10612  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10613  {
10614  ++m_LevelCount;
10615  }
10616 
10617  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10618  rootNode->offset = 0;
10619  rootNode->type = Node::TYPE_FREE;
10620  rootNode->parent = VMA_NULL;
10621  rootNode->buddy = VMA_NULL;
10622 
10623  m_Root = rootNode;
10624  AddToFreeListFront(0, rootNode);
10625 }
10626 
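// Added commentary (not part of the original source): a worked example of the
// level math in Init() above, assuming LevelToNodeSize(level) returns
// m_UsableSize >> level. For a block created with size = 100 MB,
// m_UsableSize = VmaPrevPow2(size) = 64 MiB; the tail above 64 MiB is the
// "unusable" space reported by GetUnusableSize(). Levels then halve the node
// size: level 0 = 64 MiB, level 1 = 32 MiB, ..., stopping at MAX_LEVELS or
// when nodes would drop below MIN_NODE_SIZE.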
10627 bool VmaBlockMetadata_Buddy::Validate() const
10628 {
10629  // Validate tree.
10630  ValidationContext ctx;
10631  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10632  {
10633  VMA_VALIDATE(false && "ValidateNode failed.");
10634  }
10635  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10636  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10637 
10638  // Validate free node lists.
10639  for(uint32_t level = 0; level < m_LevelCount; ++level)
10640  {
10641  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10642  m_FreeList[level].front->free.prev == VMA_NULL);
10643 
10644  for(Node* node = m_FreeList[level].front;
10645  node != VMA_NULL;
10646  node = node->free.next)
10647  {
10648  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10649 
10650  if(node->free.next == VMA_NULL)
10651  {
10652  VMA_VALIDATE(m_FreeList[level].back == node);
10653  }
10654  else
10655  {
10656  VMA_VALIDATE(node->free.next->free.prev == node);
10657  }
10658  }
10659  }
10660 
10661  // Validate that free lists at higher levels are empty.
10662  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10663  {
10664  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10665  }
10666 
10667  return true;
10668 }
10669 
10670 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10671 {
10672  for(uint32_t level = 0; level < m_LevelCount; ++level)
10673  {
10674  if(m_FreeList[level].front != VMA_NULL)
10675  {
10676  return LevelToNodeSize(level);
10677  }
10678  }
10679  return 0;
10680 }
10681 
10682 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10683 {
10684  const VkDeviceSize unusableSize = GetUnusableSize();
10685 
10686  outInfo.blockCount = 1;
10687 
10688  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10689  outInfo.usedBytes = outInfo.unusedBytes = 0;
10690 
10691  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10692  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10693  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10694 
10695  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10696 
10697  if(unusableSize > 0)
10698  {
10699  ++outInfo.unusedRangeCount;
10700  outInfo.unusedBytes += unusableSize;
10701  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10702  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10703  }
10704 }
10705 
10706 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10707 {
10708  const VkDeviceSize unusableSize = GetUnusableSize();
10709 
10710  inoutStats.size += GetSize();
10711  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10712  inoutStats.allocationCount += m_AllocationCount;
10713  inoutStats.unusedRangeCount += m_FreeCount;
10714  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10715 
10716  if(unusableSize > 0)
10717  {
10718  ++inoutStats.unusedRangeCount;
10719  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10720  }
10721 }
10722 
10723 #if VMA_STATS_STRING_ENABLED
10724 
10725 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10726 {
10727  // TODO optimize
10728  VmaStatInfo stat;
10729  CalcAllocationStatInfo(stat);
10730 
10731  PrintDetailedMap_Begin(
10732  json,
10733  stat.unusedBytes,
10734  stat.allocationCount,
10735  stat.unusedRangeCount);
10736 
10737  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10738 
10739  const VkDeviceSize unusableSize = GetUnusableSize();
10740  if(unusableSize > 0)
10741  {
10742  PrintDetailedMap_UnusedRange(json,
10743  m_UsableSize, // offset
10744  unusableSize); // size
10745  }
10746 
10747  PrintDetailedMap_End(json);
10748 }
10749 
10750 #endif // #if VMA_STATS_STRING_ENABLED
10751 
10752 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10753  uint32_t currentFrameIndex,
10754  uint32_t frameInUseCount,
10755  VkDeviceSize bufferImageGranularity,
10756  VkDeviceSize allocSize,
10757  VkDeviceSize allocAlignment,
10758  bool upperAddress,
10759  VmaSuballocationType allocType,
10760  bool canMakeOtherLost,
10761  uint32_t strategy,
10762  VmaAllocationRequest* pAllocationRequest)
10763 {
10764  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10765 
10766  // Simple way to respect bufferImageGranularity. May be optimized some day.
10767  // Whenever the request might be an OPTIMAL image, inflate its alignment and size up to bufferImageGranularity.
10768  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10769  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10770  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10771  {
10772  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10773  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10774  }
10775 
10776  if(allocSize > m_UsableSize)
10777  {
10778  return false;
10779  }
10780 
10781  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10782  for(uint32_t level = targetLevel + 1; level--; )
10783  {
10784  for(Node* freeNode = m_FreeList[level].front;
10785  freeNode != VMA_NULL;
10786  freeNode = freeNode->free.next)
10787  {
10788  if(freeNode->offset % allocAlignment == 0)
10789  {
10790  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10791  pAllocationRequest->offset = freeNode->offset;
10792  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10793  pAllocationRequest->sumItemSize = 0;
10794  pAllocationRequest->itemsToMakeLostCount = 0;
10795  pAllocationRequest->customData = (void*)(uintptr_t)level;
10796  return true;
10797  }
10798  }
10799  }
10800 
10801  return false;
10802 }
10803 
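// Added commentary (not part of the original source): the loop above visits
// level = targetLevel, targetLevel - 1, ..., 0, i.e. it first looks for a free
// node of exactly the requested size class and only then for progressively
// larger ones, which Alloc() below splits back down to targetLevel, always
// descending through left children so the offset found here (and thus the
// alignment check) is preserved.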
10804 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10805  uint32_t currentFrameIndex,
10806  uint32_t frameInUseCount,
10807  VmaAllocationRequest* pAllocationRequest)
10808 {
10809  /*
10810  Lost allocations are not supported in buddy allocator at the moment.
10811  Support might be added in the future.
10812  */
10813  return pAllocationRequest->itemsToMakeLostCount == 0;
10814 }
10815 
10816 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10817 {
10818  /*
10819  Lost allocations are not supported in buddy allocator at the moment.
10820  Support might be added in the future.
10821  */
10822  return 0;
10823 }
10824 
10825 void VmaBlockMetadata_Buddy::Alloc(
10826  const VmaAllocationRequest& request,
10827  VmaSuballocationType type,
10828  VkDeviceSize allocSize,
10829  VmaAllocation hAllocation)
10830 {
10831  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10832 
10833  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10834  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10835 
10836  Node* currNode = m_FreeList[currLevel].front;
10837  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10838  while(currNode->offset != request.offset)
10839  {
10840  currNode = currNode->free.next;
10841  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10842  }
10843 
10844  // Go down, splitting free nodes.
10845  while(currLevel < targetLevel)
10846  {
10847  // currNode is already first free node at currLevel.
10848  // Remove it from list of free nodes at this currLevel.
10849  RemoveFromFreeList(currLevel, currNode);
10850 
10851  const uint32_t childrenLevel = currLevel + 1;
10852 
10853  // Create two free sub-nodes.
10854  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10855  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10856 
10857  leftChild->offset = currNode->offset;
10858  leftChild->type = Node::TYPE_FREE;
10859  leftChild->parent = currNode;
10860  leftChild->buddy = rightChild;
10861 
10862  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10863  rightChild->type = Node::TYPE_FREE;
10864  rightChild->parent = currNode;
10865  rightChild->buddy = leftChild;
10866 
10867  // Convert current currNode to split type.
10868  currNode->type = Node::TYPE_SPLIT;
10869  currNode->split.leftChild = leftChild;
10870 
10871  // Add child nodes to free list. Order is important!
10872  AddToFreeListFront(childrenLevel, rightChild);
10873  AddToFreeListFront(childrenLevel, leftChild);
10874 
10875  ++m_FreeCount;
10876  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10877  ++currLevel;
10878  currNode = m_FreeList[currLevel].front;
10879 
10880  /*
10881  We can be sure that currNode, as left child of node previously split,
10882  also fulfills the alignment requirement.
10883  */
10884  }
10885 
10886  // Remove from free list.
10887  VMA_ASSERT(currLevel == targetLevel &&
10888  currNode != VMA_NULL &&
10889  currNode->type == Node::TYPE_FREE);
10890  RemoveFromFreeList(currLevel, currNode);
10891 
10892  // Convert to allocation node.
10893  currNode->type = Node::TYPE_ALLOCATION;
10894  currNode->allocation.alloc = hAllocation;
10895 
10896  ++m_AllocationCount;
10897  --m_FreeCount;
10898  m_SumFreeSize -= allocSize;
10899 }
10900 
10901 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10902 {
10903  if(node->type == Node::TYPE_SPLIT)
10904  {
10905  DeleteNode(node->split.leftChild->buddy);
10906  DeleteNode(node->split.leftChild);
10907  }
10908 
10909  vma_delete(GetAllocationCallbacks(), node);
10910 }
10911 
10912 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10913 {
10914  VMA_VALIDATE(level < m_LevelCount);
10915  VMA_VALIDATE(curr->parent == parent);
10916  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10917  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10918  switch(curr->type)
10919  {
10920  case Node::TYPE_FREE:
10921  // curr->free.prev, next are validated separately.
10922  ctx.calculatedSumFreeSize += levelNodeSize;
10923  ++ctx.calculatedFreeCount;
10924  break;
10925  case Node::TYPE_ALLOCATION:
10926  ++ctx.calculatedAllocationCount;
10927  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10928  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10929  break;
10930  case Node::TYPE_SPLIT:
10931  {
10932  const uint32_t childrenLevel = level + 1;
10933  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10934  const Node* const leftChild = curr->split.leftChild;
10935  VMA_VALIDATE(leftChild != VMA_NULL);
10936  VMA_VALIDATE(leftChild->offset == curr->offset);
10937  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10938  {
10939  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10940  }
10941  const Node* const rightChild = leftChild->buddy;
10942  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10943  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10944  {
10945  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10946  }
10947  }
10948  break;
10949  default:
10950  return false;
10951  }
10952 
10953  return true;
10954 }
10955 
10956 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10957 {
10958  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10959  uint32_t level = 0;
10960  VkDeviceSize currLevelNodeSize = m_UsableSize;
10961  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10962  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10963  {
10964  ++level;
10965  currLevelNodeSize = nextLevelNodeSize;
10966  nextLevelNodeSize = currLevelNodeSize >> 1;
10967  }
10968  return level;
10969 }
10970 
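// Added commentary (not part of the original source): a worked example of
// AllocSizeToLevel(), assuming m_LevelCount is large enough. With
// m_UsableSize = 256 and allocSize = 48 the loop visits node sizes
// 256 -> 128 -> 64: 48 <= 128 (level 1), 48 <= 64 (level 2), 48 <= 32 fails,
// so it returns level 2, whose 64-byte nodes are the smallest that still fit
// the request.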
10971 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10972 {
10973  // Find node and level.
10974  Node* node = m_Root;
10975  VkDeviceSize nodeOffset = 0;
10976  uint32_t level = 0;
10977  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10978  while(node->type == Node::TYPE_SPLIT)
10979  {
10980  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10981  if(offset < nodeOffset + nextLevelSize)
10982  {
10983  node = node->split.leftChild;
10984  }
10985  else
10986  {
10987  node = node->split.leftChild->buddy;
10988  nodeOffset += nextLevelSize;
10989  }
10990  ++level;
10991  levelNodeSize = nextLevelSize;
10992  }
10993 
10994  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10995  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10996 
10997  ++m_FreeCount;
10998  --m_AllocationCount;
10999  m_SumFreeSize += node->allocation.alloc->GetSize(); // Use the node's own handle: the alloc parameter may legally be VK_NULL_HANDLE per the assert above.
11000 
11001  node->type = Node::TYPE_FREE;
11002 
11003  // Join free nodes if possible.
11004  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11005  {
11006  RemoveFromFreeList(level, node->buddy);
11007  Node* const parent = node->parent;
11008 
11009  vma_delete(GetAllocationCallbacks(), node->buddy);
11010  vma_delete(GetAllocationCallbacks(), node);
11011  parent->type = Node::TYPE_FREE;
11012 
11013  node = parent;
11014  --level;
11015  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11016  --m_FreeCount;
11017  }
11018 
11019  AddToFreeListFront(level, node);
11020 }
11021 
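// Added commentary (not part of the original source): the while loop in
// FreeAtOffset() above is the classic buddy coalescing step. Once the freed
// node is TYPE_FREE, as long as its buddy is also free, both children are
// deleted and the parent one level up becomes the free node, so a fully freed
// block collapses back to a single level-0 node in at most m_LevelCount - 1
// merges.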
11022 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11023 {
11024  switch(node->type)
11025  {
11026  case Node::TYPE_FREE:
11027  ++outInfo.unusedRangeCount;
11028  outInfo.unusedBytes += levelNodeSize;
11029  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11030  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11031  break;
11032  case Node::TYPE_ALLOCATION:
11033  {
11034  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11035  ++outInfo.allocationCount;
11036  outInfo.usedBytes += allocSize;
11037  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11038  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11039 
11040  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11041  if(unusedRangeSize > 0)
11042  {
11043  ++outInfo.unusedRangeCount;
11044  outInfo.unusedBytes += unusedRangeSize;
11045  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11046  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11047  }
11048  }
11049  break;
11050  case Node::TYPE_SPLIT:
11051  {
11052  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11053  const Node* const leftChild = node->split.leftChild;
11054  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11055  const Node* const rightChild = leftChild->buddy;
11056  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11057  }
11058  break;
11059  default:
11060  VMA_ASSERT(0);
11061  }
11062 }
11063 
11064 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11065 {
11066  VMA_ASSERT(node->type == Node::TYPE_FREE);
11067 
11068  // List is empty.
11069  Node* const frontNode = m_FreeList[level].front;
11070  if(frontNode == VMA_NULL)
11071  {
11072  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11073  node->free.prev = node->free.next = VMA_NULL;
11074  m_FreeList[level].front = m_FreeList[level].back = node;
11075  }
11076  else
11077  {
11078  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11079  node->free.prev = VMA_NULL;
11080  node->free.next = frontNode;
11081  frontNode->free.prev = node;
11082  m_FreeList[level].front = node;
11083  }
11084 }
11085 
11086 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11087 {
11088  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11089 
11090  // It is at the front.
11091  if(node->free.prev == VMA_NULL)
11092  {
11093  VMA_ASSERT(m_FreeList[level].front == node);
11094  m_FreeList[level].front = node->free.next;
11095  }
11096  else
11097  {
11098  Node* const prevFreeNode = node->free.prev;
11099  VMA_ASSERT(prevFreeNode->free.next == node);
11100  prevFreeNode->free.next = node->free.next;
11101  }
11102 
11103  // It is at the back.
11104  if(node->free.next == VMA_NULL)
11105  {
11106  VMA_ASSERT(m_FreeList[level].back == node);
11107  m_FreeList[level].back = node->free.prev;
11108  }
11109  else
11110  {
11111  Node* const nextFreeNode = node->free.next;
11112  VMA_ASSERT(nextFreeNode->free.prev == node);
11113  nextFreeNode->free.prev = node->free.prev;
11114  }
11115 }
11116 
11117 #if VMA_STATS_STRING_ENABLED
11118 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11119 {
11120  switch(node->type)
11121  {
11122  case Node::TYPE_FREE:
11123  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11124  break;
11125  case Node::TYPE_ALLOCATION:
11126  {
11127  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11128  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11129  if(allocSize < levelNodeSize)
11130  {
11131  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11132  }
11133  }
11134  break;
11135  case Node::TYPE_SPLIT:
11136  {
11137  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11138  const Node* const leftChild = node->split.leftChild;
11139  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11140  const Node* const rightChild = leftChild->buddy;
11141  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11142  }
11143  break;
11144  default:
11145  VMA_ASSERT(0);
11146  }
11147 }
11148 #endif // #if VMA_STATS_STRING_ENABLED
11149 
11150 
11151 ////////////////////////////////////////////////////////////////////////////////
11152 // class VmaDeviceMemoryBlock
11153 
11154 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11155  m_pMetadata(VMA_NULL),
11156  m_MemoryTypeIndex(UINT32_MAX),
11157  m_Id(0),
11158  m_hMemory(VK_NULL_HANDLE),
11159  m_MapCount(0),
11160  m_pMappedData(VMA_NULL)
11161 {
11162 }
11163 
11164 void VmaDeviceMemoryBlock::Init(
11165  VmaAllocator hAllocator,
11166  VmaPool hParentPool,
11167  uint32_t newMemoryTypeIndex,
11168  VkDeviceMemory newMemory,
11169  VkDeviceSize newSize,
11170  uint32_t id,
11171  uint32_t algorithm)
11172 {
11173  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11174 
11175  m_hParentPool = hParentPool;
11176  m_MemoryTypeIndex = newMemoryTypeIndex;
11177  m_Id = id;
11178  m_hMemory = newMemory;
11179 
11180  switch(algorithm)
11181  {
11182  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11183  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11184  break;
11185  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11186  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11187  break;
11188  default:
11189  VMA_ASSERT(0);
11190  // Fall-through.
11191  case 0:
11192  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11193  }
11194  m_pMetadata->Init(newSize);
11195 }
11196 
11197 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11198 {
11199  // This is the most important assert in the entire library.
11200  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11201  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11202 
11203  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11204  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11205  m_hMemory = VK_NULL_HANDLE;
11206 
11207  vma_delete(allocator, m_pMetadata);
11208  m_pMetadata = VMA_NULL;
11209 }
11210 
11211 bool VmaDeviceMemoryBlock::Validate() const
11212 {
11213  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11214  (m_pMetadata->GetSize() != 0));
11215 
11216  return m_pMetadata->Validate();
11217 }
11218 
11219 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11220 {
11221  void* pData = nullptr;
11222  VkResult res = Map(hAllocator, 1, &pData);
11223  if(res != VK_SUCCESS)
11224  {
11225  return res;
11226  }
11227 
11228  res = m_pMetadata->CheckCorruption(pData);
11229 
11230  Unmap(hAllocator, 1);
11231 
11232  return res;
11233 }
11234 
11235 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11236 {
11237  if(count == 0)
11238  {
11239  return VK_SUCCESS;
11240  }
11241 
11242  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11243  if(m_MapCount != 0)
11244  {
11245  m_MapCount += count;
11246  VMA_ASSERT(m_pMappedData != VMA_NULL);
11247  if(ppData != VMA_NULL)
11248  {
11249  *ppData = m_pMappedData;
11250  }
11251  return VK_SUCCESS;
11252  }
11253  else
11254  {
11255  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11256  hAllocator->m_hDevice,
11257  m_hMemory,
11258  0, // offset
11259  VK_WHOLE_SIZE,
11260  0, // flags
11261  &m_pMappedData);
11262  if(result == VK_SUCCESS)
11263  {
11264  if(ppData != VMA_NULL)
11265  {
11266  *ppData = m_pMappedData;
11267  }
11268  m_MapCount = count;
11269  }
11270  return result;
11271  }
11272 }
11273 
11274 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11275 {
11276  if(count == 0)
11277  {
11278  return;
11279  }
11280 
11281  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11282  if(m_MapCount >= count)
11283  {
11284  m_MapCount -= count;
11285  if(m_MapCount == 0)
11286  {
11287  m_pMappedData = VMA_NULL;
11288  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11289  }
11290  }
11291  else
11292  {
11293  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11294  }
11295 }
11296 
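// Added commentary (not part of the original source): Map()/Unmap() above
// reference-count the mapping per VkDeviceMemory block. The first Map() calls
// vkMapMemory once for the whole block (offset 0, VK_WHOLE_SIZE); later calls
// just bump m_MapCount and return the cached pointer, and vkUnmapMemory runs
// only when the count drops back to zero. This is what lets many VmaAllocations
// within one block be persistently mapped even though Vulkan forbids mapping
// the same VkDeviceMemory twice.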
11297 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11298 {
11299  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11300  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11301 
11302  void* pData;
11303  VkResult res = Map(hAllocator, 1, &pData);
11304  if(res != VK_SUCCESS)
11305  {
11306  return res;
11307  }
11308 
11309  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11310  VmaWriteMagicValue(pData, allocOffset + allocSize);
11311 
11312  Unmap(hAllocator, 1);
11313 
11314  return VK_SUCCESS;
11315 }
11316 
11317 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11318 {
11319  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11320  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11321 
11322  void* pData;
11323  VkResult res = Map(hAllocator, 1, &pData);
11324  if(res != VK_SUCCESS)
11325  {
11326  return res;
11327  }
11328 
11329  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11330  {
11331  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11332  }
11333  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11334  {
11335  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11336  }
11337 
11338  Unmap(hAllocator, 1);
11339 
11340  return VK_SUCCESS;
11341 }
11342 
11343 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11344  const VmaAllocator hAllocator,
11345  const VmaAllocation hAllocation,
11346  VkBuffer hBuffer)
11347 {
11348  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11349  hAllocation->GetBlock() == this);
11350  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11351  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11352  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11353  hAllocator->m_hDevice,
11354  hBuffer,
11355  m_hMemory,
11356  hAllocation->GetOffset());
11357 }
11358 
11359 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11360  const VmaAllocator hAllocator,
11361  const VmaAllocation hAllocation,
11362  VkImage hImage)
11363 {
11364  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11365  hAllocation->GetBlock() == this);
11366  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11367  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11368  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11369  hAllocator->m_hDevice,
11370  hImage,
11371  m_hMemory,
11372  hAllocation->GetOffset());
11373 }
11374 
11375 static void InitStatInfo(VmaStatInfo& outInfo)
11376 {
11377  memset(&outInfo, 0, sizeof(outInfo));
11378  outInfo.allocationSizeMin = UINT64_MAX;
11379  outInfo.unusedRangeSizeMin = UINT64_MAX;
11380 }
11381 
11382 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11383 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11384 {
11385  inoutInfo.blockCount += srcInfo.blockCount;
11386  inoutInfo.allocationCount += srcInfo.allocationCount;
11387  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11388  inoutInfo.usedBytes += srcInfo.usedBytes;
11389  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11390  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11391  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11392  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11393  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11394 }
11395 
11396 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11397 {
11398  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11399  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11400  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11401  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11402 }
11403 
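// Added commentary (not part of the original source): the averages above use
// rounded integer division, e.g. usedBytes = 10 over allocationCount = 4
// yields allocationSizeAvg = 3 (2.5 rounded up), assuming VmaRoundDiv(x, y)
// computes (x + y/2) / y as defined earlier in this file.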
11404 VmaPool_T::VmaPool_T(
11405  VmaAllocator hAllocator,
11406  const VmaPoolCreateInfo& createInfo,
11407  VkDeviceSize preferredBlockSize) :
11408  m_BlockVector(
11409  hAllocator,
11410  this, // hParentPool
11411  createInfo.memoryTypeIndex,
11412  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11413  createInfo.minBlockCount,
11414  createInfo.maxBlockCount,
11415  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11416  createInfo.frameInUseCount,
11417  true, // isCustomPool
11418  createInfo.blockSize != 0, // explicitBlockSize
11419  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11420  m_Id(0)
11421 {
11422 }
11423 
11424 VmaPool_T::~VmaPool_T()
11425 {
11426 }
11427 
11428 #if VMA_STATS_STRING_ENABLED
11429 
11430 #endif // #if VMA_STATS_STRING_ENABLED
11431 
11432 VmaBlockVector::VmaBlockVector(
11433  VmaAllocator hAllocator,
11434  VmaPool hParentPool,
11435  uint32_t memoryTypeIndex,
11436  VkDeviceSize preferredBlockSize,
11437  size_t minBlockCount,
11438  size_t maxBlockCount,
11439  VkDeviceSize bufferImageGranularity,
11440  uint32_t frameInUseCount,
11441  bool isCustomPool,
11442  bool explicitBlockSize,
11443  uint32_t algorithm) :
11444  m_hAllocator(hAllocator),
11445  m_hParentPool(hParentPool),
11446  m_MemoryTypeIndex(memoryTypeIndex),
11447  m_PreferredBlockSize(preferredBlockSize),
11448  m_MinBlockCount(minBlockCount),
11449  m_MaxBlockCount(maxBlockCount),
11450  m_BufferImageGranularity(bufferImageGranularity),
11451  m_FrameInUseCount(frameInUseCount),
11452  m_IsCustomPool(isCustomPool),
11453  m_ExplicitBlockSize(explicitBlockSize),
11454  m_Algorithm(algorithm),
11455  m_HasEmptyBlock(false),
11456  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11457  m_NextBlockId(0)
11458 {
11459 }
11460 
11461 VmaBlockVector::~VmaBlockVector()
11462 {
11463  for(size_t i = m_Blocks.size(); i--; )
11464  {
11465  m_Blocks[i]->Destroy(m_hAllocator);
11466  vma_delete(m_hAllocator, m_Blocks[i]);
11467  }
11468 }
11469 
11470 VkResult VmaBlockVector::CreateMinBlocks()
11471 {
11472  for(size_t i = 0; i < m_MinBlockCount; ++i)
11473  {
11474  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11475  if(res != VK_SUCCESS)
11476  {
11477  return res;
11478  }
11479  }
11480  return VK_SUCCESS;
11481 }
11482 
11483 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11484 {
11485  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11486 
11487  const size_t blockCount = m_Blocks.size();
11488 
11489  pStats->size = 0;
11490  pStats->unusedSize = 0;
11491  pStats->allocationCount = 0;
11492  pStats->unusedRangeCount = 0;
11493  pStats->unusedRangeSizeMax = 0;
11494  pStats->blockCount = blockCount;
11495 
11496  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11497  {
11498  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11499  VMA_ASSERT(pBlock);
11500  VMA_HEAVY_ASSERT(pBlock->Validate());
11501  pBlock->m_pMetadata->AddPoolStats(*pStats);
11502  }
11503 }
11504 
11505 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11506 {
11507  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11508  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11509  (VMA_DEBUG_MARGIN > 0) &&
11510  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11511  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11512 }
11513 
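// Added commentary (not part of the original source): corruption detection is
// restricted to HOST_VISIBLE | HOST_COHERENT memory types because the magic
// margins are written and validated through a CPU mapping (see
// WriteMagicValueAroundAllocation() above); without host access there is no
// way to inspect the margins, and coherence avoids explicit flush/invalidate
// around every margin write.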
11514 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11515 
11516 VkResult VmaBlockVector::Allocate(
11517  uint32_t currentFrameIndex,
11518  VkDeviceSize size,
11519  VkDeviceSize alignment,
11520  const VmaAllocationCreateInfo& createInfo,
11521  VmaSuballocationType suballocType,
11522  size_t allocationCount,
11523  VmaAllocation* pAllocations)
11524 {
11525  size_t allocIndex;
11526  VkResult res = VK_SUCCESS;
11527 
11528  if(IsCorruptionDetectionEnabled())
11529  {
11530  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11531  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11532  }
11533 
11534  {
11535  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11536  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11537  {
11538  res = AllocatePage(
11539  currentFrameIndex,
11540  size,
11541  alignment,
11542  createInfo,
11543  suballocType,
11544  pAllocations + allocIndex);
11545  if(res != VK_SUCCESS)
11546  {
11547  break;
11548  }
11549  }
11550  }
11551 
11552  if(res != VK_SUCCESS)
11553  {
11554  // Free all already created allocations.
11555  while(allocIndex--)
11556  {
11557  Free(pAllocations[allocIndex]);
11558  }
11559  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11560  }
11561 
11562  return res;
11563 }
11564 
11565 VkResult VmaBlockVector::AllocatePage(
11566  uint32_t currentFrameIndex,
11567  VkDeviceSize size,
11568  VkDeviceSize alignment,
11569  const VmaAllocationCreateInfo& createInfo,
11570  VmaSuballocationType suballocType,
11571  VmaAllocation* pAllocation)
11572 {
11573  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11574  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11575  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11576  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11577  const bool canCreateNewBlock =
11578  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11579  (m_Blocks.size() < m_MaxBlockCount);
11580  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11581 
11582  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer,
11583  // which in turn is available only when maxBlockCount = 1.
11584  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11585  {
11586  canMakeOtherLost = false;
11587  }
11588 
11589  // Upper address can only be used with linear allocator and within single memory block.
11590  if(isUpperAddress &&
11591  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11592  {
11593  return VK_ERROR_FEATURE_NOT_PRESENT;
11594  }
11595 
11596  // Validate strategy.
11597  switch(strategy)
11598  {
11599  case 0:
11600  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11601  break;
11602  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11603  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11605  break;
11606  default:
11607  return VK_ERROR_FEATURE_NOT_PRESENT;
11608  }
11609 
11610  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11611  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11612  {
11613  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11614  }
11615 
11616  /*
11617  Under certain conditions, this whole section can be skipped for optimization, so
11618  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11619  e.g. for custom pools with linear algorithm.
11620  */
11621  if(!canMakeOtherLost || canCreateNewBlock)
11622  {
11623  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11623  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11624  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11626 
11627  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11628  {
11629  // Use only last block.
11630  if(!m_Blocks.empty())
11631  {
11632  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11633  VMA_ASSERT(pCurrBlock);
11634  VkResult res = AllocateFromBlock(
11635  pCurrBlock,
11636  currentFrameIndex,
11637  size,
11638  alignment,
11639  allocFlagsCopy,
11640  createInfo.pUserData,
11641  suballocType,
11642  strategy,
11643  pAllocation);
11644  if(res == VK_SUCCESS)
11645  {
11646  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11647  return VK_SUCCESS;
11648  }
11649  }
11650  }
11651  else
11652  {
11653  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11654  {
11655  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11656  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11657  {
11658  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11659  VMA_ASSERT(pCurrBlock);
11660  VkResult res = AllocateFromBlock(
11661  pCurrBlock,
11662  currentFrameIndex,
11663  size,
11664  alignment,
11665  allocFlagsCopy,
11666  createInfo.pUserData,
11667  suballocType,
11668  strategy,
11669  pAllocation);
11670  if(res == VK_SUCCESS)
11671  {
11672  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11673  return VK_SUCCESS;
11674  }
11675  }
11676  }
11677  else // WORST_FIT, FIRST_FIT
11678  {
11679  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11680  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11681  {
11682  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11683  VMA_ASSERT(pCurrBlock);
11684  VkResult res = AllocateFromBlock(
11685  pCurrBlock,
11686  currentFrameIndex,
11687  size,
11688  alignment,
11689  allocFlagsCopy,
11690  createInfo.pUserData,
11691  suballocType,
11692  strategy,
11693  pAllocation);
11694  if(res == VK_SUCCESS)
11695  {
11696  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11697  return VK_SUCCESS;
11698  }
11699  }
11700  }
11701  }
11702 
11703  // 2. Try to create new block.
11704  if(canCreateNewBlock)
11705  {
11706  // Calculate optimal size for new block.
11707  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11708  uint32_t newBlockSizeShift = 0;
11709  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11710 
11711  if(!m_ExplicitBlockSize)
11712  {
11713  // Allocate 1/8, 1/4, 1/2 as first blocks.
11714  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11715  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11716  {
11717  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11718  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11719  {
11720  newBlockSize = smallerNewBlockSize;
11721  ++newBlockSizeShift;
11722  }
11723  else
11724  {
11725  break;
11726  }
11727  }
11728  }
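/*
Worked example of the heuristic above (illustrative numbers): with
m_PreferredBlockSize = 256 MiB, no existing blocks (maxExistingBlockSize = 0),
and a 10 MiB request, the loop halves the candidate size up to three times:
128 MiB, 64 MiB, 32 MiB - each candidate exceeds maxExistingBlockSize and is
>= 2 * 10 MiB - so the first block created is 32 MiB, not the full 256 MiB.
*/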
11729 
11730  size_t newBlockIndex = 0;
11731  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11732  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11733  if(!m_ExplicitBlockSize)
11734  {
11735  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11736  {
11737  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11738  if(smallerNewBlockSize >= size)
11739  {
11740  newBlockSize = smallerNewBlockSize;
11741  ++newBlockSizeShift;
11742  res = CreateBlock(newBlockSize, &newBlockIndex);
11743  }
11744  else
11745  {
11746  break;
11747  }
11748  }
11749  }
11750 
11751  if(res == VK_SUCCESS)
11752  {
11753  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11754  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11755 
11756  res = AllocateFromBlock(
11757  pBlock,
11758  currentFrameIndex,
11759  size,
11760  alignment,
11761  allocFlagsCopy,
11762  createInfo.pUserData,
11763  suballocType,
11764  strategy,
11765  pAllocation);
11766  if(res == VK_SUCCESS)
11767  {
11768  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11769  return VK_SUCCESS;
11770  }
11771  else
11772  {
11773  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11774  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11775  }
11776  }
11777  }
11778  }
11779 
11780  // 3. Try to allocate from existing blocks while making other allocations lost.
11781  if(canMakeOtherLost)
11782  {
11783  uint32_t tryIndex = 0;
11784  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11785  {
11786  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11787  VmaAllocationRequest bestRequest = {};
11788  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11789 
11790  // 1. Search existing allocations.
11791  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11792  {
11793  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11794  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11795  {
11796  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11797  VMA_ASSERT(pCurrBlock);
11798  VmaAllocationRequest currRequest = {};
11799  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11800  currentFrameIndex,
11801  m_FrameInUseCount,
11802  m_BufferImageGranularity,
11803  size,
11804  alignment,
11805  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11806  suballocType,
11807  canMakeOtherLost,
11808  strategy,
11809  &currRequest))
11810  {
11811  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11812  if(pBestRequestBlock == VMA_NULL ||
11813  currRequestCost < bestRequestCost)
11814  {
11815  pBestRequestBlock = pCurrBlock;
11816  bestRequest = currRequest;
11817  bestRequestCost = currRequestCost;
11818 
11819  if(bestRequestCost == 0)
11820  {
11821  break;
11822  }
11823  }
11824  }
11825  }
11826  }
11827  else // WORST_FIT, FIRST_FIT
11828  {
11829  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11830  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11831  {
11832  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11833  VMA_ASSERT(pCurrBlock);
11834  VmaAllocationRequest currRequest = {};
11835  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11836  currentFrameIndex,
11837  m_FrameInUseCount,
11838  m_BufferImageGranularity,
11839  size,
11840  alignment,
11841  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11842  suballocType,
11843  canMakeOtherLost,
11844  strategy,
11845  &currRequest))
11846  {
11847  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11848  if(pBestRequestBlock == VMA_NULL ||
11849  currRequestCost < bestRequestCost ||
11850  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11851  {
11852  pBestRequestBlock = pCurrBlock;
11853  bestRequest = currRequest;
11854  bestRequestCost = currRequestCost;
11855 
11856  if(bestRequestCost == 0 ||
11857  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11858  {
11859  break;
11860  }
11861  }
11862  }
11863  }
11864  }
11865 
11866  if(pBestRequestBlock != VMA_NULL)
11867  {
11868  if(mapped)
11869  {
11870  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11871  if(res != VK_SUCCESS)
11872  {
11873  return res;
11874  }
11875  }
11876 
11877  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11878  currentFrameIndex,
11879  m_FrameInUseCount,
11880  &bestRequest))
11881  {
11882  // We no longer have an empty block.
11883  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11884  {
11885  m_HasEmptyBlock = false;
11886  }
11887  // Allocate from this pBlock.
11888  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11889  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11890  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11891  (*pAllocation)->InitBlockAllocation(
11892  pBestRequestBlock,
11893  bestRequest.offset,
11894  alignment,
11895  size,
11896  suballocType,
11897  mapped,
11898  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11899  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11900  VMA_DEBUG_LOG(" Returned from existing block");
11901  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11902  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11903  {
11904  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11905  }
11906  if(IsCorruptionDetectionEnabled())
11907  {
11908  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11909  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11910  }
11911  return VK_SUCCESS;
11912  }
11913  // else: Some allocations must have been touched while we were here. Next try.
11914  }
11915  else
11916  {
11917  // Could not find place in any of the blocks - break outer loop.
11918  break;
11919  }
11920  }
11921  /* Maximum number of tries exceeded - a very unlikely event when many other
11922  threads are simultaneously touching allocations, making it impossible to make
11923  them lost at the same time as we try to allocate. */
11924  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11925  {
11926  return VK_ERROR_TOO_MANY_OBJECTS;
11927  }
11928  }
11929 
11930  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11931 }
11932 
11933 void VmaBlockVector::Free(
11934  VmaAllocation hAllocation)
11935 {
11936  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11937 
11938  // Scope for lock.
11939  {
11940  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11941 
11942  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11943 
11944  if(IsCorruptionDetectionEnabled())
11945  {
11946  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11947  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11948  }
11949 
11950  if(hAllocation->IsPersistentMap())
11951  {
11952  pBlock->Unmap(m_hAllocator, 1);
11953  }
11954 
11955  pBlock->m_pMetadata->Free(hAllocation);
11956  VMA_HEAVY_ASSERT(pBlock->Validate());
11957 
11958  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11959 
11960  // pBlock became empty after this deallocation.
11961  if(pBlock->m_pMetadata->IsEmpty())
11962  {
11963  // We already have an empty block. We don't want two, so delete this one.
11964  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11965  {
11966  pBlockToDelete = pBlock;
11967  Remove(pBlock);
11968  }
11969  // We now have our first empty block.
11970  else
11971  {
11972  m_HasEmptyBlock = true;
11973  }
11974  }
11975  // pBlock didn't become empty, but we have another empty block - find and free that one.
11976  // (This is optional; a heuristic.)
11977  else if(m_HasEmptyBlock)
11978  {
11979  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11980  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11981  {
11982  pBlockToDelete = pLastBlock;
11983  m_Blocks.pop_back();
11984  m_HasEmptyBlock = false;
11985  }
11986  }
11987 
11988  IncrementallySortBlocks();
11989  }
11990 
11991  // Destruction of a free block. Deferred until this point, outside of the mutex
11992  // lock, for performance reasons.
11993  if(pBlockToDelete != VMA_NULL)
11994  {
11995  VMA_DEBUG_LOG(" Deleted empty allocation");
11996  pBlockToDelete->Destroy(m_hAllocator);
11997  vma_delete(m_hAllocator, pBlockToDelete);
11998  }
11999 }
12000 
12001 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12002 {
12003  VkDeviceSize result = 0;
12004  for(size_t i = m_Blocks.size(); i--; )
12005  {
12006  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12007  if(result >= m_PreferredBlockSize)
12008  {
12009  break;
12010  }
12011  }
12012  return result;
12013 }
12014 
12015 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12016 {
12017  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12018  {
12019  if(m_Blocks[blockIndex] == pBlock)
12020  {
12021  VmaVectorRemove(m_Blocks, blockIndex);
12022  return;
12023  }
12024  }
12025  VMA_ASSERT(0);
12026 }
12027 
12028 void VmaBlockVector::IncrementallySortBlocks()
12029 {
12030  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12031  {
12032  // Bubble sort only until first swap.
12033  for(size_t i = 1; i < m_Blocks.size(); ++i)
12034  {
12035  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12036  {
12037  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12038  return;
12039  }
12040  }
12041  }
12042 }
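/*
Illustrative trace (hypothetical free sizes): with per-block free space
[5, 3, 8], one call swaps the first out-of-order pair and returns -> [3, 5, 8];
the next call finds no out-of-order pair and changes nothing. Calling this
after every Free() keeps m_Blocks approximately sorted at O(n) per call,
avoiding a full sort on a hot path.
*/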
12043 
12044 VkResult VmaBlockVector::AllocateFromBlock(
12045  VmaDeviceMemoryBlock* pBlock,
12046  uint32_t currentFrameIndex,
12047  VkDeviceSize size,
12048  VkDeviceSize alignment,
12049  VmaAllocationCreateFlags allocFlags,
12050  void* pUserData,
12051  VmaSuballocationType suballocType,
12052  uint32_t strategy,
12053  VmaAllocation* pAllocation)
12054 {
12055  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12056  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12057  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12058  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12059 
12060  VmaAllocationRequest currRequest = {};
12061  if(pBlock->m_pMetadata->CreateAllocationRequest(
12062  currentFrameIndex,
12063  m_FrameInUseCount,
12064  m_BufferImageGranularity,
12065  size,
12066  alignment,
12067  isUpperAddress,
12068  suballocType,
12069  false, // canMakeOtherLost
12070  strategy,
12071  &currRequest))
12072  {
12073  // Allocate from pCurrBlock.
12074  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12075 
12076  if(mapped)
12077  {
12078  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12079  if(res != VK_SUCCESS)
12080  {
12081  return res;
12082  }
12083  }
12084 
12085  // We no longer have an empty block.
12086  if(pBlock->m_pMetadata->IsEmpty())
12087  {
12088  m_HasEmptyBlock = false;
12089  }
12090 
12091  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12092  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12093  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12094  (*pAllocation)->InitBlockAllocation(
12095  pBlock,
12096  currRequest.offset,
12097  alignment,
12098  size,
12099  suballocType,
12100  mapped,
12101  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12102  VMA_HEAVY_ASSERT(pBlock->Validate());
12103  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12104  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12105  {
12106  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12107  }
12108  if(IsCorruptionDetectionEnabled())
12109  {
12110  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12111  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12112  }
12113  return VK_SUCCESS;
12114  }
12115  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12116 }
12117 
12118 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12119 {
12120  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12121  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12122  allocInfo.allocationSize = blockSize;
12123  VkDeviceMemory mem = VK_NULL_HANDLE;
12124  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12125  if(res < 0)
12126  {
12127  return res;
12128  }
12129 
12130  // New VkDeviceMemory successfully created.
12131 
12132  // Create a new block object for it.
12133  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12134  pBlock->Init(
12135  m_hAllocator,
12136  m_hParentPool,
12137  m_MemoryTypeIndex,
12138  mem,
12139  allocInfo.allocationSize,
12140  m_NextBlockId++,
12141  m_Algorithm);
12142 
12143  m_Blocks.push_back(pBlock);
12144  if(pNewBlockIndex != VMA_NULL)
12145  {
12146  *pNewBlockIndex = m_Blocks.size() - 1;
12147  }
12148 
12149  return VK_SUCCESS;
12150 }
12151 
12152 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12153  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12154  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12155 {
12156  const size_t blockCount = m_Blocks.size();
12157  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12158 
12159  enum BLOCK_FLAG
12160  {
12161  BLOCK_FLAG_USED = 0x00000001,
12162  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12163  };
12164 
12165  struct BlockInfo
12166  {
12167  uint32_t flags;
12168  void* pMappedData;
12169  };
12170  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12171  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12172  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12173 
12174  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12175  const size_t moveCount = moves.size();
12176  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12177  {
12178  const VmaDefragmentationMove& move = moves[moveIndex];
12179  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12180  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12181  }
12182 
12183  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12184 
12185  // Go over all blocks. Get mapped pointer or map if necessary.
12186  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12187  {
12188  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12189  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12190  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12191  {
12192  currBlockInfo.pMappedData = pBlock->GetMappedData();
12193  // It is not originally mapped - map it.
12194  if(currBlockInfo.pMappedData == VMA_NULL)
12195  {
12196  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12197  if(pDefragCtx->res == VK_SUCCESS)
12198  {
12199  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12200  }
12201  }
12202  }
12203  }
12204 
12205  // Go over all moves. Do actual data transfer.
12206  if(pDefragCtx->res == VK_SUCCESS)
12207  {
12208  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12209  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12210 
12211  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12212  {
12213  const VmaDefragmentationMove& move = moves[moveIndex];
12214 
12215  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12216  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12217 
12218  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12219 
12220  // Invalidate source.
12221  if(isNonCoherent)
12222  {
12223  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12224  memRange.memory = pSrcBlock->GetDeviceMemory();
12225  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12226  memRange.size = VMA_MIN(
12227  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12228  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12229  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12230  }
12231 
12232  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12233  memmove(
12234  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12235  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12236  static_cast<size_t>(move.size));
12237 
12238  if(IsCorruptionDetectionEnabled())
12239  {
12240  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12241  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12242  }
12243 
12244  // Flush destination.
12245  if(isNonCoherent)
12246  {
12247  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12248  memRange.memory = pDstBlock->GetDeviceMemory();
12249  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12250  memRange.size = VMA_MIN(
12251  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12252  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12253  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12254  }
12255  }
12256  }
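/*
Worked example of the range rounding above (illustrative numbers): with
nonCoherentAtomSize = 64, move.srcOffset = 100 and move.size = 50, the
invalidated range becomes offset = VmaAlignDown(100, 64) = 64 and
size = VmaAlignUp(50 + (100 - 64), 64) = 128, i.e. bytes [64, 192), then
clamped so it does not extend past the end of the block - as required for
flush/invalidate of non-coherent memory.
*/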
12257 
12258  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12259  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
12260  for(size_t blockIndex = blockCount; blockIndex--; )
12261  {
12262  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12263  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12264  {
12265  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12266  pBlock->Unmap(m_hAllocator, 1);
12267  }
12268  }
12269 }
12270 
12271 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12272  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12273  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12274  VkCommandBuffer commandBuffer)
12275 {
12276  const size_t blockCount = m_Blocks.size();
12277 
12278  pDefragCtx->blockContexts.resize(blockCount);
12279  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12280 
12281  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12282  const size_t moveCount = moves.size();
12283  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12284  {
12285  const VmaDefragmentationMove& move = moves[moveIndex];
12286  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12287  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12288  }
12289 
12290  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12291 
12292  // Go over all blocks. Create and bind buffer for whole block if necessary.
12293  {
12294  VkBufferCreateInfo bufCreateInfo;
12295  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12296 
12297  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12298  {
12299  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12300  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12301  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12302  {
12303  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12304  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12305  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12306  if(pDefragCtx->res == VK_SUCCESS)
12307  {
12308  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12309  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12310  }
12311  }
12312  }
12313  }
12314 
12315  // Go over all moves. Post data transfer commands to command buffer.
12316  if(pDefragCtx->res == VK_SUCCESS)
12317  {
12318  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12319  {
12320  const VmaDefragmentationMove& move = moves[moveIndex];
12321 
12322  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12323  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12324 
12325  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12326 
12327  VkBufferCopy region = {
12328  move.srcOffset,
12329  move.dstOffset,
12330  move.size };
12331  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12332  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12333  }
12334  }
12335 
12336  // Save buffers to defrag context for later destruction.
12337  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12338  {
12339  pDefragCtx->res = VK_NOT_READY;
12340  }
12341 }
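/*
Caller-side sketch (illustrative; assumes `device`, `queue`, `cmdBuf`, and
`fence` were created elsewhere) of how the command buffer passed to this
function is typically recorded and submitted:

    VkCommandBufferBeginInfo beginInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    vkBeginCommandBuffer(cmdBuf, &beginInfo);
    // ... defragmentation records vkCmdCopyBuffer calls here ...
    vkEndCommandBuffer(cmdBuf);

    VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmdBuf;
    vkQueueSubmit(queue, 1, &submitInfo, fence);
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
*/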
12342 
12343 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12344 {
12345  m_HasEmptyBlock = false;
12346  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12347  {
12348  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12349  if(pBlock->m_pMetadata->IsEmpty())
12350  {
12351  if(m_Blocks.size() > m_MinBlockCount)
12352  {
12353  if(pDefragmentationStats != VMA_NULL)
12354  {
12355  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12356  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12357  }
12358 
12359  VmaVectorRemove(m_Blocks, blockIndex);
12360  pBlock->Destroy(m_hAllocator);
12361  vma_delete(m_hAllocator, pBlock);
12362  }
12363  else
12364  {
12365  m_HasEmptyBlock = true;
12366  }
12367  }
12368  }
12369 }
12370 
12371 #if VMA_STATS_STRING_ENABLED
12372 
12373 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12374 {
12375  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12376 
12377  json.BeginObject();
12378 
12379  if(m_IsCustomPool)
12380  {
12381  json.WriteString("MemoryTypeIndex");
12382  json.WriteNumber(m_MemoryTypeIndex);
12383 
12384  json.WriteString("BlockSize");
12385  json.WriteNumber(m_PreferredBlockSize);
12386 
12387  json.WriteString("BlockCount");
12388  json.BeginObject(true);
12389  if(m_MinBlockCount > 0)
12390  {
12391  json.WriteString("Min");
12392  json.WriteNumber((uint64_t)m_MinBlockCount);
12393  }
12394  if(m_MaxBlockCount < SIZE_MAX)
12395  {
12396  json.WriteString("Max");
12397  json.WriteNumber((uint64_t)m_MaxBlockCount);
12398  }
12399  json.WriteString("Cur");
12400  json.WriteNumber((uint64_t)m_Blocks.size());
12401  json.EndObject();
12402 
12403  if(m_FrameInUseCount > 0)
12404  {
12405  json.WriteString("FrameInUseCount");
12406  json.WriteNumber(m_FrameInUseCount);
12407  }
12408 
12409  if(m_Algorithm != 0)
12410  {
12411  json.WriteString("Algorithm");
12412  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12413  }
12414  }
12415  else
12416  {
12417  json.WriteString("PreferredBlockSize");
12418  json.WriteNumber(m_PreferredBlockSize);
12419  }
12420 
12421  json.WriteString("Blocks");
12422  json.BeginObject();
12423  for(size_t i = 0; i < m_Blocks.size(); ++i)
12424  {
12425  json.BeginString();
12426  json.ContinueString(m_Blocks[i]->GetId());
12427  json.EndString();
12428 
12429  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12430  }
12431  json.EndObject();
12432 
12433  json.EndObject();
12434 }
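/*
Hedged sketch of the JSON shape this emits for a custom pool (values are
illustrative; the per-block content comes from the metadata's own
PrintDetailedMap):

    {
      "MemoryTypeIndex": 7,
      "BlockSize": 268435456,
      "BlockCount": { "Min": 1, "Max": 8, "Cur": 2 },
      "Blocks": { "0": { ... }, "1": { ... } }
    }
*/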
12435 
12436 #endif // #if VMA_STATS_STRING_ENABLED
12437 
12438 void VmaBlockVector::Defragment(
12439  class VmaBlockVectorDefragmentationContext* pCtx,
12440  VmaDefragmentationStats* pStats,
12441  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12442  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12443  VkCommandBuffer commandBuffer)
12444 {
12445  pCtx->res = VK_SUCCESS;
12446 
12447  const VkMemoryPropertyFlags memPropFlags =
12448  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12449  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12450  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12451 
12452  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12453  isHostVisible;
12454  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12455  !IsCorruptionDetectionEnabled() &&
12456  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12457 
12458  // There are options to defragment this memory type.
12459  if(canDefragmentOnCpu || canDefragmentOnGpu)
12460  {
12461  bool defragmentOnGpu;
12462  // There is only one option to defragment this memory type.
12463  if(canDefragmentOnGpu != canDefragmentOnCpu)
12464  {
12465  defragmentOnGpu = canDefragmentOnGpu;
12466  }
12467  // Both options are available: use heuristics to choose the best one.
12468  else
12469  {
12470  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12471  m_hAllocator->IsIntegratedGpu();
12472  }
12473 
12474  bool overlappingMoveSupported = !defragmentOnGpu;
12475 
12476  if(m_hAllocator->m_UseMutex)
12477  {
12478  m_Mutex.LockWrite();
12479  pCtx->mutexLocked = true;
12480  }
12481 
12482  pCtx->Begin(overlappingMoveSupported);
12483 
12484  // Defragment.
12485 
12486  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12487  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12488  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12489  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12490  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12491 
12492  // Accumulate statistics.
12493  if(pStats != VMA_NULL)
12494  {
12495  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12496  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12497  pStats->bytesMoved += bytesMoved;
12498  pStats->allocationsMoved += allocationsMoved;
12499  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12500  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12501  if(defragmentOnGpu)
12502  {
12503  maxGpuBytesToMove -= bytesMoved;
12504  maxGpuAllocationsToMove -= allocationsMoved;
12505  }
12506  else
12507  {
12508  maxCpuBytesToMove -= bytesMoved;
12509  maxCpuAllocationsToMove -= allocationsMoved;
12510  }
12511  }
12512 
12513  if(pCtx->res >= VK_SUCCESS)
12514  {
12515  if(defragmentOnGpu)
12516  {
12517  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12518  }
12519  else
12520  {
12521  ApplyDefragmentationMovesCpu(pCtx, moves);
12522  }
12523  }
12524  }
12525 }
12526 
12527 void VmaBlockVector::DefragmentationEnd(
12528  class VmaBlockVectorDefragmentationContext* pCtx,
12529  VmaDefragmentationStats* pStats)
12530 {
12531  // Destroy buffers.
12532  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12533  {
12534  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12535  if(blockCtx.hBuffer)
12536  {
12537  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12538  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12539  }
12540  }
12541 
12542  if(pCtx->res >= VK_SUCCESS)
12543  {
12544  FreeEmptyBlocks(pStats);
12545  }
12546 
12547  if(pCtx->mutexLocked)
12548  {
12549  VMA_ASSERT(m_hAllocator->m_UseMutex);
12550  m_Mutex.UnlockWrite();
12551  }
12552 }
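/*
For orientation, a hedged sketch of the public API that drives Defragment()
and DefragmentationEnd() above (CPU-only limits shown; `allocator` and the
`allocations` vector are assumed to exist):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocations.size();
    defragInfo.pAllocations = allocations.data();
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // no GPU pass

    VmaDefragmentationContext defragCtx = VMA_NULL;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/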
12553 
12554 size_t VmaBlockVector::CalcAllocationCount() const
12555 {
12556  size_t result = 0;
12557  for(size_t i = 0; i < m_Blocks.size(); ++i)
12558  {
12559  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12560  }
12561  return result;
12562 }
12563 
12564 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12565 {
12566  if(m_BufferImageGranularity == 1)
12567  {
12568  return false;
12569  }
12570  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12571  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12572  {
12573  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12574  VMA_ASSERT(m_Algorithm == 0);
12575  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12576  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12577  {
12578  return true;
12579  }
12580  }
12581  return false;
12582 }
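/*
Illustrative case of the conflict checked above: with bufferImageGranularity =
4096, a linear resource (buffer) ending at offset 100 and an optimal-tiling
image starting at offset 200 would share the same 4 KiB "page", which Vulkan
forbids. Only the generic metadata tracks suballocation types precisely enough
to detect this, hence the m_Algorithm == 0 assertion.
*/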
12583 
12584 void VmaBlockVector::MakePoolAllocationsLost(
12585  uint32_t currentFrameIndex,
12586  size_t* pLostAllocationCount)
12587 {
12588  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12589  size_t lostAllocationCount = 0;
12590  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12591  {
12592  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12593  VMA_ASSERT(pBlock);
12594  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12595  }
12596  if(pLostAllocationCount != VMA_NULL)
12597  {
12598  *pLostAllocationCount = lostAllocationCount;
12599  }
12600 }
12601 
12602 VkResult VmaBlockVector::CheckCorruption()
12603 {
12604  if(!IsCorruptionDetectionEnabled())
12605  {
12606  return VK_ERROR_FEATURE_NOT_PRESENT;
12607  }
12608 
12609  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12610  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12611  {
12612  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12613  VMA_ASSERT(pBlock);
12614  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12615  if(res != VK_SUCCESS)
12616  {
12617  return res;
12618  }
12619  }
12620  return VK_SUCCESS;
12621 }
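/*
Hedged usage sketch of the public entry point that reaches this method;
corruption detection requires VMA_DEBUG_DETECT_CORRUPTION and a nonzero
VMA_DEBUG_MARGIN at compile time:

    // Check all memory types at once:
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_ERROR_FEATURE_NOT_PRESENT => detection not enabled for these types.
*/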
12622 
12623 void VmaBlockVector::AddStats(VmaStats* pStats)
12624 {
12625  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12626  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12627 
12628  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12629 
12630  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12631  {
12632  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12633  VMA_ASSERT(pBlock);
12634  VMA_HEAVY_ASSERT(pBlock->Validate());
12635  VmaStatInfo allocationStatInfo;
12636  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12637  VmaAddStatInfo(pStats->total, allocationStatInfo);
12638  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12639  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12640  }
12641 }
12642 
12643 ////////////////////////////////////////////////////////////////////////////////
12644 // VmaDefragmentationAlgorithm_Generic members definition
12645 
12646 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12647  VmaAllocator hAllocator,
12648  VmaBlockVector* pBlockVector,
12649  uint32_t currentFrameIndex,
12650  bool overlappingMoveSupported) :
12651  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12652  m_AllocationCount(0),
12653  m_AllAllocations(false),
12654  m_BytesMoved(0),
12655  m_AllocationsMoved(0),
12656  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12657 {
12658  // Create block info for each block.
12659  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12660  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12661  {
12662  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12663  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12664  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12665  m_Blocks.push_back(pBlockInfo);
12666  }
12667 
12668  // Sort them by m_pBlock pointer value.
12669  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12670 }
12671 
12672 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12673 {
12674  for(size_t i = m_Blocks.size(); i--; )
12675  {
12676  vma_delete(m_hAllocator, m_Blocks[i]);
12677  }
12678 }
12679 
12680 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12681 {
12682  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
12683  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12684  {
12685  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12686  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12687  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12688  {
12689  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12690  (*it)->m_Allocations.push_back(allocInfo);
12691  }
12692  else
12693  {
12694  VMA_ASSERT(0);
12695  }
12696 
12697  ++m_AllocationCount;
12698  }
12699 }
12700 
12701 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12702  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12703  VkDeviceSize maxBytesToMove,
12704  uint32_t maxAllocationsToMove)
12705 {
12706  if(m_Blocks.empty())
12707  {
12708  return VK_SUCCESS;
12709  }
12710 
12711  // This is a choice based on research.
12712  // Option 1:
12713  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12714  // Option 2:
12715  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12716  // Option 3:
12717  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12718 
12719  size_t srcBlockMinIndex = 0;
12720  // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
12721  /*
12722  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12723  {
12724  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12725  if(blocksWithNonMovableCount > 0)
12726  {
12727  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12728  }
12729  }
12730  */
12731 
12732  size_t srcBlockIndex = m_Blocks.size() - 1;
12733  size_t srcAllocIndex = SIZE_MAX;
12734  for(;;)
12735  {
12736  // 1. Find next allocation to move.
12737  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12738  // 1.2. Then start from last to first m_Allocations.
12739  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12740  {
12741  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12742  {
12743  // Finished: no more allocations to process.
12744  if(srcBlockIndex == srcBlockMinIndex)
12745  {
12746  return VK_SUCCESS;
12747  }
12748  else
12749  {
12750  --srcBlockIndex;
12751  srcAllocIndex = SIZE_MAX;
12752  }
12753  }
12754  else
12755  {
12756  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12757  }
12758  }
12759 
12760  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12761  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12762 
12763  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12764  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12765  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12766  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12767 
12768  // 2. Try to find new place for this allocation in preceding or current block.
12769  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12770  {
12771  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12772  VmaAllocationRequest dstAllocRequest;
12773  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12774  m_CurrentFrameIndex,
12775  m_pBlockVector->GetFrameInUseCount(),
12776  m_pBlockVector->GetBufferImageGranularity(),
12777  size,
12778  alignment,
12779  false, // upperAddress
12780  suballocType,
12781  false, // canMakeOtherLost
12782  strategy,
12783  &dstAllocRequest) &&
12784  MoveMakesSense(
12785  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12786  {
12787  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12788 
12789  // Reached limit on number of allocations or bytes to move.
12790  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12791  (m_BytesMoved + size > maxBytesToMove))
12792  {
12793  return VK_SUCCESS;
12794  }
12795 
12796  VmaDefragmentationMove move;
12797  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12798  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12799  move.srcOffset = srcOffset;
12800  move.dstOffset = dstAllocRequest.offset;
12801  move.size = size;
12802  moves.push_back(move);
12803 
12804  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12805  dstAllocRequest,
12806  suballocType,
12807  size,
12808  allocInfo.m_hAllocation);
12809  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12810 
12811  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12812 
12813  if(allocInfo.m_pChanged != VMA_NULL)
12814  {
12815  *allocInfo.m_pChanged = VK_TRUE;
12816  }
12817 
12818  ++m_AllocationsMoved;
12819  m_BytesMoved += size;
12820 
12821  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12822 
12823  break;
12824  }
12825  }
12826 
12827  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
12828 
12829  if(srcAllocIndex > 0)
12830  {
12831  --srcAllocIndex;
12832  }
12833  else
12834  {
12835  if(srcBlockIndex > 0)
12836  {
12837  --srcBlockIndex;
12838  srcAllocIndex = SIZE_MAX;
12839  }
12840  else
12841  {
12842  return VK_SUCCESS;
12843  }
12844  }
12845  }
12846 }
12847 
12848 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12849 {
12850  size_t result = 0;
12851  for(size_t i = 0; i < m_Blocks.size(); ++i)
12852  {
12853  if(m_Blocks[i]->m_HasNonMovableAllocations)
12854  {
12855  ++result;
12856  }
12857  }
12858  return result;
12859 }
12860 
12861 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12862  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12863  VkDeviceSize maxBytesToMove,
12864  uint32_t maxAllocationsToMove)
12865 {
12866  if(!m_AllAllocations && m_AllocationCount == 0)
12867  {
12868  return VK_SUCCESS;
12869  }
12870 
12871  const size_t blockCount = m_Blocks.size();
12872  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12873  {
12874  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12875 
12876  if(m_AllAllocations)
12877  {
12878  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12879  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12880  it != pMetadata->m_Suballocations.end();
12881  ++it)
12882  {
12883  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12884  {
12885  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12886  pBlockInfo->m_Allocations.push_back(allocInfo);
12887  }
12888  }
12889  }
12890 
12891  pBlockInfo->CalcHasNonMovableAllocations();
12892 
12893  // This is a choice based on research.
12894  // Option 1:
12895  pBlockInfo->SortAllocationsByOffsetDescending();
12896  // Option 2:
12897  //pBlockInfo->SortAllocationsBySizeDescending();
12898  }
12899 
12900  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12901  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12902 
12903  // This is a choice based on research.
12904  const uint32_t roundCount = 2;
12905 
12906  // Execute defragmentation rounds (the main part).
12907  VkResult result = VK_SUCCESS;
12908  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12909  {
12910  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12911  }
12912 
12913  return result;
12914 }
12915 
12916 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12917  size_t dstBlockIndex, VkDeviceSize dstOffset,
12918  size_t srcBlockIndex, VkDeviceSize srcOffset)
12919 {
12920  if(dstBlockIndex < srcBlockIndex)
12921  {
12922  return true;
12923  }
12924  if(dstBlockIndex > srcBlockIndex)
12925  {
12926  return false;
12927  }
12928  if(dstOffset < srcOffset)
12929  {
12930  return true;
12931  }
12932  return false;
12933 }
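/*
Examples of the ordering this encodes (illustrative values; the block index is
compared first, then the offset):
    dst (block 1, any offset) <- src (block 2, any offset) : true  (earlier block)
    dst (block 1, off 64)     <- src (block 1, off 128)    : true  (same block, lower offset)
    dst (block 1, off 128)    <- src (block 1, off 64)     : false (would move "up")
Allocations therefore only ever move toward the front of the sorted block
list, which guarantees each round makes progress and terminates.
*/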
12934 
12935 ////////////////////////////////////////////////////////////////////////////////
12936 // VmaDefragmentationAlgorithm_Fast
12937 
12938 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12939  VmaAllocator hAllocator,
12940  VmaBlockVector* pBlockVector,
12941  uint32_t currentFrameIndex,
12942  bool overlappingMoveSupported) :
12943  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12944  m_OverlappingMoveSupported(overlappingMoveSupported),
12945  m_AllocationCount(0),
12946  m_AllAllocations(false),
12947  m_BytesMoved(0),
12948  m_AllocationsMoved(0),
12949  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12950 {
12951  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12952 
12953 }
12954 
12955 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12956 {
12957 }
12958 
12959 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12960  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12961  VkDeviceSize maxBytesToMove,
12962  uint32_t maxAllocationsToMove)
12963 {
12964  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12965 
12966  const size_t blockCount = m_pBlockVector->GetBlockCount();
12967  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12968  {
12969  return VK_SUCCESS;
12970  }
12971 
12972  PreprocessMetadata();
12973 
12974  // Sort blocks in order from most "destination" to most "source".
12975 
12976  m_BlockInfos.resize(blockCount);
12977  for(size_t i = 0; i < blockCount; ++i)
12978  {
12979  m_BlockInfos[i].origBlockIndex = i;
12980  }
12981 
12982  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12983  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12984  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12985  });
12986 
12987  // THE MAIN ALGORITHM
12988 
12989  FreeSpaceDatabase freeSpaceDb;
12990 
12991  size_t dstBlockInfoIndex = 0;
12992  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12993  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12994  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12995  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12996  VkDeviceSize dstOffset = 0;
12997 
12998  bool end = false;
12999  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13000  {
13001  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13002  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13003  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13004  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13005  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13006  {
13007  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13008  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13009  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13010  if(m_AllocationsMoved == maxAllocationsToMove ||
13011  m_BytesMoved + srcAllocSize > maxBytesToMove)
13012  {
13013  end = true;
13014  break;
13015  }
13016  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13017 
13018  // Try to place it in one of the free spaces from the database.
13019  size_t freeSpaceInfoIndex;
13020  VkDeviceSize dstAllocOffset;
13021  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13022  freeSpaceInfoIndex, dstAllocOffset))
13023  {
13024  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13025  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13026  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13027 
13028  // Same block
13029  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13030  {
13031  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13032 
13033  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13034 
13035  VmaSuballocation suballoc = *srcSuballocIt;
13036  suballoc.offset = dstAllocOffset;
13037  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13038  m_BytesMoved += srcAllocSize;
13039  ++m_AllocationsMoved;
13040 
13041  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13042  ++nextSuballocIt;
13043  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13044  srcSuballocIt = nextSuballocIt;
13045 
13046  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13047 
13048  VmaDefragmentationMove move = {
13049  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13050  srcAllocOffset, dstAllocOffset,
13051  srcAllocSize };
13052  moves.push_back(move);
13053  }
13054  // Different block
13055  else
13056  {
13057  // MOVE OPTION 2: Move the allocation to a different block.
13058 
13059  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13060 
13061  VmaSuballocation suballoc = *srcSuballocIt;
13062  suballoc.offset = dstAllocOffset;
13063  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13064  m_BytesMoved += srcAllocSize;
13065  ++m_AllocationsMoved;
13066 
13067  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13068  ++nextSuballocIt;
13069  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13070  srcSuballocIt = nextSuballocIt;
13071 
13072  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13073 
13074  VmaDefragmentationMove move = {
13075  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13076  srcAllocOffset, dstAllocOffset,
13077  srcAllocSize };
13078  moves.push_back(move);
13079  }
13080  }
13081  else
13082  {
13083  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13084 
13085  // If the allocation doesn't fit before the end of dstBlock, move on to the next block.
13086  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13087  dstAllocOffset + srcAllocSize > dstBlockSize)
13088  {
13089  // But before that, register remaining free space at the end of dst block.
13090  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13091 
13092  ++dstBlockInfoIndex;
13093  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13094  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13095  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13096  dstBlockSize = pDstMetadata->GetSize();
13097  dstOffset = 0;
13098  dstAllocOffset = 0;
13099  }
13100 
13101  // Same block
13102  if(dstBlockInfoIndex == srcBlockInfoIndex)
13103  {
13104  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13105 
13106  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13107 
13108  bool skipOver = overlap;
13109  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13110  {
13111  // If the destination and source places overlap, skip the move if it would
13112  // move the allocation by less than 1/64 of its size.
13113  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13114  }
13115 
13116  if(skipOver)
13117  {
13118  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13119 
13120  dstOffset = srcAllocOffset + srcAllocSize;
13121  ++srcSuballocIt;
13122  }
13123  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13124  else
13125  {
13126  srcSuballocIt->offset = dstAllocOffset;
13127  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13128  dstOffset = dstAllocOffset + srcAllocSize;
13129  m_BytesMoved += srcAllocSize;
13130  ++m_AllocationsMoved;
13131  ++srcSuballocIt;
13132  VmaDefragmentationMove move = {
13133  srcOrigBlockIndex, dstOrigBlockIndex,
13134  srcAllocOffset, dstAllocOffset,
13135  srcAllocSize };
13136  moves.push_back(move);
13137  }
13138  }
13139  // Different block
13140  else
13141  {
13142  // MOVE OPTION 2: Move the allocation to a different block.
13143 
13144  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13145  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13146 
13147  VmaSuballocation suballoc = *srcSuballocIt;
13148  suballoc.offset = dstAllocOffset;
13149  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13150  dstOffset = dstAllocOffset + srcAllocSize;
13151  m_BytesMoved += srcAllocSize;
13152  ++m_AllocationsMoved;
13153 
13154  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13155  ++nextSuballocIt;
13156  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13157  srcSuballocIt = nextSuballocIt;
13158 
13159  pDstMetadata->m_Suballocations.push_back(suballoc);
13160 
13161  VmaDefragmentationMove move = {
13162  srcOrigBlockIndex, dstOrigBlockIndex,
13163  srcAllocOffset, dstAllocOffset,
13164  srcAllocSize };
13165  moves.push_back(move);
13166  }
13167  }
13168  }
13169  }
13170 
13171  m_BlockInfos.clear();
13172 
13173  PostprocessMetadata();
13174 
13175  return VK_SUCCESS;
13176 }
13177 
13178 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13179 {
13180  const size_t blockCount = m_pBlockVector->GetBlockCount();
13181  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13182  {
13183  VmaBlockMetadata_Generic* const pMetadata =
13184  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13185  pMetadata->m_FreeCount = 0;
13186  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13187  pMetadata->m_FreeSuballocationsBySize.clear();
13188  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13189  it != pMetadata->m_Suballocations.end(); )
13190  {
13191  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13192  {
13193  VmaSuballocationList::iterator nextIt = it;
13194  ++nextIt;
13195  pMetadata->m_Suballocations.erase(it);
13196  it = nextIt;
13197  }
13198  else
13199  {
13200  ++it;
13201  }
13202  }
13203  }
13204 }
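/*
Illustrative before/after of this preprocessing (hypothetical offsets/sizes):
a block with suballocations [A(0,64), FREE(64,32), B(96,32)] is reduced to
[A(0,64), B(96,32)] with m_FreeCount = 0 and m_SumFreeSize = blockSize. The
moves then rewrite offsets freely, and PostprocessMetadata() below re-derives
the FREE entries, the counters, and the by-size registry from the surviving
allocations.
*/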
13205 
13206 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13207 {
13208  const size_t blockCount = m_pBlockVector->GetBlockCount();
13209  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13210  {
13211  VmaBlockMetadata_Generic* const pMetadata =
13212  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13213  const VkDeviceSize blockSize = pMetadata->GetSize();
13214 
13215  // No allocations in this block - entire area is free.
13216  if(pMetadata->m_Suballocations.empty())
13217  {
13218  pMetadata->m_FreeCount = 1;
13219  //pMetadata->m_SumFreeSize is already set to blockSize.
13220  VmaSuballocation suballoc = {
13221  0, // offset
13222  blockSize, // size
13223  VMA_NULL, // hAllocation
13224  VMA_SUBALLOCATION_TYPE_FREE };
13225  pMetadata->m_Suballocations.push_back(suballoc);
13226  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13227  }
13228  // There are some allocations in this block.
13229  else
13230  {
13231  VkDeviceSize offset = 0;
13232  VmaSuballocationList::iterator it;
13233  for(it = pMetadata->m_Suballocations.begin();
13234  it != pMetadata->m_Suballocations.end();
13235  ++it)
13236  {
13237  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13238  VMA_ASSERT(it->offset >= offset);
13239 
13240  // Need to insert preceding free space.
13241  if(it->offset > offset)
13242  {
13243  ++pMetadata->m_FreeCount;
13244  const VkDeviceSize freeSize = it->offset - offset;
13245  VmaSuballocation suballoc = {
13246  offset, // offset
13247  freeSize, // size
13248  VMA_NULL, // hAllocation
13249  VMA_SUBALLOCATION_TYPE_FREE };
13250  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13251  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13252  {
13253  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13254  }
13255  }
13256 
13257  pMetadata->m_SumFreeSize -= it->size;
13258  offset = it->offset + it->size;
13259  }
13260 
13261  // Need to insert trailing free space.
13262  if(offset < blockSize)
13263  {
13264  ++pMetadata->m_FreeCount;
13265  const VkDeviceSize freeSize = blockSize - offset;
13266  VmaSuballocation suballoc = {
13267  offset, // offset
13268  freeSize, // size
13269  VMA_NULL, // hAllocation
13270  VMA_SUBALLOCATION_TYPE_FREE };
13271  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13272  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13273  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13274  {
13275  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13276  }
13277  }
13278 
13279  VMA_SORT(
13280  pMetadata->m_FreeSuballocationsBySize.begin(),
13281  pMetadata->m_FreeSuballocationsBySize.end(),
13282  VmaSuballocationItemSizeLess());
13283  }
13284 
13285  VMA_HEAVY_ASSERT(pMetadata->Validate());
13286  }
13287 }
13288 
13289 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13290 {
13291  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13292  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13293  // Advance to the first suballocation at or past suballoc.offset.
13294  // (The loop must also stop once it->offset >= suballoc.offset;
13295  // otherwise it would never terminate.)
13296  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
13297  {
13298  ++it;
13299  }
13300  pMetadata->m_Suballocations.insert(it, suballoc);
13301 }
13302 
13303 ////////////////////////////////////////////////////////////////////////////////
13304 // VmaBlockVectorDefragmentationContext
13305 
13306 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13307  VmaAllocator hAllocator,
13308  VmaPool hCustomPool,
13309  VmaBlockVector* pBlockVector,
13310  uint32_t currFrameIndex,
13311  uint32_t algorithmFlags) :
13312  res(VK_SUCCESS),
13313  mutexLocked(false),
13314  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13315  m_hAllocator(hAllocator),
13316  m_hCustomPool(hCustomPool),
13317  m_pBlockVector(pBlockVector),
13318  m_CurrFrameIndex(currFrameIndex),
13319  m_AlgorithmFlags(algorithmFlags),
13320  m_pAlgorithm(VMA_NULL),
13321  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13322  m_AllAllocations(false)
13323 {
13324 }
13325 
13326 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13327 {
13328  vma_delete(m_hAllocator, m_pAlgorithm);
13329 }
13330 
13331 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13332 {
13333  AllocInfo info = { hAlloc, pChanged };
13334  m_Allocations.push_back(info);
13335 }
13336 
13337 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13338 {
13339  const bool allAllocations = m_AllAllocations ||
13340  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13341 
13342  /********************************
13343  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13344  ********************************/
13345 
13346  /*
13347  Fast algorithm is supported only when certain criteria are met:
13348  - VMA_DEBUG_MARGIN is 0.
13349  - All allocations in this block vector are moveable.
13350  - There is no possibility of image/buffer granularity conflict.
13351  */
13352  if(VMA_DEBUG_MARGIN == 0 &&
13353  allAllocations &&
13354  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13355  {
13356  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13357  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13358  }
13359  else
13360  {
13361  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13362  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13363  }
13364 
13365  if(allAllocations)
13366  {
13367  m_pAlgorithm->AddAll();
13368  }
13369  else
13370  {
13371  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13372  {
13373  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13374  }
13375  }
13376 }
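/*
Informal summary of the choice above, derived directly from the condition:

    VMA_DEBUG_MARGIN == 0?   all allocations moveable?   granularity conflict possible?   -> algorithm
    yes                      yes                         no                               -> Fast
    any other combination                                                                 -> Generic

The fast path may assume it can repack every suballocation without re-checking
per-allocation margins or buffer/image granularity, which is exactly what the
three criteria guarantee.
*/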
13377 
13378 ////////////////////////////////////////////////////////////////////////////////
13379 // VmaDefragmentationContext
13380 
13381 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13382  VmaAllocator hAllocator,
13383  uint32_t currFrameIndex,
13384  uint32_t flags,
13385  VmaDefragmentationStats* pStats) :
13386  m_hAllocator(hAllocator),
13387  m_CurrFrameIndex(currFrameIndex),
13388  m_Flags(flags),
13389  m_pStats(pStats),
13390  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13391 {
13392  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13393 }
13394 
13395 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13396 {
13397  for(size_t i = m_CustomPoolContexts.size(); i--; )
13398  {
13399  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13400  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13401  vma_delete(m_hAllocator, pBlockVectorCtx);
13402  }
13403  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13404  {
13405  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13406  if(pBlockVectorCtx)
13407  {
13408  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13409  vma_delete(m_hAllocator, pBlockVectorCtx);
13410  }
13411  }
13412 }
13413 
13414 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13415 {
13416  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13417  {
13418  VmaPool pool = pPools[poolIndex];
13419  VMA_ASSERT(pool);
13420  // Pools with algorithm other than default are not defragmented.
13421  if(pool->m_BlockVector.GetAlgorithm() == 0)
13422  {
13423  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13424 
13425  for(size_t i = m_CustomPoolContexts.size(); i--; )
13426  {
13427  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13428  {
13429  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13430  break;
13431  }
13432  }
13433 
13434  if(!pBlockVectorDefragCtx)
13435  {
13436  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13437  m_hAllocator,
13438  pool,
13439  &pool->m_BlockVector,
13440  m_CurrFrameIndex,
13441  m_Flags);
13442  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13443  }
13444 
13445  pBlockVectorDefragCtx->AddAll();
13446  }
13447  }
13448 }
13449 
13450 void VmaDefragmentationContext_T::AddAllocations(
13451  uint32_t allocationCount,
13452  VmaAllocation* pAllocations,
13453  VkBool32* pAllocationsChanged)
13454 {
13455  // Dispatch pAllocations among defragmentators. Create them when necessary.
13456  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13457  {
13458  const VmaAllocation hAlloc = pAllocations[allocIndex];
13459  VMA_ASSERT(hAlloc);
13460  // DedicatedAlloc cannot be defragmented.
13461  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13462  // Lost allocation cannot be defragmented.
13463  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13464  {
13465  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13466 
13467  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13468  // This allocation belongs to custom pool.
13469  if(hAllocPool != VK_NULL_HANDLE)
13470  {
13471  // Pools with algorithm other than default are not defragmented.
13472  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13473  {
13474  for(size_t i = m_CustomPoolContexts.size(); i--; )
13475  {
13476  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13477  {
13478  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13479  break;
13480  }
13481  }
13482  if(!pBlockVectorDefragCtx)
13483  {
13484  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13485  m_hAllocator,
13486  hAllocPool,
13487  &hAllocPool->m_BlockVector,
13488  m_CurrFrameIndex,
13489  m_Flags);
13490  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13491  }
13492  }
13493  }
13494  // This allocation belongs to default pool.
13495  else
13496  {
13497  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13498  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13499  if(!pBlockVectorDefragCtx)
13500  {
13501  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13502  m_hAllocator,
13503  VMA_NULL, // hCustomPool
13504  m_hAllocator->m_pBlockVectors[memTypeIndex],
13505  m_CurrFrameIndex,
13506  m_Flags);
13507  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13508  }
13509  }
13510 
13511  if(pBlockVectorDefragCtx)
13512  {
13513  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13514  &pAllocationsChanged[allocIndex] : VMA_NULL;
13515  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13516  }
13517  }
13518  }
13519 }
13520 
13521 VkResult VmaDefragmentationContext_T::Defragment(
13522  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13523  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13524  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13525 {
13526  if(pStats)
13527  {
13528  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13529  }
13530 
13531  if(commandBuffer == VK_NULL_HANDLE)
13532  {
13533  maxGpuBytesToMove = 0;
13534  maxGpuAllocationsToMove = 0;
13535  }
13536 
13537  VkResult res = VK_SUCCESS;
13538 
13539  // Process default pools.
13540  for(uint32_t memTypeIndex = 0;
13541  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13542  ++memTypeIndex)
13543  {
13544  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13545  if(pBlockVectorCtx)
13546  {
13547  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13548  pBlockVectorCtx->GetBlockVector()->Defragment(
13549  pBlockVectorCtx,
13550  pStats,
13551  maxCpuBytesToMove, maxCpuAllocationsToMove,
13552  maxGpuBytesToMove, maxGpuAllocationsToMove,
13553  commandBuffer);
13554  if(pBlockVectorCtx->res != VK_SUCCESS)
13555  {
13556  res = pBlockVectorCtx->res;
13557  }
13558  }
13559  }
13560 
13561  // Process custom pools.
13562  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13563  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13564  ++customCtxIndex)
13565  {
13566  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13567  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13568  pBlockVectorCtx->GetBlockVector()->Defragment(
13569  pBlockVectorCtx,
13570  pStats,
13571  maxCpuBytesToMove, maxCpuAllocationsToMove,
13572  maxGpuBytesToMove, maxGpuAllocationsToMove,
13573  commandBuffer);
13574  if(pBlockVectorCtx->res != VK_SUCCESS)
13575  {
13576  res = pBlockVectorCtx->res;
13577  }
13578  }
13579 
13580  return res;
13581 }
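/*
A minimal sketch of driving this from the public API (illustrative only; `allocator`,
`allocations`, and `allocCount` are assumed to exist in the calling code):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-only: GPU limits are zeroed above.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // If a command buffer were passed instead, submit it and wait before calling End.
    if(res >= VK_SUCCESS)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
    }
*/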
13582 
13583 ////////////////////////////////////////////////////////////////////////////////
13584 // VmaRecorder
13585 
13586 #if VMA_RECORDING_ENABLED
13587 
13588 VmaRecorder::VmaRecorder() :
13589  m_UseMutex(true),
13590  m_Flags(0),
13591  m_File(VMA_NULL),
13592  m_Freq(INT64_MAX),
13593  m_StartCounter(INT64_MAX)
13594 {
13595 }
13596 
13597 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13598 {
13599  m_UseMutex = useMutex;
13600  m_Flags = settings.flags;
13601 
13602  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13603  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13604 
13605  // Open file for writing.
13606  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13607  if(err != 0)
13608  {
13609  return VK_ERROR_INITIALIZATION_FAILED;
13610  }
13611 
13612  // Write header.
13613  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13614  fprintf(m_File, "%s\n", "1,5");
13615 
13616  return VK_SUCCESS;
13617 }
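/*
Recording is turned on from user code via VmaAllocatorCreateInfo::pRecordSettings.
A minimal sketch (illustrative; physicalDevice/device assumed to exist):

    VmaRecordSettings recordSettings = {};
    recordSettings.pFilePath = "vma_calls.csv";
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // optional, see Flush() below

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings;

Note that fopen_s and QueryPerformanceCounter make the recorder Windows-only, which
matches the default value of VMA_RECORDING_ENABLED.
*/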
13618 
13619 VmaRecorder::~VmaRecorder()
13620 {
13621  if(m_File != VMA_NULL)
13622  {
13623  fclose(m_File);
13624  }
13625 }
13626 
13627 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13628 {
13629  CallParams callParams;
13630  GetBasicParams(callParams);
13631 
13632  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13633  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13634  Flush();
13635 }
13636 
13637 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13638 {
13639  CallParams callParams;
13640  GetBasicParams(callParams);
13641 
13642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13643  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13644  Flush();
13645 }
13646 
13647 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13648 {
13649  CallParams callParams;
13650  GetBasicParams(callParams);
13651 
13652  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13653  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13654  createInfo.memoryTypeIndex,
13655  createInfo.flags,
13656  createInfo.blockSize,
13657  (uint64_t)createInfo.minBlockCount,
13658  (uint64_t)createInfo.maxBlockCount,
13659  createInfo.frameInUseCount,
13660  pool);
13661  Flush();
13662 }
13663 
13664 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13665 {
13666  CallParams callParams;
13667  GetBasicParams(callParams);
13668 
13669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13670  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13671  pool);
13672  Flush();
13673 }
13674 
13675 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13676  const VkMemoryRequirements& vkMemReq,
13677  const VmaAllocationCreateInfo& createInfo,
13678  VmaAllocation allocation)
13679 {
13680  CallParams callParams;
13681  GetBasicParams(callParams);
13682 
13683  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13684  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13685  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13686  vkMemReq.size,
13687  vkMemReq.alignment,
13688  vkMemReq.memoryTypeBits,
13689  createInfo.flags,
13690  createInfo.usage,
13691  createInfo.requiredFlags,
13692  createInfo.preferredFlags,
13693  createInfo.memoryTypeBits,
13694  createInfo.pool,
13695  allocation,
13696  userDataStr.GetString());
13697  Flush();
13698 }
13699 
13700 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13701  const VkMemoryRequirements& vkMemReq,
13702  const VmaAllocationCreateInfo& createInfo,
13703  uint64_t allocationCount,
13704  const VmaAllocation* pAllocations)
13705 {
13706  CallParams callParams;
13707  GetBasicParams(callParams);
13708 
13709  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13710  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13711  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13712  vkMemReq.size,
13713  vkMemReq.alignment,
13714  vkMemReq.memoryTypeBits,
13715  createInfo.flags,
13716  createInfo.usage,
13717  createInfo.requiredFlags,
13718  createInfo.preferredFlags,
13719  createInfo.memoryTypeBits,
13720  createInfo.pool);
13721  PrintPointerList(allocationCount, pAllocations);
13722  fprintf(m_File, ",%s\n", userDataStr.GetString());
13723  Flush();
13724 }
13725 
13726 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13727  const VkMemoryRequirements& vkMemReq,
13728  bool requiresDedicatedAllocation,
13729  bool prefersDedicatedAllocation,
13730  const VmaAllocationCreateInfo& createInfo,
13731  VmaAllocation allocation)
13732 {
13733  CallParams callParams;
13734  GetBasicParams(callParams);
13735 
13736  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13737  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13738  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13739  vkMemReq.size,
13740  vkMemReq.alignment,
13741  vkMemReq.memoryTypeBits,
13742  requiresDedicatedAllocation ? 1 : 0,
13743  prefersDedicatedAllocation ? 1 : 0,
13744  createInfo.flags,
13745  createInfo.usage,
13746  createInfo.requiredFlags,
13747  createInfo.preferredFlags,
13748  createInfo.memoryTypeBits,
13749  createInfo.pool,
13750  allocation,
13751  userDataStr.GetString());
13752  Flush();
13753 }
13754 
13755 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13756  const VkMemoryRequirements& vkMemReq,
13757  bool requiresDedicatedAllocation,
13758  bool prefersDedicatedAllocation,
13759  const VmaAllocationCreateInfo& createInfo,
13760  VmaAllocation allocation)
13761 {
13762  CallParams callParams;
13763  GetBasicParams(callParams);
13764 
13765  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13766  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13767  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13768  vkMemReq.size,
13769  vkMemReq.alignment,
13770  vkMemReq.memoryTypeBits,
13771  requiresDedicatedAllocation ? 1 : 0,
13772  prefersDedicatedAllocation ? 1 : 0,
13773  createInfo.flags,
13774  createInfo.usage,
13775  createInfo.requiredFlags,
13776  createInfo.preferredFlags,
13777  createInfo.memoryTypeBits,
13778  createInfo.pool,
13779  allocation,
13780  userDataStr.GetString());
13781  Flush();
13782 }
13783 
13784 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13785  VmaAllocation allocation)
13786 {
13787  CallParams callParams;
13788  GetBasicParams(callParams);
13789 
13790  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13791  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13792  allocation);
13793  Flush();
13794 }
13795 
13796 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13797  uint64_t allocationCount,
13798  const VmaAllocation* pAllocations)
13799 {
13800  CallParams callParams;
13801  GetBasicParams(callParams);
13802 
13803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13804  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13805  PrintPointerList(allocationCount, pAllocations);
13806  fprintf(m_File, "\n");
13807  Flush();
13808 }
13809 
13810 void VmaRecorder::RecordResizeAllocation(
13811  uint32_t frameIndex,
13812  VmaAllocation allocation,
13813  VkDeviceSize newSize)
13814 {
13815  CallParams callParams;
13816  GetBasicParams(callParams);
13817 
13818  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13819  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13820  allocation, newSize);
13821  Flush();
13822 }
13823 
13824 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13825  VmaAllocation allocation,
13826  const void* pUserData)
13827 {
13828  CallParams callParams;
13829  GetBasicParams(callParams);
13830 
13831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13832  UserDataString userDataStr(
13833  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13834  pUserData);
13835  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13836  allocation,
13837  userDataStr.GetString());
13838  Flush();
13839 }
13840 
13841 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13842  VmaAllocation allocation)
13843 {
13844  CallParams callParams;
13845  GetBasicParams(callParams);
13846 
13847  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13848  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13849  allocation);
13850  Flush();
13851 }
13852 
13853 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13854  VmaAllocation allocation)
13855 {
13856  CallParams callParams;
13857  GetBasicParams(callParams);
13858 
13859  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13860  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13861  allocation);
13862  Flush();
13863 }
13864 
13865 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13866  VmaAllocation allocation)
13867 {
13868  CallParams callParams;
13869  GetBasicParams(callParams);
13870 
13871  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13872  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13873  allocation);
13874  Flush();
13875 }
13876 
13877 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13878  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13879 {
13880  CallParams callParams;
13881  GetBasicParams(callParams);
13882 
13883  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13884  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13885  allocation,
13886  offset,
13887  size);
13888  Flush();
13889 }
13890 
13891 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13892  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13893 {
13894  CallParams callParams;
13895  GetBasicParams(callParams);
13896 
13897  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13898  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13899  allocation,
13900  offset,
13901  size);
13902  Flush();
13903 }
13904 
13905 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13906  const VkBufferCreateInfo& bufCreateInfo,
13907  const VmaAllocationCreateInfo& allocCreateInfo,
13908  VmaAllocation allocation)
13909 {
13910  CallParams callParams;
13911  GetBasicParams(callParams);
13912 
13913  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13914  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13915  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13916  bufCreateInfo.flags,
13917  bufCreateInfo.size,
13918  bufCreateInfo.usage,
13919  bufCreateInfo.sharingMode,
13920  allocCreateInfo.flags,
13921  allocCreateInfo.usage,
13922  allocCreateInfo.requiredFlags,
13923  allocCreateInfo.preferredFlags,
13924  allocCreateInfo.memoryTypeBits,
13925  allocCreateInfo.pool,
13926  allocation,
13927  userDataStr.GetString());
13928  Flush();
13929 }
13930 
13931 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13932  const VkImageCreateInfo& imageCreateInfo,
13933  const VmaAllocationCreateInfo& allocCreateInfo,
13934  VmaAllocation allocation)
13935 {
13936  CallParams callParams;
13937  GetBasicParams(callParams);
13938 
13939  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13940  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13941  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13942  imageCreateInfo.flags,
13943  imageCreateInfo.imageType,
13944  imageCreateInfo.format,
13945  imageCreateInfo.extent.width,
13946  imageCreateInfo.extent.height,
13947  imageCreateInfo.extent.depth,
13948  imageCreateInfo.mipLevels,
13949  imageCreateInfo.arrayLayers,
13950  imageCreateInfo.samples,
13951  imageCreateInfo.tiling,
13952  imageCreateInfo.usage,
13953  imageCreateInfo.sharingMode,
13954  imageCreateInfo.initialLayout,
13955  allocCreateInfo.flags,
13956  allocCreateInfo.usage,
13957  allocCreateInfo.requiredFlags,
13958  allocCreateInfo.preferredFlags,
13959  allocCreateInfo.memoryTypeBits,
13960  allocCreateInfo.pool,
13961  allocation,
13962  userDataStr.GetString());
13963  Flush();
13964 }
13965 
13966 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13967  VmaAllocation allocation)
13968 {
13969  CallParams callParams;
13970  GetBasicParams(callParams);
13971 
13972  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13973  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13974  allocation);
13975  Flush();
13976 }
13977 
13978 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13979  VmaAllocation allocation)
13980 {
13981  CallParams callParams;
13982  GetBasicParams(callParams);
13983 
13984  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13985  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13986  allocation);
13987  Flush();
13988 }
13989 
13990 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13991  VmaAllocation allocation)
13992 {
13993  CallParams callParams;
13994  GetBasicParams(callParams);
13995 
13996  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13997  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13998  allocation);
13999  Flush();
14000 }
14001 
14002 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14003  VmaAllocation allocation)
14004 {
14005  CallParams callParams;
14006  GetBasicParams(callParams);
14007 
14008  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14009  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14010  allocation);
14011  Flush();
14012 }
14013 
14014 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14015  VmaPool pool)
14016 {
14017  CallParams callParams;
14018  GetBasicParams(callParams);
14019 
14020  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14021  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14022  pool);
14023  Flush();
14024 }
14025 
14026 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14027  const VmaDefragmentationInfo2& info,
14028  VmaDefragmentationContext ctx)
14029 {
14030  CallParams callParams;
14031  GetBasicParams(callParams);
14032 
14033  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14034  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14035  info.flags);
14036  PrintPointerList(info.allocationCount, info.pAllocations);
14037  fprintf(m_File, ",");
14038  PrintPointerList(info.poolCount, info.pPools);
14039  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14040  info.maxCpuBytesToMove,
14041  info.maxCpuAllocationsToMove,
14042  info.maxGpuBytesToMove,
14043  info.maxGpuAllocationsToMove,
14044  info.commandBuffer,
14045  ctx);
14046  Flush();
14047 }
14048 
14049 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14050  VmaDefragmentationContext ctx)
14051 {
14052  CallParams callParams;
14053  GetBasicParams(callParams);
14054 
14055  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14056  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14057  ctx);
14058  Flush();
14059 }
14060 
14061 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14062 {
14063  if(pUserData != VMA_NULL)
14064  {
14065  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14066  {
14067  m_Str = (const char*)pUserData;
14068  }
14069  else
14070  {
14071  sprintf_s(m_PtrStr, "%p", pUserData);
14072  m_Str = m_PtrStr;
14073  }
14074  }
14075  else
14076  {
14077  m_Str = "";
14078  }
14079 }
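/*
How the two branches above are reached from user code (illustrative sketch):

    VmaAllocationCreateInfo allocCreateInfo = {};
    // With the flag, pUserData is treated as a null-terminated string and recorded verbatim:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    allocCreateInfo.pUserData = (void*)"MyVertexBuffer";
    // Without the flag, the same pointer would be recorded as an opaque %p value.
*/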
14080 
14081 void VmaRecorder::WriteConfiguration(
14082  const VkPhysicalDeviceProperties& devProps,
14083  const VkPhysicalDeviceMemoryProperties& memProps,
14084  bool dedicatedAllocationExtensionEnabled)
14085 {
14086  fprintf(m_File, "Config,Begin\n");
14087 
14088  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14089  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14090  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14091  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14092  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14093  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14094 
14095  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14096  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14097  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14098 
14099  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14100  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14101  {
14102  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14103  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14104  }
14105  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14106  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14107  {
14108  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14109  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14110  }
14111 
14112  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14113 
14114  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14115  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14116  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14117  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14118  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14119  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14120  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14121  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14122  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14123 
14124  fprintf(m_File, "Config,End\n");
14125 }
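/*
Shape of the resulting file (values hypothetical), combining the header written in
Init(), this configuration block, and the per-call lines written by the Record*
methods (threadId,time,frameIndex,functionName,params...):

    Vulkan Memory Allocator,Calls recording
    1,5
    Config,Begin
    PhysicalDevice,apiVersion,4198400
    ...
    Config,End
    7512,0.002,0,vmaCreateAllocator
    7512,0.153,1,vmaCreateBuffer,0,65536,130,0,...
*/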
14126 
14127 void VmaRecorder::GetBasicParams(CallParams& outParams)
14128 {
14129  outParams.threadId = GetCurrentThreadId();
14130 
14131  LARGE_INTEGER counter;
14132  QueryPerformanceCounter(&counter);
14133  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14134 }
14135 
14136 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14137 {
14138  if(count)
14139  {
14140  fprintf(m_File, "%p", pItems[0]);
14141  for(uint64_t i = 1; i < count; ++i)
14142  {
14143  fprintf(m_File, " %p", pItems[i]);
14144  }
14145  }
14146 }
14147 
14148 void VmaRecorder::Flush()
14149 {
14150  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14151  {
14152  fflush(m_File);
14153  }
14154 }
14155 
14156 #endif // #if VMA_RECORDING_ENABLED
14157 
14158 ////////////////////////////////////////////////////////////////////////////////
14159 // VmaAllocationObjectAllocator
14160 
14161 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14162  m_Allocator(pAllocationCallbacks, 1024)
14163 {
14164 }
14165 
14166 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14167 {
14168  VmaMutexLock mutexLock(m_Mutex);
14169  return m_Allocator.Alloc();
14170 }
14171 
14172 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14173 {
14174  VmaMutexLock mutexLock(m_Mutex);
14175  m_Allocator.Free(hAlloc);
14176 }
14177 
14178 ////////////////////////////////////////////////////////////////////////////////
14179 // VmaAllocator_T
14180 
14181 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14182  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14183  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14184  m_hDevice(pCreateInfo->device),
14185  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14186  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14187  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14188  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14189  m_PreferredLargeHeapBlockSize(0),
14190  m_PhysicalDevice(pCreateInfo->physicalDevice),
14191  m_CurrentFrameIndex(0),
14192  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14193  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14194  m_NextPoolId(0)
14195 #if VMA_RECORDING_ENABLED
14196  ,m_pRecorder(VMA_NULL)
14197 #endif
14198 {
14199  if(VMA_DEBUG_DETECT_CORRUPTION)
14200  {
14201  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14202  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14203  }
14204 
14205  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14206 
14207 #if !(VMA_DEDICATED_ALLOCATION)
14208  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14209  {
14210  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14211  }
14212 #endif
14213 
14214  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14215  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14216  memset(&m_MemProps, 0, sizeof(m_MemProps));
14217 
14218  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14219  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14220 
14221  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14222  {
14223  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14224  }
14225 
14226  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14227  {
14228  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14229  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14230  }
14231 
14232  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14233 
14234  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14235  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14236 
14237  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14238  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14239  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14240  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14241 
14242  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14243  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14244 
14245  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14246  {
14247  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14248  {
14249  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14250  if(limit != VK_WHOLE_SIZE)
14251  {
14252  m_HeapSizeLimit[heapIndex] = limit;
14253  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14254  {
14255  m_MemProps.memoryHeaps[heapIndex].size = limit;
14256  }
14257  }
14258  }
14259  }
14260 
14261  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14262  {
14263  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14264 
14265  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14266  this,
14267  VK_NULL_HANDLE, // hParentPool
14268  memTypeIndex,
14269  preferredBlockSize,
14270  0, // minBlockCount
14271  SIZE_MAX, // maxBlockCount
14272  GetBufferImageGranularity(),
14273  pCreateInfo->frameInUseCount,
14274  false, // isCustomPool
14275  false, // explicitBlockSize
14276  false); // linearAlgorithm
14277  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14278  // because minBlockCount is 0.
14279  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14280 
14281  }
14282 }
14283 
14284 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14285 {
14286  VkResult res = VK_SUCCESS;
14287 
14288  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14289  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14290  {
14291 #if VMA_RECORDING_ENABLED
14292  m_pRecorder = vma_new(this, VmaRecorder)();
14293  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14294  if(res != VK_SUCCESS)
14295  {
14296  return res;
14297  }
14298  m_pRecorder->WriteConfiguration(
14299  m_PhysicalDeviceProperties,
14300  m_MemProps,
14301  m_UseKhrDedicatedAllocation);
14302  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14303 #else
14304  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14305  return VK_ERROR_FEATURE_NOT_PRESENT;
14306 #endif
14307  }
14308 
14309  return res;
14310 }
14311 
14312 VmaAllocator_T::~VmaAllocator_T()
14313 {
14314 #if VMA_RECORDING_ENABLED
14315  if(m_pRecorder != VMA_NULL)
14316  {
14317  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14318  vma_delete(this, m_pRecorder);
14319  }
14320 #endif
14321 
14322  VMA_ASSERT(m_Pools.empty());
14323 
14324  for(size_t i = GetMemoryTypeCount(); i--; )
14325  {
14326  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14327  {
14328  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14329  }
14330 
14331  vma_delete(this, m_pDedicatedAllocations[i]);
14332  vma_delete(this, m_pBlockVectors[i]);
14333  }
14334 }
14335 
14336 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14337 {
14338 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14339  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14340  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14341  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14342  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14343  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14344  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14345  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14346  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14347  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14348  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14349  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14350  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14351  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14352  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14353  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14354  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14355  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14356 #if VMA_DEDICATED_ALLOCATION
14357  if(m_UseKhrDedicatedAllocation)
14358  {
14359  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14360  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14361  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14362  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14363  }
14364 #endif // #if VMA_DEDICATED_ALLOCATION
14365 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14366 
14367 #define VMA_COPY_IF_NOT_NULL(funcName) \
14368  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14369 
14370  if(pVulkanFunctions != VMA_NULL)
14371  {
14372  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14373  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14374  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14375  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14376  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14377  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14378  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14379  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14380  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14381  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14382  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14383  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14384  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14385  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14386  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14387  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14388  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14389 #if VMA_DEDICATED_ALLOCATION
14390  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14391  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14392 #endif
14393  }
14394 
14395 #undef VMA_COPY_IF_NOT_NULL
14396 
14397  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14398  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14399  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14400  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14401  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14402  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14403  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14404  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14405  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14406  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14407  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14408  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14409  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14410  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14411  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14412  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14413  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14414  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14415  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14416 #if VMA_DEDICATED_ALLOCATION
14417  if(m_UseKhrDedicatedAllocation)
14418  {
14419  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14420  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14421  }
14422 #endif
14423 }
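/*
When VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, every pointer must be supplied
explicitly so the asserts above pass. A minimal sketch:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    // ... and so on for every remaining member, including vkCmdCopyBuffer ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/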
14424 
14425 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14426 {
14427  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14428  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14429  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14430  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14431 }
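/*
Worked example, assuming the default macro values (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):

    512 MiB heap -> small heap -> preferred block size = 512 MiB / 8 = 64 MiB
    8 GiB heap   -> large heap -> preferred block size = 256 MiB
*/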
14432 
14433 VkResult VmaAllocator_T::AllocateMemoryOfType(
14434  VkDeviceSize size,
14435  VkDeviceSize alignment,
14436  bool dedicatedAllocation,
14437  VkBuffer dedicatedBuffer,
14438  VkImage dedicatedImage,
14439  const VmaAllocationCreateInfo& createInfo,
14440  uint32_t memTypeIndex,
14441  VmaSuballocationType suballocType,
14442  size_t allocationCount,
14443  VmaAllocation* pAllocations)
14444 {
14445  VMA_ASSERT(pAllocations != VMA_NULL);
14446  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14447 
14448  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14449 
14450  // If memory type is not HOST_VISIBLE, disable MAPPED.
14451  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14452  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14453  {
14454  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14455  }
14456 
14457  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14458  VMA_ASSERT(blockVector);
14459 
14460  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14461  bool preferDedicatedMemory =
14462  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14463  dedicatedAllocation ||
14464  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14465  size > preferredBlockSize / 2;
14466 
14467  if(preferDedicatedMemory &&
14468  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14469  finalCreateInfo.pool == VK_NULL_HANDLE)
14470  {
14471  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14472  }
14473 
14474  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14475  {
14476  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14477  {
14478  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14479  }
14480  else
14481  {
14482  return AllocateDedicatedMemory(
14483  size,
14484  suballocType,
14485  memTypeIndex,
14486  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14487  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14488  finalCreateInfo.pUserData,
14489  dedicatedBuffer,
14490  dedicatedImage,
14491  allocationCount,
14492  pAllocations);
14493  }
14494  }
14495  else
14496  {
14497  VkResult res = blockVector->Allocate(
14498  m_CurrentFrameIndex.load(),
14499  size,
14500  alignment,
14501  finalCreateInfo,
14502  suballocType,
14503  allocationCount,
14504  pAllocations);
14505  if(res == VK_SUCCESS)
14506  {
14507  return res;
14508  }
14509 
14510  // 5. Try dedicated memory.
14511  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14512  {
14513  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14514  }
14515  else
14516  {
14517  res = AllocateDedicatedMemory(
14518  size,
14519  suballocType,
14520  memTypeIndex,
14521  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14522  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14523  finalCreateInfo.pUserData,
14524  dedicatedBuffer,
14525  dedicatedImage,
14526  allocationCount,
14527  pAllocations);
14528  if(res == VK_SUCCESS)
14529  {
14530  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
14531  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14532  return VK_SUCCESS;
14533  }
14534  else
14535  {
14536  // Everything failed: Return error code.
14537  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14538  return res;
14539  }
14540  }
14541  }
14542 }
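/*
Decision order implemented above, summarized:

    1. DEDICATED_MEMORY_BIT set (explicitly, or promoted by the heuristics) -> dedicated
       allocation; combined with NEVER_ALLOCATE_BIT this fails immediately.
    2. Otherwise try the per-memory-type block vector.
    3. On failure, fall back to dedicated memory, again unless NEVER_ALLOCATE_BIT
       forbids creating a new VkDeviceMemory block.

From user code the first path is forced like this (illustrative):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
*/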
14543 
14544 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14545  VkDeviceSize size,
14546  VmaSuballocationType suballocType,
14547  uint32_t memTypeIndex,
14548  bool map,
14549  bool isUserDataString,
14550  void* pUserData,
14551  VkBuffer dedicatedBuffer,
14552  VkImage dedicatedImage,
14553  size_t allocationCount,
14554  VmaAllocation* pAllocations)
14555 {
14556  VMA_ASSERT(allocationCount > 0 && pAllocations);
14557 
14558  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14559  allocInfo.memoryTypeIndex = memTypeIndex;
14560  allocInfo.allocationSize = size;
14561 
14562 #if VMA_DEDICATED_ALLOCATION
14563  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14564  if(m_UseKhrDedicatedAllocation)
14565  {
14566  if(dedicatedBuffer != VK_NULL_HANDLE)
14567  {
14568  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14569  dedicatedAllocInfo.buffer = dedicatedBuffer;
14570  allocInfo.pNext = &dedicatedAllocInfo;
14571  }
14572  else if(dedicatedImage != VK_NULL_HANDLE)
14573  {
14574  dedicatedAllocInfo.image = dedicatedImage;
14575  allocInfo.pNext = &dedicatedAllocInfo;
14576  }
14577  }
14578 #endif // #if VMA_DEDICATED_ALLOCATION
14579 
14580  size_t allocIndex;
14581  VkResult res = VK_SUCCESS;
14582  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14583  {
14584  res = AllocateDedicatedMemoryPage(
14585  size,
14586  suballocType,
14587  memTypeIndex,
14588  allocInfo,
14589  map,
14590  isUserDataString,
14591  pUserData,
14592  pAllocations + allocIndex);
14593  if(res != VK_SUCCESS)
14594  {
14595  break;
14596  }
14597  }
14598 
14599  if(res == VK_SUCCESS)
14600  {
14601  // Register them in m_pDedicatedAllocations.
14602  {
14603  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14604  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14605  VMA_ASSERT(pDedicatedAllocations);
14606  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14607  {
14608  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14609  }
14610  }
14611 
14612  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14613  }
14614  else
14615  {
14616  // Free all already created allocations.
14617  while(allocIndex--)
14618  {
14619  VmaAllocation currAlloc = pAllocations[allocIndex];
14620  VkDeviceMemory hMemory = currAlloc->GetMemory();
14621 
14622  /*
14623  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14624  before vkFreeMemory.
14625 
14626  if(currAlloc->GetMappedData() != VMA_NULL)
14627  {
14628  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14629  }
14630  */
14631 
14632  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14633 
14634  currAlloc->SetUserData(this, VMA_NULL);
14635  currAlloc->Dtor();
14636  m_AllocationObjectAllocator.Free(currAlloc);
14637  }
14638 
14639  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14640  }
14641 
14642  return res;
14643 }
14644 
14645 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14646  VkDeviceSize size,
14647  VmaSuballocationType suballocType,
14648  uint32_t memTypeIndex,
14649  const VkMemoryAllocateInfo& allocInfo,
14650  bool map,
14651  bool isUserDataString,
14652  void* pUserData,
14653  VmaAllocation* pAllocation)
14654 {
14655  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14656  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14657  if(res < 0)
14658  {
14659  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14660  return res;
14661  }
14662 
14663  void* pMappedData = VMA_NULL;
14664  if(map)
14665  {
14666  res = (*m_VulkanFunctions.vkMapMemory)(
14667  m_hDevice,
14668  hMemory,
14669  0,
14670  VK_WHOLE_SIZE,
14671  0,
14672  &pMappedData);
14673  if(res < 0)
14674  {
14675  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14676  FreeVulkanMemory(memTypeIndex, size, hMemory);
14677  return res;
14678  }
14679  }
14680 
14681  *pAllocation = m_AllocationObjectAllocator.Allocate();
14682  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14683  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14684  (*pAllocation)->SetUserData(this, pUserData);
14685  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14686  {
14687  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14688  }
14689 
14690  return VK_SUCCESS;
14691 }
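/*
The `map` parameter corresponds to VMA_ALLOCATION_CREATE_MAPPED_BIT. A persistently
mapped dedicated allocation is requested like this (illustrative sketch):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
                            VMA_ALLOCATION_CREATE_MAPPED_BIT;
    // The pointer produced by the vkMapMemory call above is later available through
    // VmaAllocationInfo::pMappedData.
*/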
14692 
14693 void VmaAllocator_T::GetBufferMemoryRequirements(
14694  VkBuffer hBuffer,
14695  VkMemoryRequirements& memReq,
14696  bool& requiresDedicatedAllocation,
14697  bool& prefersDedicatedAllocation) const
14698 {
14699 #if VMA_DEDICATED_ALLOCATION
14700  if(m_UseKhrDedicatedAllocation)
14701  {
14702  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14703  memReqInfo.buffer = hBuffer;
14704 
14705  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14706 
14707  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14708  memReq2.pNext = &memDedicatedReq;
14709 
14710  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14711 
14712  memReq = memReq2.memoryRequirements;
14713  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14714  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14715  }
14716  else
14717 #endif // #if VMA_DEDICATED_ALLOCATION
14718  {
14719  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14720  requiresDedicatedAllocation = false;
14721  prefersDedicatedAllocation = false;
14722  }
14723 }
14724 
14725 void VmaAllocator_T::GetImageMemoryRequirements(
14726  VkImage hImage,
14727  VkMemoryRequirements& memReq,
14728  bool& requiresDedicatedAllocation,
14729  bool& prefersDedicatedAllocation) const
14730 {
14731 #if VMA_DEDICATED_ALLOCATION
14732  if(m_UseKhrDedicatedAllocation)
14733  {
14734  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14735  memReqInfo.image = hImage;
14736 
14737  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14738 
14739  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14740  memReq2.pNext = &memDedicatedReq;
14741 
14742  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14743 
14744  memReq = memReq2.memoryRequirements;
14745  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14746  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14747  }
14748  else
14749 #endif // #if VMA_DEDICATED_ALLOCATION
14750  {
14751  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14752  requiresDedicatedAllocation = false;
14753  prefersDedicatedAllocation = false;
14754  }
14755 }
14756 
14757 VkResult VmaAllocator_T::AllocateMemory(
14758  const VkMemoryRequirements& vkMemReq,
14759  bool requiresDedicatedAllocation,
14760  bool prefersDedicatedAllocation,
14761  VkBuffer dedicatedBuffer,
14762  VkImage dedicatedImage,
14763  const VmaAllocationCreateInfo& createInfo,
14764  VmaSuballocationType suballocType,
14765  size_t allocationCount,
14766  VmaAllocation* pAllocations)
14767 {
14768  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14769 
14770  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14771 
14772  if(vkMemReq.size == 0)
14773  {
14774  return VK_ERROR_VALIDATION_FAILED_EXT;
14775  }
14776  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14777  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14778  {
14779  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14780  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14781  }
14782  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14783  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14784  {
14785  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14786  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14787  }
14788  if(requiresDedicatedAllocation)
14789  {
14790  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14791  {
14792  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14793  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14794  }
14795  if(createInfo.pool != VK_NULL_HANDLE)
14796  {
14797  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14799  }
14800  }
14801  if((createInfo.pool != VK_NULL_HANDLE) &&
14802  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14803  {
14804  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14805  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14806  }
14807 
14808  if(createInfo.pool != VK_NULL_HANDLE)
14809  {
14810  const VkDeviceSize alignmentForPool = VMA_MAX(
14811  vkMemReq.alignment,
14812  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14813  return createInfo.pool->m_BlockVector.Allocate(
14814  m_CurrentFrameIndex.load(),
14815  vkMemReq.size,
14816  alignmentForPool,
14817  createInfo,
14818  suballocType,
14819  allocationCount,
14820  pAllocations);
14821  }
14822  else
14823  {
14824  // Bit mask of Vulkan memory types acceptable for this allocation.
14825  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14826  uint32_t memTypeIndex = UINT32_MAX;
14827  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14828  if(res == VK_SUCCESS)
14829  {
14830  VkDeviceSize alignmentForMemType = VMA_MAX(
14831  vkMemReq.alignment,
14832  GetMemoryTypeMinAlignment(memTypeIndex));
14833 
14834  res = AllocateMemoryOfType(
14835  vkMemReq.size,
14836  alignmentForMemType,
14837  requiresDedicatedAllocation || prefersDedicatedAllocation,
14838  dedicatedBuffer,
14839  dedicatedImage,
14840  createInfo,
14841  memTypeIndex,
14842  suballocType,
14843  allocationCount,
14844  pAllocations);
14845  // Succeeded on first try.
14846  if(res == VK_SUCCESS)
14847  {
14848  return res;
14849  }
14850  // Allocation from this memory type failed. Try other compatible memory types.
14851  else
14852  {
14853  for(;;)
14854  {
14855  // Remove old memTypeIndex from list of possibilities.
14856  memoryTypeBits &= ~(1u << memTypeIndex);
14857  // Find alternative memTypeIndex.
14858  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14859  if(res == VK_SUCCESS)
14860  {
14861  alignmentForMemType = VMA_MAX(
14862  vkMemReq.alignment,
14863  GetMemoryTypeMinAlignment(memTypeIndex));
14864 
14865  res = AllocateMemoryOfType(
14866  vkMemReq.size,
14867  alignmentForMemType,
14868  requiresDedicatedAllocation || prefersDedicatedAllocation,
14869  dedicatedBuffer,
14870  dedicatedImage,
14871  createInfo,
14872  memTypeIndex,
14873  suballocType,
14874  allocationCount,
14875  pAllocations);
14876  // Allocation from this alternative memory type succeeded.
14877  if(res == VK_SUCCESS)
14878  {
14879  return res;
14880  }
14881  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14882  }
14883  // No other matching memory type index could be found.
14884  else
14885  {
14886  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14887  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14888  }
14889  }
14890  }
14891  }
14892  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14893  else
14894  return res;
14895  }
14896 }
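/*
The retry loop above mirrors what user code can do manually with the public helper
(illustrative; `allocator` and `memoryTypeBits` assumed to exist):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
    // On VK_SUCCESS, memTypeIndex is the best match among the bits set in memoryTypeBits;
    // clearing that bit and calling again yields the next-best candidate.
*/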
14897 
14898 void VmaAllocator_T::FreeMemory(
14899  size_t allocationCount,
14900  const VmaAllocation* pAllocations)
14901 {
14902  VMA_ASSERT(pAllocations);
14903 
14904  for(size_t allocIndex = allocationCount; allocIndex--; )
14905  {
14906  VmaAllocation allocation = pAllocations[allocIndex];
14907 
14908  if(allocation != VK_NULL_HANDLE)
14909  {
14910  if(TouchAllocation(allocation))
14911  {
14912  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14913  {
14914  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14915  }
14916 
14917  switch(allocation->GetType())
14918  {
14919  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14920  {
14921  VmaBlockVector* pBlockVector = VMA_NULL;
14922  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14923  if(hPool != VK_NULL_HANDLE)
14924  {
14925  pBlockVector = &hPool->m_BlockVector;
14926  }
14927  else
14928  {
14929  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14930  pBlockVector = m_pBlockVectors[memTypeIndex];
14931  }
14932  pBlockVector->Free(allocation);
14933  }
14934  break;
14935  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14936  FreeDedicatedMemory(allocation);
14937  break;
14938  default:
14939  VMA_ASSERT(0);
14940  }
14941  }
14942 
14943  allocation->SetUserData(this, VMA_NULL);
14944  allocation->Dtor();
14945  m_AllocationObjectAllocator.Free(allocation);
14946  }
14947  }
14948 }
14949 
14950 VkResult VmaAllocator_T::ResizeAllocation(
14951  const VmaAllocation alloc,
14952  VkDeviceSize newSize)
14953 {
14954  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14955  {
14956  return VK_ERROR_VALIDATION_FAILED_EXT;
14957  }
14958  if(newSize == alloc->GetSize())
14959  {
14960  return VK_SUCCESS;
14961  }
14962 
14963  switch(alloc->GetType())
14964  {
14965  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14966  return VK_ERROR_FEATURE_NOT_PRESENT;
14967  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14968  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14969  {
14970  alloc->ChangeSize(newSize);
14971  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14972  return VK_SUCCESS;
14973  }
14974  else
14975  {
14976  return VK_ERROR_OUT_OF_POOL_MEMORY;
14977  }
14978  default:
14979  VMA_ASSERT(0);
14980  return VK_ERROR_VALIDATION_FAILED_EXT;
14981  }
14982 }
14983 
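/*
Illustrative sketch (hypothetical helper): as implemented above, vmaResizeAllocation()
only resizes in place -- dedicated allocations are rejected and a block allocation
fails with VK_ERROR_OUT_OF_POOL_MEMORY when the adjacent space is occupied, so a
caller must be prepared to fall back to a fresh allocation plus copy.
*/
static VkResult ExampleTryGrowAllocation(VmaAllocator allocator, VmaAllocation alloc, VkDeviceSize newSize)
{
    VkResult res = vmaResizeAllocation(allocator, alloc, newSize);
    if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
    {
        // In-place resize impossible: create a new allocation and copy the contents here.
    }
    return res;
}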
14984 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14985 {
14986  // Initialize.
14987  InitStatInfo(pStats->total);
14988  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14989  InitStatInfo(pStats->memoryType[i]);
14990  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14991  InitStatInfo(pStats->memoryHeap[i]);
14992 
14993  // Process default pools.
14994  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14995  {
14996  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14997  VMA_ASSERT(pBlockVector);
14998  pBlockVector->AddStats(pStats);
14999  }
15000 
15001  // Process custom pools.
15002  {
15003  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15004  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15005  {
15006  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15007  }
15008  }
15009 
15010  // Process dedicated allocations.
15011  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15012  {
15013  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15014  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15015  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15016  VMA_ASSERT(pDedicatedAllocVector);
15017  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15018  {
15019  VmaStatInfo allocationStatInfo;
15020  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15021  VmaAddStatInfo(pStats->total, allocationStatInfo);
15022  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15023  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15024  }
15025  }
15026 
15027  // Postprocess.
15028  VmaPostprocessCalcStatInfo(pStats->total);
15029  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15030  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15031  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15032  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15033 }
15034 
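/*
Illustrative sketch (hypothetical helper): reading the aggregated numbers computed by
CalculateStats() above through the public vmaCalculateStats() entry point.
*/
static VkDeviceSize ExampleQueryUsedBytes(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total covers default pools, custom pools and dedicated allocations.
    return stats.total.usedBytes;
}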
15035 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
15036 
15037 VkResult VmaAllocator_T::DefragmentationBegin(
15038  const VmaDefragmentationInfo2& info,
15039  VmaDefragmentationStats* pStats,
15040  VmaDefragmentationContext* pContext)
15041 {
15042  if(info.pAllocationsChanged != VMA_NULL)
15043  {
15044  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15045  }
15046 
15047  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15048  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15049 
15050  (*pContext)->AddPools(info.poolCount, info.pPools);
15051  (*pContext)->AddAllocations(
15052  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15053 
15054  VkResult res = (*pContext)->Defragment(
15055  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15056  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15057  info.commandBuffer, pStats);
15058 
15059  if(res != VK_NOT_READY)
15060  {
15061  vma_delete(this, *pContext);
15062  *pContext = VMA_NULL;
15063  }
15064 
15065  return res;
15066 }
15067 
15068 VkResult VmaAllocator_T::DefragmentationEnd(
15069  VmaDefragmentationContext context)
15070 {
15071  vma_delete(this, context);
15072  return VK_SUCCESS;
15073 }
15074 
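/*
Illustrative sketch (hypothetical helper): the CPU-only defragmentation flow served by
DefragmentationBegin()/DefragmentationEnd() above. Resources bound to allocations whose
entry in pAllocationsChanged becomes VK_TRUE must be recreated and re-bound by the caller.
*/
static VkResult ExampleDefragmentCpu(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    uint32_t allocationCount,
    VkBool32* pAllocationsChanged)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocationCount;
    info.pAllocations = pAllocations;
    info.pAllocationsChanged = pAllocationsChanged;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    // GPU limits and commandBuffer left zero: no GPU-side moves are requested.

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    return res;
}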
15075 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15076 {
15077  if(hAllocation->CanBecomeLost())
15078  {
15079  /*
15080  Warning: This is a carefully designed algorithm.
15081  Do not modify unless you really know what you're doing :)
15082  */
15083  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15084  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15085  for(;;)
15086  {
15087  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15088  {
15089  pAllocationInfo->memoryType = UINT32_MAX;
15090  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15091  pAllocationInfo->offset = 0;
15092  pAllocationInfo->size = hAllocation->GetSize();
15093  pAllocationInfo->pMappedData = VMA_NULL;
15094  pAllocationInfo->pUserData = hAllocation->GetUserData();
15095  return;
15096  }
15097  else if(localLastUseFrameIndex == localCurrFrameIndex)
15098  {
15099  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15100  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15101  pAllocationInfo->offset = hAllocation->GetOffset();
15102  pAllocationInfo->size = hAllocation->GetSize();
15103  pAllocationInfo->pMappedData = VMA_NULL;
15104  pAllocationInfo->pUserData = hAllocation->GetUserData();
15105  return;
15106  }
15107  else // Last use time earlier than current time.
15108  {
15109  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15110  {
15111  localLastUseFrameIndex = localCurrFrameIndex;
15112  }
15113  }
15114  }
15115  }
15116  else
15117  {
15118 #if VMA_STATS_STRING_ENABLED
15119  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15120  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15121  for(;;)
15122  {
15123  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15124  if(localLastUseFrameIndex == localCurrFrameIndex)
15125  {
15126  break;
15127  }
15128  else // Last use time earlier than current time.
15129  {
15130  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15131  {
15132  localLastUseFrameIndex = localCurrFrameIndex;
15133  }
15134  }
15135  }
15136 #endif
15137 
15138  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15139  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15140  pAllocationInfo->offset = hAllocation->GetOffset();
15141  pAllocationInfo->size = hAllocation->GetSize();
15142  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15143  pAllocationInfo->pUserData = hAllocation->GetUserData();
15144  }
15145 }
15146 
15147 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15148 {
15149  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15150  if(hAllocation->CanBecomeLost())
15151  {
15152  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15153  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15154  for(;;)
15155  {
15156  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15157  {
15158  return false;
15159  }
15160  else if(localLastUseFrameIndex == localCurrFrameIndex)
15161  {
15162  return true;
15163  }
15164  else // Last use time earlier than current time.
15165  {
15166  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15167  {
15168  localLastUseFrameIndex = localCurrFrameIndex;
15169  }
15170  }
15171  }
15172  }
15173  else
15174  {
15175 #if VMA_STATS_STRING_ENABLED
15176  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15177  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15178  for(;;)
15179  {
15180  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15181  if(localLastUseFrameIndex == localCurrFrameIndex)
15182  {
15183  break;
15184  }
15185  else // Last use time earlier than current time.
15186  {
15187  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15188  {
15189  localLastUseFrameIndex = localCurrFrameIndex;
15190  }
15191  }
15192  }
15193 #endif
15194 
15195  return true;
15196  }
15197 }
15198 
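/*
Illustrative sketch (hypothetical helper): the per-frame protocol that TouchAllocation()
above serves. Allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT must be
touched every frame they are used; a VK_FALSE result means the allocation was reclaimed.
*/
static bool ExampleBeginFrame(VmaAllocator allocator, uint32_t frameIndex, VmaAllocation allocation)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // Allocation is lost: destroy the resource that used it and recreate both.
        return false;
    }
    return true;
}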
15199 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15200 {
15201  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15202 
15203  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15204 
15205  if(newCreateInfo.maxBlockCount == 0)
15206  {
15207  newCreateInfo.maxBlockCount = SIZE_MAX;
15208  }
15209  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15210  {
15211  return VK_ERROR_INITIALIZATION_FAILED;
15212  }
15213 
15214  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15215 
15216  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15217 
15218  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15219  if(res != VK_SUCCESS)
15220  {
15221  vma_delete(this, *pPool);
15222  *pPool = VMA_NULL;
15223  return res;
15224  }
15225 
15226  // Add to m_Pools.
15227  {
15228  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15229  (*pPool)->SetId(m_NextPoolId++);
15230  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15231  }
15232 
15233  return VK_SUCCESS;
15234 }
15235 
15236 void VmaAllocator_T::DestroyPool(VmaPool pool)
15237 {
15238  // Remove from m_Pools.
15239  {
15240  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15241  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15242  VMA_ASSERT(success && "Pool not found in Allocator.");
15243  }
15244 
15245  vma_delete(this, pool);
15246 }
15247 
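/*
Illustrative sketch (hypothetical sizes): a custom pool as managed by
CreatePool()/DestroyPool() above. The memory type index would normally come from
vmaFindMemoryTypeIndex() or one of its ForBufferInfo/ForImageInfo variants.
*/
static VkResult ExampleUseCustomPool(VmaAllocator allocator, uint32_t memTypeIndex)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
    poolCreateInfo.maxBlockCount = 2;               // At most 128 MiB total.

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    if(res == VK_SUCCESS)
    {
        // Route allocations into it via VmaAllocationCreateInfo::pool, then:
        vmaDestroyPool(allocator, pool);
    }
    return res;
}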
15248 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15249 {
15250  pool->m_BlockVector.GetPoolStats(pPoolStats);
15251 }
15252 
15253 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15254 {
15255  m_CurrentFrameIndex.store(frameIndex);
15256 }
15257 
15258 void VmaAllocator_T::MakePoolAllocationsLost(
15259  VmaPool hPool,
15260  size_t* pLostAllocationCount)
15261 {
15262  hPool->m_BlockVector.MakePoolAllocationsLost(
15263  m_CurrentFrameIndex.load(),
15264  pLostAllocationCount);
15265 }
15266 
15267 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15268 {
15269  return hPool->m_BlockVector.CheckCorruption();
15270 }
15271 
15272 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15273 {
15274  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15275 
15276  // Process default pools.
15277  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15278  {
15279  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15280  {
15281  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15282  VMA_ASSERT(pBlockVector);
15283  VkResult localRes = pBlockVector->CheckCorruption();
15284  switch(localRes)
15285  {
15286  case VK_ERROR_FEATURE_NOT_PRESENT:
15287  break;
15288  case VK_SUCCESS:
15289  finalRes = VK_SUCCESS;
15290  break;
15291  default:
15292  return localRes;
15293  }
15294  }
15295  }
15296 
15297  // Process custom pools.
15298  {
15299  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15300  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15301  {
15302  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15303  {
15304  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15305  switch(localRes)
15306  {
15307  case VK_ERROR_FEATURE_NOT_PRESENT:
15308  break;
15309  case VK_SUCCESS:
15310  finalRes = VK_SUCCESS;
15311  break;
15312  default:
15313  return localRes;
15314  }
15315  }
15316  }
15317  }
15318 
15319  return finalRes;
15320 }
15321 
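/*
Illustrative sketch: vmaCheckCorruption() drives the CheckCorruption() logic above.
Note it can only validate host-visible memory types with corruption-detection margins
enabled; otherwise VK_ERROR_FEATURE_NOT_PRESENT is the expected result.
*/
static void ExampleValidateAllMemory(VmaAllocator allocator)
{
    const VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // All memory types.
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
}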
15322 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15323 {
15324  *pAllocation = m_AllocationObjectAllocator.Allocate();
15325  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15326  (*pAllocation)->InitLost();
15327 }
15328 
15329 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15330 {
15331  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15332 
15333  VkResult res;
15334  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15335  {
15336  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15337  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15338  {
15339  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15340  if(res == VK_SUCCESS)
15341  {
15342  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15343  }
15344  }
15345  else
15346  {
15347  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15348  }
15349  }
15350  else
15351  {
15352  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15353  }
15354 
15355  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15356  {
15357  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15358  }
15359 
15360  return res;
15361 }
15362 
15363 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15364 {
15365  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15366  {
15367  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15368  }
15369 
15370  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15371 
15372  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15373  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15374  {
15375  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15376  m_HeapSizeLimit[heapIndex] += size;
15377  }
15378 }
15379 
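/*
Illustrative sketch (hypothetical values): the m_HeapSizeLimit bookkeeping in
AllocateVulkanMemory()/FreeVulkanMemory() above is enabled by passing
VmaAllocatorCreateInfo::pHeapSizeLimit at allocator creation.
*/
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit on this heap.
    }
    heapSizeLimit[0] = 512ull * 1024 * 1024; // Cap heap 0 at 512 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit; // Read during creation.
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}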
15380 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15381 {
15382  if(hAllocation->CanBecomeLost())
15383  {
15384  return VK_ERROR_MEMORY_MAP_FAILED;
15385  }
15386 
15387  switch(hAllocation->GetType())
15388  {
15389  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15390  {
15391  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15392  char *pBytes = VMA_NULL;
15393  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15394  if(res == VK_SUCCESS)
15395  {
15396  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15397  hAllocation->BlockAllocMap();
15398  }
15399  return res;
15400  }
15401  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15402  return hAllocation->DedicatedAllocMap(this, ppData);
15403  default:
15404  VMA_ASSERT(0);
15405  return VK_ERROR_MEMORY_MAP_FAILED;
15406  }
15407 }
15408 
15409 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15410 {
15411  switch(hAllocation->GetType())
15412  {
15413  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15414  {
15415  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15416  hAllocation->BlockAllocUnmap();
15417  pBlock->Unmap(this, 1);
15418  }
15419  break;
15420  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15421  hAllocation->DedicatedAllocUnmap(this);
15422  break;
15423  default:
15424  VMA_ASSERT(0);
15425  }
15426 }
15427 
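/*
Illustrative sketch (hypothetical helper): writing through the Map()/Unmap() path above,
with an explicit flush for memory types that are not HOST_COHERENT. Assumes `allocation`
lives in a host-visible memory type.
*/
static VkResult ExampleWriteToAllocation(
    VmaAllocator allocator, VmaAllocation allocation, const void* pSrc, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, pSrc, size);
        // No-op for coherent types; otherwise rounded to nonCoherentAtomSize as above.
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}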
15428 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15429 {
15430  VkResult res = VK_SUCCESS;
15431  switch(hAllocation->GetType())
15432  {
15433  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15434  res = GetVulkanFunctions().vkBindBufferMemory(
15435  m_hDevice,
15436  hBuffer,
15437  hAllocation->GetMemory(),
15438  0); //memoryOffset
15439  break;
15440  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15441  {
15442  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15443  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15444  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15445  break;
15446  }
15447  default:
15448  VMA_ASSERT(0);
15449  }
15450  return res;
15451 }
15452 
15453 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15454 {
15455  VkResult res = VK_SUCCESS;
15456  switch(hAllocation->GetType())
15457  {
15458  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15459  res = GetVulkanFunctions().vkBindImageMemory(
15460  m_hDevice,
15461  hImage,
15462  hAllocation->GetMemory(),
15463  0); //memoryOffset
15464  break;
15465  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15466  {
15467  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15468  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15469  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15470  break;
15471  }
15472  default:
15473  VMA_ASSERT(0);
15474  }
15475  return res;
15476 }
15477 
15478 void VmaAllocator_T::FlushOrInvalidateAllocation(
15479  VmaAllocation hAllocation,
15480  VkDeviceSize offset, VkDeviceSize size,
15481  VMA_CACHE_OPERATION op)
15482 {
15483  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15484  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15485  {
15486  const VkDeviceSize allocationSize = hAllocation->GetSize();
15487  VMA_ASSERT(offset <= allocationSize);
15488 
15489  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15490 
15491  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15492  memRange.memory = hAllocation->GetMemory();
15493 
15494  switch(hAllocation->GetType())
15495  {
15496  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15497  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15498  if(size == VK_WHOLE_SIZE)
15499  {
15500  memRange.size = allocationSize - memRange.offset;
15501  }
15502  else
15503  {
15504  VMA_ASSERT(offset + size <= allocationSize);
15505  memRange.size = VMA_MIN(
15506  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15507  allocationSize - memRange.offset);
15508  }
15509  break;
15510 
15511  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15512  {
15513  // 1. Still within this allocation.
15514  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15515  if(size == VK_WHOLE_SIZE)
15516  {
15517  size = allocationSize - offset;
15518  }
15519  else
15520  {
15521  VMA_ASSERT(offset + size <= allocationSize);
15522  }
15523  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15524 
15525  // 2. Adjust to whole block.
15526  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15527  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15528  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15529  memRange.offset += allocationOffset;
15530  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15531 
15532  break;
15533  }
15534 
15535  default:
15536  VMA_ASSERT(0);
15537  }
15538 
15539  switch(op)
15540  {
15541  case VMA_CACHE_FLUSH:
15542  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15543  break;
15544  case VMA_CACHE_INVALIDATE:
15545  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15546  break;
15547  default:
15548  VMA_ASSERT(0);
15549  }
15550  }
15551  // else: Just ignore this call.
15552 }
15553 
15554 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15555 {
15556  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15557 
15558  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15559  {
15560  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15561  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15562  VMA_ASSERT(pDedicatedAllocations);
15563  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15564  VMA_ASSERT(success);
15565  }
15566 
15567  VkDeviceMemory hMemory = allocation->GetMemory();
15568 
15569  /*
15570 There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15571 before vkFreeMemory.
15572 
15573  if(allocation->GetMappedData() != VMA_NULL)
15574  {
15575  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15576  }
15577  */
15578 
15579  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15580 
15581  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15582 }
15583 
15584 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15585 {
15586  VkBufferCreateInfo dummyBufCreateInfo;
15587  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15588 
15589  uint32_t memoryTypeBits = 0;
15590 
15591  // Create buffer.
15592  VkBuffer buf = VMA_NULL;
15593  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15594  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15595  if(res == VK_SUCCESS)
15596  {
15597  // Query for supported memory types.
15598  VkMemoryRequirements memReq;
15599  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15600  memoryTypeBits = memReq.memoryTypeBits;
15601 
15602  // Destroy buffer.
15603  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15604  }
15605 
15606  return memoryTypeBits;
15607 }
15608 
15609 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15610 {
15611  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15612  !hAllocation->CanBecomeLost() &&
15613  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15614  {
15615  void* pData = VMA_NULL;
15616  VkResult res = Map(hAllocation, &pData);
15617  if(res == VK_SUCCESS)
15618  {
15619  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15620  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15621  Unmap(hAllocation);
15622  }
15623  else
15624  {
15625  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15626  }
15627  }
15628 }
15629 
15630 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15631 {
15632  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15633  if(memoryTypeBits == UINT32_MAX)
15634  {
15635  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15636  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15637  }
15638  return memoryTypeBits;
15639 }
15640 
15641 #if VMA_STATS_STRING_ENABLED
15642 
15643 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15644 {
15645  bool dedicatedAllocationsStarted = false;
15646  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15647  {
15648  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15649  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15650  VMA_ASSERT(pDedicatedAllocVector);
15651  if(pDedicatedAllocVector->empty() == false)
15652  {
15653  if(dedicatedAllocationsStarted == false)
15654  {
15655  dedicatedAllocationsStarted = true;
15656  json.WriteString("DedicatedAllocations");
15657  json.BeginObject();
15658  }
15659 
15660  json.BeginString("Type ");
15661  json.ContinueString(memTypeIndex);
15662  json.EndString();
15663 
15664  json.BeginArray();
15665 
15666  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15667  {
15668  json.BeginObject(true);
15669  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15670  hAlloc->PrintParameters(json);
15671  json.EndObject();
15672  }
15673 
15674  json.EndArray();
15675  }
15676  }
15677  if(dedicatedAllocationsStarted)
15678  {
15679  json.EndObject();
15680  }
15681 
15682  {
15683  bool allocationsStarted = false;
15684  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15685  {
15686  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15687  {
15688  if(allocationsStarted == false)
15689  {
15690  allocationsStarted = true;
15691  json.WriteString("DefaultPools");
15692  json.BeginObject();
15693  }
15694 
15695  json.BeginString("Type ");
15696  json.ContinueString(memTypeIndex);
15697  json.EndString();
15698 
15699  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15700  }
15701  }
15702  if(allocationsStarted)
15703  {
15704  json.EndObject();
15705  }
15706  }
15707 
15708  // Custom pools
15709  {
15710  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15711  const size_t poolCount = m_Pools.size();
15712  if(poolCount > 0)
15713  {
15714  json.WriteString("Pools");
15715  json.BeginObject();
15716  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15717  {
15718  json.BeginString();
15719  json.ContinueString(m_Pools[poolIndex]->GetId());
15720  json.EndString();
15721 
15722  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15723  }
15724  json.EndObject();
15725  }
15726  }
15727 }
15728 
15729 #endif // #if VMA_STATS_STRING_ENABLED
15730 
15731 ////////////////////////////////////////////////////////////////////////////////
15732 // Public interface
15733 
15734 VkResult vmaCreateAllocator(
15735  const VmaAllocatorCreateInfo* pCreateInfo,
15736  VmaAllocator* pAllocator)
15737 {
15738  VMA_ASSERT(pCreateInfo && pAllocator);
15739  VMA_DEBUG_LOG("vmaCreateAllocator");
15740  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15741  return (*pAllocator)->Init(pCreateInfo);
15742 }
15743 
15744 void vmaDestroyAllocator(
15745  VmaAllocator allocator)
15746 {
15747  if(allocator != VK_NULL_HANDLE)
15748  {
15749  VMA_DEBUG_LOG("vmaDestroyAllocator");
15750  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15751  vma_delete(&allocationCallbacks, allocator);
15752  }
15753 }
15754 
15755 void vmaGetPhysicalDeviceProperties(
15756  VmaAllocator allocator,
15757  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15758 {
15759  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15760  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15761 }
15762 
15763 void vmaGetMemoryProperties(
15764  VmaAllocator allocator,
15765  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15766 {
15767  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15768  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15769 }
15770 
15771 void vmaGetMemoryTypeProperties(
15772  VmaAllocator allocator,
15773  uint32_t memoryTypeIndex,
15774  VkMemoryPropertyFlags* pFlags)
15775 {
15776  VMA_ASSERT(allocator && pFlags);
15777  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15778  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15779 }
15780 
15781 void vmaSetCurrentFrameIndex(
15782  VmaAllocator allocator,
15783  uint32_t frameIndex)
15784 {
15785  VMA_ASSERT(allocator);
15786  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15787 
15788  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15789 
15790  allocator->SetCurrentFrameIndex(frameIndex);
15791 }
15792 
15793 void vmaCalculateStats(
15794  VmaAllocator allocator,
15795  VmaStats* pStats)
15796 {
15797  VMA_ASSERT(allocator && pStats);
15798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15799  allocator->CalculateStats(pStats);
15800 }
15801 
15802 #if VMA_STATS_STRING_ENABLED
15803 
15804 void vmaBuildStatsString(
15805  VmaAllocator allocator,
15806  char** ppStatsString,
15807  VkBool32 detailedMap)
15808 {
15809  VMA_ASSERT(allocator && ppStatsString);
15810  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15811 
15812  VmaStringBuilder sb(allocator);
15813  {
15814  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15815  json.BeginObject();
15816 
15817  VmaStats stats;
15818  allocator->CalculateStats(&stats);
15819 
15820  json.WriteString("Total");
15821  VmaPrintStatInfo(json, stats.total);
15822 
15823  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15824  {
15825  json.BeginString("Heap ");
15826  json.ContinueString(heapIndex);
15827  json.EndString();
15828  json.BeginObject();
15829 
15830  json.WriteString("Size");
15831  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15832 
15833  json.WriteString("Flags");
15834  json.BeginArray(true);
15835  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15836  {
15837  json.WriteString("DEVICE_LOCAL");
15838  }
15839  json.EndArray();
15840 
15841  if(stats.memoryHeap[heapIndex].blockCount > 0)
15842  {
15843  json.WriteString("Stats");
15844  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15845  }
15846 
15847  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15848  {
15849  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15850  {
15851  json.BeginString("Type ");
15852  json.ContinueString(typeIndex);
15853  json.EndString();
15854 
15855  json.BeginObject();
15856 
15857  json.WriteString("Flags");
15858  json.BeginArray(true);
15859  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15860  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15861  {
15862  json.WriteString("DEVICE_LOCAL");
15863  }
15864  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15865  {
15866  json.WriteString("HOST_VISIBLE");
15867  }
15868  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15869  {
15870  json.WriteString("HOST_COHERENT");
15871  }
15872  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15873  {
15874  json.WriteString("HOST_CACHED");
15875  }
15876  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15877  {
15878  json.WriteString("LAZILY_ALLOCATED");
15879  }
15880  json.EndArray();
15881 
15882  if(stats.memoryType[typeIndex].blockCount > 0)
15883  {
15884  json.WriteString("Stats");
15885  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15886  }
15887 
15888  json.EndObject();
15889  }
15890  }
15891 
15892  json.EndObject();
15893  }
15894  if(detailedMap == VK_TRUE)
15895  {
15896  allocator->PrintDetailedMap(json);
15897  }
15898 
15899  json.EndObject();
15900  }
15901 
15902  const size_t len = sb.GetLength();
15903  char* const pChars = vma_new_array(allocator, char, len + 1);
15904  if(len > 0)
15905  {
15906  memcpy(pChars, sb.GetData(), len);
15907  }
15908  pChars[len] = '\0';
15909  *ppStatsString = pChars;
15910 }
15911 
15912 void vmaFreeStatsString(
15913  VmaAllocator allocator,
15914  char* pStatsString)
15915 {
15916  if(pStatsString != VMA_NULL)
15917  {
15918  VMA_ASSERT(allocator);
15919  size_t len = strlen(pStatsString);
15920  vma_delete_array(allocator, pStatsString, len + 1);
15921  }
15922 }
15923 
15924 #endif // #if VMA_STATS_STRING_ENABLED
15925 
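/*
Illustrative sketch (hypothetical helper): consuming the JSON emitted by
vmaBuildStatsString() above. The string must be released with vmaFreeStatsString(),
never with free().
*/
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
    char* pStatsString = VMA_NULL;
    vmaBuildStatsString(allocator, &pStatsString, VK_TRUE); // VK_TRUE = include detailed map.
    // Write pStatsString to a log or file here; it is a single JSON document.
    vmaFreeStatsString(allocator, pStatsString);
}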
15926 /*
15927 This function is not protected by any mutex because it just reads immutable data.
15928 */
15929 VkResult vmaFindMemoryTypeIndex(
15930  VmaAllocator allocator,
15931  uint32_t memoryTypeBits,
15932  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15933  uint32_t* pMemoryTypeIndex)
15934 {
15935  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15936  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15937  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15938 
15939  if(pAllocationCreateInfo->memoryTypeBits != 0)
15940  {
15941  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15942  }
15943 
15944  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15945  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15946 
15947  // Convert usage to requiredFlags and preferredFlags.
15948  switch(pAllocationCreateInfo->usage)
15949  {
15950  case VMA_MEMORY_USAGE_UNKNOWN:
15951  break;
15952  case VMA_MEMORY_USAGE_GPU_ONLY:
15953  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15954  {
15955  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15956  }
15957  break;
15958  case VMA_MEMORY_USAGE_CPU_ONLY:
15959  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15960  break;
15961  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15962  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15963  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15964  {
15965  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15966  }
15967  break;
15968  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15969  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15970  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15971  break;
15972  default:
15973  break;
15974  }
15975 
15976  *pMemoryTypeIndex = UINT32_MAX;
15977  uint32_t minCost = UINT32_MAX;
15978  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15979  memTypeIndex < allocator->GetMemoryTypeCount();
15980  ++memTypeIndex, memTypeBit <<= 1)
15981  {
15982  // This memory type is acceptable according to memoryTypeBits bitmask.
15983  if((memTypeBit & memoryTypeBits) != 0)
15984  {
15985  const VkMemoryPropertyFlags currFlags =
15986  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15987  // This memory type contains requiredFlags.
15988  if((requiredFlags & ~currFlags) == 0)
15989  {
15990  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15991  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15992  // Remember memory type with lowest cost.
15993  if(currCost < minCost)
15994  {
15995  *pMemoryTypeIndex = memTypeIndex;
15996  if(currCost == 0)
15997  {
15998  return VK_SUCCESS;
15999  }
16000  minCost = currCost;
16001  }
16002  }
16003  }
16004  }
16005  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16006 }
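/*
Illustrative sketch (hypothetical helper): choosing a staging memory type with the
cost-based search above. Every requiredFlags bit must be present; each missing
preferredFlags bit adds one to a candidate's cost.
*/
static VkResult ExampleFindStagingMemoryType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    // UINT32_MAX: consider every memory type; unsupported ones are masked out above.
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}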
16007 
16008 VkResult vmaFindMemoryTypeIndexForBufferInfo(
16009  VmaAllocator allocator,
16010  const VkBufferCreateInfo* pBufferCreateInfo,
16011  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16012  uint32_t* pMemoryTypeIndex)
16013 {
16014  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16015  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16016  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16017  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16018 
16019  const VkDevice hDev = allocator->m_hDevice;
16020  VkBuffer hBuffer = VK_NULL_HANDLE;
16021  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16022  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16023  if(res == VK_SUCCESS)
16024  {
16025  VkMemoryRequirements memReq = {};
16026  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16027  hDev, hBuffer, &memReq);
16028 
16029  res = vmaFindMemoryTypeIndex(
16030  allocator,
16031  memReq.memoryTypeBits,
16032  pAllocationCreateInfo,
16033  pMemoryTypeIndex);
16034 
16035  allocator->GetVulkanFunctions().vkDestroyBuffer(
16036  hDev, hBuffer, allocator->GetAllocationCallbacks());
16037  }
16038  return res;
16039 }
16040 
16041 VkResult vmaFindMemoryTypeIndexForImageInfo(
16042  VmaAllocator allocator,
16043  const VkImageCreateInfo* pImageCreateInfo,
16044  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16045  uint32_t* pMemoryTypeIndex)
16046 {
16047  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16048  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16049  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16050  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16051 
16052  const VkDevice hDev = allocator->m_hDevice;
16053  VkImage hImage = VK_NULL_HANDLE;
16054  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16055  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16056  if(res == VK_SUCCESS)
16057  {
16058  VkMemoryRequirements memReq = {};
16059  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16060  hDev, hImage, &memReq);
16061 
16062  res = vmaFindMemoryTypeIndex(
16063  allocator,
16064  memReq.memoryTypeBits,
16065  pAllocationCreateInfo,
16066  pMemoryTypeIndex);
16067 
16068  allocator->GetVulkanFunctions().vkDestroyImage(
16069  hDev, hImage, allocator->GetAllocationCallbacks());
16070  }
16071  return res;
16072 }
16073 
16074 VkResult vmaCreatePool(
16075  VmaAllocator allocator,
16076  const VmaPoolCreateInfo* pCreateInfo,
16077  VmaPool* pPool)
16078 {
16079  VMA_ASSERT(allocator && pCreateInfo && pPool);
16080 
16081  VMA_DEBUG_LOG("vmaCreatePool");
16082 
16083  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16084 
16085  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16086 
16087 #if VMA_RECORDING_ENABLED
16088  if(allocator->GetRecorder() != VMA_NULL)
16089  {
16090  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16091  }
16092 #endif
16093 
16094  return res;
16095 }
16096 
16097 void vmaDestroyPool(
16098  VmaAllocator allocator,
16099  VmaPool pool)
16100 {
16101  VMA_ASSERT(allocator);
16102 
16103  if(pool == VK_NULL_HANDLE)
16104  {
16105  return;
16106  }
16107 
16108  VMA_DEBUG_LOG("vmaDestroyPool");
16109 
16110  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16111 
16112 #if VMA_RECORDING_ENABLED
16113  if(allocator->GetRecorder() != VMA_NULL)
16114  {
16115  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16116  }
16117 #endif
16118 
16119  allocator->DestroyPool(pool);
16120 }
16121 
16122 void vmaGetPoolStats(
16123  VmaAllocator allocator,
16124  VmaPool pool,
16125  VmaPoolStats* pPoolStats)
16126 {
16127  VMA_ASSERT(allocator && pool && pPoolStats);
16128 
16129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16130 
16131  allocator->GetPoolStats(pool, pPoolStats);
16132 }
16133 
16134 void vmaMakePoolAllocationsLost(
16135  VmaAllocator allocator,
16136  VmaPool pool,
16137  size_t* pLostAllocationCount)
16138 {
16139  VMA_ASSERT(allocator && pool);
16140 
16141  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16142 
16143 #if VMA_RECORDING_ENABLED
16144  if(allocator->GetRecorder() != VMA_NULL)
16145  {
16146  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16147  }
16148 #endif
16149 
16150  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16151 }
16152 
16153 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16154 {
16155  VMA_ASSERT(allocator && pool);
16156 
16157  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16158 
16159  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16160 
16161  return allocator->CheckPoolCorruption(pool);
16162 }
16163 
16164 VkResult vmaAllocateMemory(
16165  VmaAllocator allocator,
16166  const VkMemoryRequirements* pVkMemoryRequirements,
16167  const VmaAllocationCreateInfo* pCreateInfo,
16168  VmaAllocation* pAllocation,
16169  VmaAllocationInfo* pAllocationInfo)
16170 {
16171  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16172 
16173  VMA_DEBUG_LOG("vmaAllocateMemory");
16174 
16175  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16176 
16177  VkResult result = allocator->AllocateMemory(
16178  *pVkMemoryRequirements,
16179  false, // requiresDedicatedAllocation
16180  false, // prefersDedicatedAllocation
16181  VK_NULL_HANDLE, // dedicatedBuffer
16182  VK_NULL_HANDLE, // dedicatedImage
16183  *pCreateInfo,
16184  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16185  1, // allocationCount
16186  pAllocation);
16187 
16188 #if VMA_RECORDING_ENABLED
16189  if(allocator->GetRecorder() != VMA_NULL)
16190  {
16191  allocator->GetRecorder()->RecordAllocateMemory(
16192  allocator->GetCurrentFrameIndex(),
16193  *pVkMemoryRequirements,
16194  *pCreateInfo,
16195  *pAllocation);
16196  }
16197 #endif
16198 
16199  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16200  {
16201  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16202  }
16203 
16204  return result;
16205 }
16206 
16207 VkResult vmaAllocateMemoryPages(
16208  VmaAllocator allocator,
16209  const VkMemoryRequirements* pVkMemoryRequirements,
16210  const VmaAllocationCreateInfo* pCreateInfo,
16211  size_t allocationCount,
16212  VmaAllocation* pAllocations,
16213  VmaAllocationInfo* pAllocationInfo)
16214 {
16215  if(allocationCount == 0)
16216  {
16217  return VK_SUCCESS;
16218  }
16219 
16220  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16221 
16222  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16223 
16224  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16225 
16226  VkResult result = allocator->AllocateMemory(
16227  *pVkMemoryRequirements,
16228  false, // requiresDedicatedAllocation
16229  false, // prefersDedicatedAllocation
16230  VK_NULL_HANDLE, // dedicatedBuffer
16231  VK_NULL_HANDLE, // dedicatedImage
16232  *pCreateInfo,
16233  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16234  allocationCount,
16235  pAllocations);
16236 
16237 #if VMA_RECORDING_ENABLED
16238  if(allocator->GetRecorder() != VMA_NULL)
16239  {
16240  allocator->GetRecorder()->RecordAllocateMemoryPages(
16241  allocator->GetCurrentFrameIndex(),
16242  *pVkMemoryRequirements,
16243  *pCreateInfo,
16244  (uint64_t)allocationCount,
16245  pAllocations);
16246  }
16247 #endif
16248 
16249  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16250  {
16251  for(size_t i = 0; i < allocationCount; ++i)
16252  {
16253  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16254  }
16255  }
16256 
16257  return result;
16258 }
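/*
Illustrative sketch (hypothetical helper): vmaAllocateMemoryPages() above creates many
separate allocations in one pass; on failure, everything already allocated within the
call is freed, so the output array is filled all-or-nothing.
*/
static VkResult ExampleAllocateEightPages(
    VmaAllocator allocator, const VkMemoryRequirements* pMemReq, VmaAllocation outAllocs[8])
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkResult res = vmaAllocateMemoryPages(allocator, pMemReq, &allocCreateInfo, 8, outAllocs, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        vmaFreeMemoryPages(allocator, 8, outAllocs);
    }
    return res;
}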
16259 
16260 VkResult vmaAllocateMemoryForBuffer(
16261  VmaAllocator allocator,
16262  VkBuffer buffer,
16263  const VmaAllocationCreateInfo* pCreateInfo,
16264  VmaAllocation* pAllocation,
16265  VmaAllocationInfo* pAllocationInfo)
16266 {
16267  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16268 
16269  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16270 
16271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16272 
16273  VkMemoryRequirements vkMemReq = {};
16274  bool requiresDedicatedAllocation = false;
16275  bool prefersDedicatedAllocation = false;
16276  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16277  requiresDedicatedAllocation,
16278  prefersDedicatedAllocation);
16279 
16280  VkResult result = allocator->AllocateMemory(
16281  vkMemReq,
16282  requiresDedicatedAllocation,
16283  prefersDedicatedAllocation,
16284  buffer, // dedicatedBuffer
16285  VK_NULL_HANDLE, // dedicatedImage
16286  *pCreateInfo,
16287  VMA_SUBALLOCATION_TYPE_BUFFER,
16288  1, // allocationCount
16289  pAllocation);
16290 
16291 #if VMA_RECORDING_ENABLED
16292  if(allocator->GetRecorder() != VMA_NULL)
16293  {
16294  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16295  allocator->GetCurrentFrameIndex(),
16296  vkMemReq,
16297  requiresDedicatedAllocation,
16298  prefersDedicatedAllocation,
16299  *pCreateInfo,
16300  *pAllocation);
16301  }
16302 #endif
16303 
16304  if(pAllocationInfo && result == VK_SUCCESS)
16305  {
16306  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16307  }
16308 
16309  return result;
16310 }
16311 
16312 VkResult vmaAllocateMemoryForImage(
16313  VmaAllocator allocator,
16314  VkImage image,
16315  const VmaAllocationCreateInfo* pCreateInfo,
16316  VmaAllocation* pAllocation,
16317  VmaAllocationInfo* pAllocationInfo)
16318 {
16319  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16320 
16321  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16322 
16323  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16324 
16325  VkMemoryRequirements vkMemReq = {};
16326  bool requiresDedicatedAllocation = false;
16327  bool prefersDedicatedAllocation = false;
16328  allocator->GetImageMemoryRequirements(image, vkMemReq,
16329  requiresDedicatedAllocation, prefersDedicatedAllocation);
16330 
16331  VkResult result = allocator->AllocateMemory(
16332  vkMemReq,
16333  requiresDedicatedAllocation,
16334  prefersDedicatedAllocation,
16335  VK_NULL_HANDLE, // dedicatedBuffer
16336  image, // dedicatedImage
16337  *pCreateInfo,
16338  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16339  1, // allocationCount
16340  pAllocation);
16341 
16342 #if VMA_RECORDING_ENABLED
16343  if(allocator->GetRecorder() != VMA_NULL)
16344  {
16345  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16346  allocator->GetCurrentFrameIndex(),
16347  vkMemReq,
16348  requiresDedicatedAllocation,
16349  prefersDedicatedAllocation,
16350  *pCreateInfo,
16351  *pAllocation);
16352  }
16353 #endif
16354 
16355  if(pAllocationInfo && result == VK_SUCCESS)
16356  {
16357  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16358  }
16359 
16360  return result;
16361 }
16362 
16363 void vmaFreeMemory(
16364  VmaAllocator allocator,
16365  VmaAllocation allocation)
16366 {
16367  VMA_ASSERT(allocator);
16368 
16369  if(allocation == VK_NULL_HANDLE)
16370  {
16371  return;
16372  }
16373 
16374  VMA_DEBUG_LOG("vmaFreeMemory");
16375 
16376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16377 
16378 #if VMA_RECORDING_ENABLED
16379  if(allocator->GetRecorder() != VMA_NULL)
16380  {
16381  allocator->GetRecorder()->RecordFreeMemory(
16382  allocator->GetCurrentFrameIndex(),
16383  allocation);
16384  }
16385 #endif
16386 
16387  allocator->FreeMemory(
16388  1, // allocationCount
16389  &allocation);
16390 }
16391 
16392 void vmaFreeMemoryPages(
16393  VmaAllocator allocator,
16394  size_t allocationCount,
16395  VmaAllocation* pAllocations)
16396 {
16397  if(allocationCount == 0)
16398  {
16399  return;
16400  }
16401 
16402  VMA_ASSERT(allocator);
16403 
16404  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16405 
16406  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16407 
16408 #if VMA_RECORDING_ENABLED
16409  if(allocator->GetRecorder() != VMA_NULL)
16410  {
16411  allocator->GetRecorder()->RecordFreeMemoryPages(
16412  allocator->GetCurrentFrameIndex(),
16413  (uint64_t)allocationCount,
16414  pAllocations);
16415  }
16416 #endif
16417 
16418  allocator->FreeMemory(allocationCount, pAllocations);
16419 }
16420 
16421 VkResult vmaResizeAllocation(
16422  VmaAllocator allocator,
16423  VmaAllocation allocation,
16424  VkDeviceSize newSize)
16425 {
16426  VMA_ASSERT(allocator && allocation);
16427 
16428  VMA_DEBUG_LOG("vmaResizeAllocation");
16429 
16430  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16431 
16432 #if VMA_RECORDING_ENABLED
16433  if(allocator->GetRecorder() != VMA_NULL)
16434  {
16435  allocator->GetRecorder()->RecordResizeAllocation(
16436  allocator->GetCurrentFrameIndex(),
16437  allocation,
16438  newSize);
16439  }
16440 #endif
16441 
16442  return allocator->ResizeAllocation(allocation, newSize);
16443 }
16444 
16445 void vmaGetAllocationInfo(
16446  VmaAllocator allocator,
16447  VmaAllocation allocation,
16448  VmaAllocationInfo* pAllocationInfo)
16449 {
16450  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16451 
16452  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16453 
16454 #if VMA_RECORDING_ENABLED
16455  if(allocator->GetRecorder() != VMA_NULL)
16456  {
16457  allocator->GetRecorder()->RecordGetAllocationInfo(
16458  allocator->GetCurrentFrameIndex(),
16459  allocation);
16460  }
16461 #endif
16462 
16463  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16464 }
16465 
16466 VkBool32 vmaTouchAllocation(
16467  VmaAllocator allocator,
16468  VmaAllocation allocation)
16469 {
16470  VMA_ASSERT(allocator && allocation);
16471 
16472  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16473 
16474 #if VMA_RECORDING_ENABLED
16475  if(allocator->GetRecorder() != VMA_NULL)
16476  {
16477  allocator->GetRecorder()->RecordTouchAllocation(
16478  allocator->GetCurrentFrameIndex(),
16479  allocation);
16480  }
16481 #endif
16482 
16483  return allocator->TouchAllocation(allocation);
16484 }
16485 
16486 void vmaSetAllocationUserData(
16487  VmaAllocator allocator,
16488  VmaAllocation allocation,
16489  void* pUserData)
16490 {
16491  VMA_ASSERT(allocator && allocation);
16492 
16493  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16494 
16495  allocation->SetUserData(allocator, pUserData);
16496 
16497 #if VMA_RECORDING_ENABLED
16498  if(allocator->GetRecorder() != VMA_NULL)
16499  {
16500  allocator->GetRecorder()->RecordSetAllocationUserData(
16501  allocator->GetCurrentFrameIndex(),
16502  allocation,
16503  pUserData);
16504  }
16505 #endif
16506 }
16507 
16508 void vmaCreateLostAllocation(
16509  VmaAllocator allocator,
16510  VmaAllocation* pAllocation)
16511 {
16512  VMA_ASSERT(allocator && pAllocation);
16513 
16514  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
16515 
16516  allocator->CreateLostAllocation(pAllocation);
16517 
16518 #if VMA_RECORDING_ENABLED
16519  if(allocator->GetRecorder() != VMA_NULL)
16520  {
16521  allocator->GetRecorder()->RecordCreateLostAllocation(
16522  allocator->GetCurrentFrameIndex(),
16523  *pAllocation);
16524  }
16525 #endif
16526 }
16527 
16528 VkResult vmaMapMemory(
16529  VmaAllocator allocator,
16530  VmaAllocation allocation,
16531  void** ppData)
16532 {
16533  VMA_ASSERT(allocator && allocation && ppData);
16534 
16535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16536 
16537  VkResult res = allocator->Map(allocation, ppData);
16538 
16539 #if VMA_RECORDING_ENABLED
16540  if(allocator->GetRecorder() != VMA_NULL)
16541  {
16542  allocator->GetRecorder()->RecordMapMemory(
16543  allocator->GetCurrentFrameIndex(),
16544  allocation);
16545  }
16546 #endif
16547 
16548  return res;
16549 }
16550 
16551 void vmaUnmapMemory(
16552  VmaAllocator allocator,
16553  VmaAllocation allocation)
16554 {
16555  VMA_ASSERT(allocator && allocation);
16556 
16557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16558 
16559 #if VMA_RECORDING_ENABLED
16560  if(allocator->GetRecorder() != VMA_NULL)
16561  {
16562  allocator->GetRecorder()->RecordUnmapMemory(
16563  allocator->GetCurrentFrameIndex(),
16564  allocation);
16565  }
16566 #endif
16567 
16568  allocator->Unmap(allocation);
16569 }
16570 
16571 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16572 {
16573  VMA_ASSERT(allocator && allocation);
16574 
16575  VMA_DEBUG_LOG("vmaFlushAllocation");
16576 
16577  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16578 
16579  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16580 
16581 #if VMA_RECORDING_ENABLED
16582  if(allocator->GetRecorder() != VMA_NULL)
16583  {
16584  allocator->GetRecorder()->RecordFlushAllocation(
16585  allocator->GetCurrentFrameIndex(),
16586  allocation, offset, size);
16587  }
16588 #endif
16589 }
16590 
16591 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16592 {
16593  VMA_ASSERT(allocator && allocation);
16594 
16595  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16596 
16597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16598 
16599  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16600 
16601 #if VMA_RECORDING_ENABLED
16602  if(allocator->GetRecorder() != VMA_NULL)
16603  {
16604  allocator->GetRecorder()->RecordInvalidateAllocation(
16605  allocator->GetCurrentFrameIndex(),
16606  allocation, offset, size);
16607  }
16608 #endif
16609 }
16610 
16611 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16612 {
16613  VMA_ASSERT(allocator);
16614 
16615  VMA_DEBUG_LOG("vmaCheckCorruption");
16616 
16617  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16618 
16619  return allocator->CheckCorruption(memoryTypeBits);
16620 }
16621 
16622 VkResult vmaDefragment(
16623  VmaAllocator allocator,
16624  VmaAllocation* pAllocations,
16625  size_t allocationCount,
16626  VkBool32* pAllocationsChanged,
16627  const VmaDefragmentationInfo *pDefragmentationInfo,
16628  VmaDefragmentationStats* pDefragmentationStats)
16629 {
16630  // Deprecated interface, reimplemented using the new one.
16631 
16632  VmaDefragmentationInfo2 info2 = {};
16633  info2.allocationCount = (uint32_t)allocationCount;
16634  info2.pAllocations = pAllocations;
16635  info2.pAllocationsChanged = pAllocationsChanged;
16636  if(pDefragmentationInfo != VMA_NULL)
16637  {
16638  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16639  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16640  }
16641  else
16642  {
16643  info2.maxCpuAllocationsToMove = UINT32_MAX;
16644  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16645  }
16646  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16647 
16648  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16649  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16650  if(res == VK_NOT_READY)
16651  {
16652  res = vmaDefragmentationEnd(allocator, ctx);
16653  }
16654  return res;
16655 }
16656 
16657 VkResult vmaDefragmentationBegin(
16658  VmaAllocator allocator,
16659  const VmaDefragmentationInfo2* pInfo,
16660  VmaDefragmentationStats* pStats,
16661  VmaDefragmentationContext *pContext)
16662 {
16663  VMA_ASSERT(allocator && pInfo && pContext);
16664 
16665  // Degenerate case: Nothing to defragment.
16666  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16667  {
16668  return VK_SUCCESS;
16669  }
16670 
16671  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16672  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16673  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16674  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16675 
16676  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16677 
16678  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16679 
16680  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16681 
16682 #if VMA_RECORDING_ENABLED
16683  if(allocator->GetRecorder() != VMA_NULL)
16684  {
16685  allocator->GetRecorder()->RecordDefragmentationBegin(
16686  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16687  }
16688 #endif
16689 
16690  return res;
16691 }
16692 
16693 VkResult vmaDefragmentationEnd(
16694  VmaAllocator allocator,
16695  VmaDefragmentationContext context)
16696 {
16697  VMA_ASSERT(allocator);
16698 
16699  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16700 
16701  if(context != VK_NULL_HANDLE)
16702  {
16703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16704 
16705 #if VMA_RECORDING_ENABLED
16706  if(allocator->GetRecorder() != VMA_NULL)
16707  {
16708  allocator->GetRecorder()->RecordDefragmentationEnd(
16709  allocator->GetCurrentFrameIndex(), context);
16710  }
16711 #endif
16712 
16713  return allocator->DefragmentationEnd(context);
16714  }
16715  else
16716  {
16717  return VK_SUCCESS;
16718  }
16719 }
16720 
16721 VkResult vmaBindBufferMemory(
16722  VmaAllocator allocator,
16723  VmaAllocation allocation,
16724  VkBuffer buffer)
16725 {
16726  VMA_ASSERT(allocator && allocation && buffer);
16727 
16728  VMA_DEBUG_LOG("vmaBindBufferMemory");
16729 
16730  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16731 
16732  return allocator->BindBufferMemory(allocation, buffer);
16733 }
16734 
16735 VkResult vmaBindImageMemory(
16736  VmaAllocator allocator,
16737  VmaAllocation allocation,
16738  VkImage image)
16739 {
16740  VMA_ASSERT(allocator && allocation && image);
16741 
16742  VMA_DEBUG_LOG("vmaBindImageMemory");
16743 
16744  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16745 
16746  return allocator->BindImageMemory(allocation, image);
16747 }
16748 
16749 VkResult vmaCreateBuffer(
16750  VmaAllocator allocator,
16751  const VkBufferCreateInfo* pBufferCreateInfo,
16752  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16753  VkBuffer* pBuffer,
16754  VmaAllocation* pAllocation,
16755  VmaAllocationInfo* pAllocationInfo)
16756 {
16757  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16758 
16759  if(pBufferCreateInfo->size == 0)
16760  {
16761  return VK_ERROR_VALIDATION_FAILED_EXT;
16762  }
16763 
16764  VMA_DEBUG_LOG("vmaCreateBuffer");
16765 
16766  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16767 
16768  *pBuffer = VK_NULL_HANDLE;
16769  *pAllocation = VK_NULL_HANDLE;
16770 
16771  // 1. Create VkBuffer.
16772  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16773  allocator->m_hDevice,
16774  pBufferCreateInfo,
16775  allocator->GetAllocationCallbacks(),
16776  pBuffer);
16777  if(res >= 0)
16778  {
16779  // 2. vkGetBufferMemoryRequirements.
16780  VkMemoryRequirements vkMemReq = {};
16781  bool requiresDedicatedAllocation = false;
16782  bool prefersDedicatedAllocation = false;
16783  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16784  requiresDedicatedAllocation, prefersDedicatedAllocation);
16785 
16786  // Make sure the alignment requirements for specific buffer usages reported
16787  // in Physical Device Properties are included in the alignment reported by the memory requirements.
16788  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16789  {
16790  VMA_ASSERT(vkMemReq.alignment %
16791  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16792  }
16793  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16794  {
16795  VMA_ASSERT(vkMemReq.alignment %
16796  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16797  }
16798  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16799  {
16800  VMA_ASSERT(vkMemReq.alignment %
16801  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16802  }
16803 
16804  // 3. Allocate memory using allocator.
16805  res = allocator->AllocateMemory(
16806  vkMemReq,
16807  requiresDedicatedAllocation,
16808  prefersDedicatedAllocation,
16809  *pBuffer, // dedicatedBuffer
16810  VK_NULL_HANDLE, // dedicatedImage
16811  *pAllocationCreateInfo,
16812  VMA_SUBALLOCATION_TYPE_BUFFER,
16813  1, // allocationCount
16814  pAllocation);
16815 
16816 #if VMA_RECORDING_ENABLED
16817  if(allocator->GetRecorder() != VMA_NULL)
16818  {
16819  allocator->GetRecorder()->RecordCreateBuffer(
16820  allocator->GetCurrentFrameIndex(),
16821  *pBufferCreateInfo,
16822  *pAllocationCreateInfo,
16823  *pAllocation);
16824  }
16825 #endif
16826 
16827  if(res >= 0)
16828  {
16829  // 4. Bind buffer with memory.
16830  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16831  {
16832  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16833  }
16834  if(res >= 0)
16835  {
16836  // All steps succeeded.
16837  #if VMA_STATS_STRING_ENABLED
16838  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16839  #endif
16840  if(pAllocationInfo != VMA_NULL)
16841  {
16842  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16843  }
16844 
16845  return VK_SUCCESS;
16846  }
16847  allocator->FreeMemory(
16848  1, // allocationCount
16849  pAllocation);
16850  *pAllocation = VK_NULL_HANDLE;
16851  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16852  *pBuffer = VK_NULL_HANDLE;
16853  return res;
16854  }
16855  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16856  *pBuffer = VK_NULL_HANDLE;
16857  return res;
16858  }
16859  return res;
16860 }
16861 
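/*
Example (a minimal sketch, not part of the original file): the common
vmaCreateBuffer()/vmaDestroyBuffer() round trip for a persistently mapped
staging buffer. Assumes an initialized VmaAllocator `allocator`; the size
and usage values are illustrative.
*/
static void ExampleCreateStagingBuffer(VmaAllocator allocator)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536; // must be nonzero, see the check above
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, &allocInfo) == VK_SUCCESS)
    {
        // allocInfo.pMappedData is valid because of MAPPED_BIT;
        // write staging data through it, then submit a copy.
        vmaDestroyBuffer(allocator, buf, alloc);
    }
}
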
16862 void vmaDestroyBuffer(
16863  VmaAllocator allocator,
16864  VkBuffer buffer,
16865  VmaAllocation allocation)
16866 {
16867  VMA_ASSERT(allocator);
16868 
16869  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16870  {
16871  return;
16872  }
16873 
16874  VMA_DEBUG_LOG("vmaDestroyBuffer");
16875 
16876  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16877 
16878 #if VMA_RECORDING_ENABLED
16879  if(allocator->GetRecorder() != VMA_NULL)
16880  {
16881  allocator->GetRecorder()->RecordDestroyBuffer(
16882  allocator->GetCurrentFrameIndex(),
16883  allocation);
16884  }
16885 #endif
16886 
16887  if(buffer != VK_NULL_HANDLE)
16888  {
16889  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16890  }
16891 
16892  if(allocation != VK_NULL_HANDLE)
16893  {
16894  allocator->FreeMemory(
16895  1, // allocationCount
16896  &allocation);
16897  }
16898 }
16899 
16900 VkResult vmaCreateImage(
16901  VmaAllocator allocator,
16902  const VkImageCreateInfo* pImageCreateInfo,
16903  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16904  VkImage* pImage,
16905  VmaAllocation* pAllocation,
16906  VmaAllocationInfo* pAllocationInfo)
16907 {
16908  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16909 
16910  if(pImageCreateInfo->extent.width == 0 ||
16911  pImageCreateInfo->extent.height == 0 ||
16912  pImageCreateInfo->extent.depth == 0 ||
16913  pImageCreateInfo->mipLevels == 0 ||
16914  pImageCreateInfo->arrayLayers == 0)
16915  {
16916  return VK_ERROR_VALIDATION_FAILED_EXT;
16917  }
16918 
16919  VMA_DEBUG_LOG("vmaCreateImage");
16920 
16921  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16922 
16923  *pImage = VK_NULL_HANDLE;
16924  *pAllocation = VK_NULL_HANDLE;
16925 
16926  // 1. Create VkImage.
16927  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16928  allocator->m_hDevice,
16929  pImageCreateInfo,
16930  allocator->GetAllocationCallbacks(),
16931  pImage);
16932  if(res >= 0)
16933  {
16934  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16935  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16936  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16937 
16938  // 2. Allocate memory using allocator.
16939  VkMemoryRequirements vkMemReq = {};
16940  bool requiresDedicatedAllocation = false;
16941  bool prefersDedicatedAllocation = false;
16942  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16943  requiresDedicatedAllocation, prefersDedicatedAllocation);
16944 
16945  res = allocator->AllocateMemory(
16946  vkMemReq,
16947  requiresDedicatedAllocation,
16948  prefersDedicatedAllocation,
16949  VK_NULL_HANDLE, // dedicatedBuffer
16950  *pImage, // dedicatedImage
16951  *pAllocationCreateInfo,
16952  suballocType,
16953  1, // allocationCount
16954  pAllocation);
16955 
16956 #if VMA_RECORDING_ENABLED
16957  if(allocator->GetRecorder() != VMA_NULL)
16958  {
16959  allocator->GetRecorder()->RecordCreateImage(
16960  allocator->GetCurrentFrameIndex(),
16961  *pImageCreateInfo,
16962  *pAllocationCreateInfo,
16963  *pAllocation);
16964  }
16965 #endif
16966 
16967  if(res >= 0)
16968  {
16969  // 3. Bind image with memory.
16970  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
16971  {
16972  res = allocator->BindImageMemory(*pAllocation, *pImage);
16973  }
16974  if(res >= 0)
16975  {
16976  // All steps succeeded.
16977  #if VMA_STATS_STRING_ENABLED
16978  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16979  #endif
16980  if(pAllocationInfo != VMA_NULL)
16981  {
16982  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16983  }
16984 
16985  return VK_SUCCESS;
16986  }
16987  allocator->FreeMemory(
16988  1, // allocationCount
16989  pAllocation);
16990  *pAllocation = VK_NULL_HANDLE;
16991  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16992  *pImage = VK_NULL_HANDLE;
16993  return res;
16994  }
16995  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16996  *pImage = VK_NULL_HANDLE;
16997  return res;
16998  }
16999  return res;
17000 }
17001 
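/*
Example (a minimal sketch, not part of the original file): creating a
device-local 2D texture with vmaCreateImage() and releasing it with
vmaDestroyImage(). Assumes an initialized VmaAllocator `allocator`; format,
extent, and usage are illustrative.
*/
static void ExampleCreateTexture(VmaAllocator allocator)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = 256;  // extent, mipLevels and arrayLayers
    imgCreateInfo.extent.height = 256; // must all be nonzero, see the
    imgCreateInfo.extent.depth = 1;    // validation check above
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // -> IMAGE_OPTIMAL suballocation type
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    if(vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &alloc, NULL) == VK_SUCCESS)
    {
        // ... upload and sample the image ...
        vmaDestroyImage(allocator, image, alloc);
    }
}
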
17002 void vmaDestroyImage(
17003  VmaAllocator allocator,
17004  VkImage image,
17005  VmaAllocation allocation)
17006 {
17007  VMA_ASSERT(allocator);
17008 
17009  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17010  {
17011  return;
17012  }
17013 
17014  VMA_DEBUG_LOG("vmaDestroyImage");
17015 
17016  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17017 
17018 #if VMA_RECORDING_ENABLED
17019  if(allocator->GetRecorder() != VMA_NULL)
17020  {
17021  allocator->GetRecorder()->RecordDestroyImage(
17022  allocator->GetCurrentFrameIndex(),
17023  allocation);
17024  }
17025 #endif
17026 
17027  if(image != VK_NULL_HANDLE)
17028  {
17029  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17030  }
17031  if(allocation != VK_NULL_HANDLE)
17032  {
17033  allocator->FreeMemory(
17034  1, // allocationCount
17035  &allocation);
17036  }
17037 }
17038 
17039 #endif // #ifdef VMA_IMPLEMENTATION