Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1647 /*
1648 Define this macro to 0/1 to disable/enable support for recording functionality,
1649 available through VmaAllocatorCreateInfo::pRecordSettings.
1650 */
1651 #ifndef VMA_RECORDING_ENABLED
1652  #ifdef _WIN32
1653  #define VMA_RECORDING_ENABLED 1
1654  #else
1655  #define VMA_RECORDING_ENABLED 0
1656  #endif
1657 #endif
1658 
1659 #ifndef NOMINMAX
1660  #define NOMINMAX // For windows.h
1661 #endif
1662 
1663 #ifndef VULKAN_H_
1664  #include <vulkan/vulkan.h>
1665 #endif
1666 
1667 #if VMA_RECORDING_ENABLED
1668  #include <windows.h>
1669 #endif
1670 
1671 #if !defined(VMA_DEDICATED_ALLOCATION)
1672  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1673  #define VMA_DEDICATED_ALLOCATION 1
1674  #else
1675  #define VMA_DEDICATED_ALLOCATION 0
1676  #endif
1677 #endif
1678 
1688 VK_DEFINE_HANDLE(VmaAllocator)
1689 
1690 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1692  VmaAllocator allocator,
1693  uint32_t memoryType,
1694  VkDeviceMemory memory,
1695  VkDeviceSize size);
1697 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1698  VmaAllocator allocator,
1699  uint32_t memoryType,
1700  VkDeviceMemory memory,
1701  VkDeviceSize size);
1702 
1716 
1746 
1749 typedef VkFlags VmaAllocatorCreateFlags;
1750 
1755 typedef struct VmaVulkanFunctions {
1756  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1757  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1758  PFN_vkAllocateMemory vkAllocateMemory;
1759  PFN_vkFreeMemory vkFreeMemory;
1760  PFN_vkMapMemory vkMapMemory;
1761  PFN_vkUnmapMemory vkUnmapMemory;
1762  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1763  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1764  PFN_vkBindBufferMemory vkBindBufferMemory;
1765  PFN_vkBindImageMemory vkBindImageMemory;
1766  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1767  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1768  PFN_vkCreateBuffer vkCreateBuffer;
1769  PFN_vkDestroyBuffer vkDestroyBuffer;
1770  PFN_vkCreateImage vkCreateImage;
1771  PFN_vkDestroyImage vkDestroyImage;
1772  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1773 #if VMA_DEDICATED_ALLOCATION
1774  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1775  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1776 #endif
1778 
1780 typedef enum VmaRecordFlagBits {
1787 
1790 typedef VkFlags VmaRecordFlags;
1791 
1793 typedef struct VmaRecordSettings
1794 {
1804  const char* pFilePath;
1806 
1809 {
1813 
1814  VkPhysicalDevice physicalDevice;
1816 
1817  VkDevice device;
1819 
1822 
1823  const VkAllocationCallbacks* pAllocationCallbacks;
1825 
1865  const VkDeviceSize* pHeapSizeLimit;
1886 
1888 VkResult vmaCreateAllocator(
1889  const VmaAllocatorCreateInfo* pCreateInfo,
1890  VmaAllocator* pAllocator);
1891 
1893 void vmaDestroyAllocator(
1894  VmaAllocator allocator);
1895 
1901  VmaAllocator allocator,
1902  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1903 
1909  VmaAllocator allocator,
1910  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1911 
1919  VmaAllocator allocator,
1920  uint32_t memoryTypeIndex,
1921  VkMemoryPropertyFlags* pFlags);
1922 
1932  VmaAllocator allocator,
1933  uint32_t frameIndex);
1934 
1937 typedef struct VmaStatInfo
1938 {
1940  uint32_t blockCount;
1946  VkDeviceSize usedBytes;
1948  VkDeviceSize unusedBytes;
1951 } VmaStatInfo;
1952 
1954 typedef struct VmaStats
1955 {
1956  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1957  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1959 } VmaStats;
1960 
1962 void vmaCalculateStats(
1963  VmaAllocator allocator,
1964  VmaStats* pStats);
1965 
1966 #ifndef VMA_STATS_STRING_ENABLED
1967 #define VMA_STATS_STRING_ENABLED 1
1968 #endif
1969 
1970 #if VMA_STATS_STRING_ENABLED
1971 
1973 
1975 void vmaBuildStatsString(
1976  VmaAllocator allocator,
1977  char** ppStatsString,
1978  VkBool32 detailedMap);
1979 
1980 void vmaFreeStatsString(
1981  VmaAllocator allocator,
1982  char* pStatsString);
1983 
1984 #endif // #if VMA_STATS_STRING_ENABLED
1985 
1994 VK_DEFINE_HANDLE(VmaPool)
1995 
1996 typedef enum VmaMemoryUsage
1997 {
2046 } VmaMemoryUsage;
2047 
2057 
2118 
2134 
2144 
2151 
2155 
2157 {
2170  VkMemoryPropertyFlags requiredFlags;
2175  VkMemoryPropertyFlags preferredFlags;
2183  uint32_t memoryTypeBits;
2196  void* pUserData;
2198 
2215 VkResult vmaFindMemoryTypeIndex(
2216  VmaAllocator allocator,
2217  uint32_t memoryTypeBits,
2218  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2219  uint32_t* pMemoryTypeIndex);
2220 
2234  VmaAllocator allocator,
2235  const VkBufferCreateInfo* pBufferCreateInfo,
2236  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2237  uint32_t* pMemoryTypeIndex);
2238 
2252  VmaAllocator allocator,
2253  const VkImageCreateInfo* pImageCreateInfo,
2254  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2255  uint32_t* pMemoryTypeIndex);
2256 
2277 
2294 
2305 
2311 
2314 typedef VkFlags VmaPoolCreateFlags;
2315 
2318 typedef struct VmaPoolCreateInfo {
2333  VkDeviceSize blockSize;
2362 
2365 typedef struct VmaPoolStats {
2368  VkDeviceSize size;
2371  VkDeviceSize unusedSize;
2384  VkDeviceSize unusedRangeSizeMax;
2387  size_t blockCount;
2388 } VmaPoolStats;
2389 
2396 VkResult vmaCreatePool(
2397  VmaAllocator allocator,
2398  const VmaPoolCreateInfo* pCreateInfo,
2399  VmaPool* pPool);
2400 
2403 void vmaDestroyPool(
2404  VmaAllocator allocator,
2405  VmaPool pool);
2406 
2413 void vmaGetPoolStats(
2414  VmaAllocator allocator,
2415  VmaPool pool,
2416  VmaPoolStats* pPoolStats);
2417 
2425  VmaAllocator allocator,
2426  VmaPool pool,
2427  size_t* pLostAllocationCount);
2428 
2443 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2444 
2469 VK_DEFINE_HANDLE(VmaAllocation)
2470 
2471 
2473 typedef struct VmaAllocationInfo {
2478  uint32_t memoryType;
2487  VkDeviceMemory deviceMemory;
2492  VkDeviceSize offset;
2497  VkDeviceSize size;
2511  void* pUserData;
2513 
2524 VkResult vmaAllocateMemory(
2525  VmaAllocator allocator,
2526  const VkMemoryRequirements* pVkMemoryRequirements,
2527  const VmaAllocationCreateInfo* pCreateInfo,
2528  VmaAllocation* pAllocation,
2529  VmaAllocationInfo* pAllocationInfo);
2530 
2550 VkResult vmaAllocateMemoryPages(
2551  VmaAllocator allocator,
2552  const VkMemoryRequirements* pVkMemoryRequirements,
2553  const VmaAllocationCreateInfo* pCreateInfo,
2554  size_t allocationCount,
2555  VmaAllocation* pAllocations,
2556  VmaAllocationInfo* pAllocationInfo);
2557 
2565  VmaAllocator allocator,
2566  VkBuffer buffer,
2567  const VmaAllocationCreateInfo* pCreateInfo,
2568  VmaAllocation* pAllocation,
2569  VmaAllocationInfo* pAllocationInfo);
2570 
2572 VkResult vmaAllocateMemoryForImage(
2573  VmaAllocator allocator,
2574  VkImage image,
2575  const VmaAllocationCreateInfo* pCreateInfo,
2576  VmaAllocation* pAllocation,
2577  VmaAllocationInfo* pAllocationInfo);
2578 
2583 void vmaFreeMemory(
2584  VmaAllocator allocator,
2585  VmaAllocation allocation);
2586 
2597 void vmaFreeMemoryPages(
2598  VmaAllocator allocator,
2599  size_t allocationCount,
2600  VmaAllocation* pAllocations);
2601 
2622 VkResult vmaResizeAllocation(
2623  VmaAllocator allocator,
2624  VmaAllocation allocation,
2625  VkDeviceSize newSize);
2626 
2644  VmaAllocator allocator,
2645  VmaAllocation allocation,
2646  VmaAllocationInfo* pAllocationInfo);
2647 
2662 VkBool32 vmaTouchAllocation(
2663  VmaAllocator allocator,
2664  VmaAllocation allocation);
2665 
2680  VmaAllocator allocator,
2681  VmaAllocation allocation,
2682  void* pUserData);
2683 
2695  VmaAllocator allocator,
2696  VmaAllocation* pAllocation);
2697 
2732 VkResult vmaMapMemory(
2733  VmaAllocator allocator,
2734  VmaAllocation allocation,
2735  void** ppData);
2736 
2741 void vmaUnmapMemory(
2742  VmaAllocator allocator,
2743  VmaAllocation allocation);
2744 
2761 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2762 
2779 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2780 
2797 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2798 
2805 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2806 
2807 typedef enum VmaDefragmentationFlagBits {
2811 typedef VkFlags VmaDefragmentationFlags;
2812 
2817 typedef struct VmaDefragmentationInfo2 {
2841  uint32_t poolCount;
2862  VkDeviceSize maxCpuBytesToMove;
2872  VkDeviceSize maxGpuBytesToMove;
2886  VkCommandBuffer commandBuffer;
2888 
2893 typedef struct VmaDefragmentationInfo {
2898  VkDeviceSize maxBytesToMove;
2905 
2907 typedef struct VmaDefragmentationStats {
2909  VkDeviceSize bytesMoved;
2911  VkDeviceSize bytesFreed;
2917 
2947 VkResult vmaDefragmentationBegin(
2948  VmaAllocator allocator,
2949  const VmaDefragmentationInfo2* pInfo,
2950  VmaDefragmentationStats* pStats,
2951  VmaDefragmentationContext *pContext);
2952 
2958 VkResult vmaDefragmentationEnd(
2959  VmaAllocator allocator,
2960  VmaDefragmentationContext context);
2961 
3002 VkResult vmaDefragment(
3003  VmaAllocator allocator,
3004  VmaAllocation* pAllocations,
3005  size_t allocationCount,
3006  VkBool32* pAllocationsChanged,
3007  const VmaDefragmentationInfo *pDefragmentationInfo,
3008  VmaDefragmentationStats* pDefragmentationStats);
3009 
3022 VkResult vmaBindBufferMemory(
3023  VmaAllocator allocator,
3024  VmaAllocation allocation,
3025  VkBuffer buffer);
3026 
3039 VkResult vmaBindImageMemory(
3040  VmaAllocator allocator,
3041  VmaAllocation allocation,
3042  VkImage image);
3043 
3070 VkResult vmaCreateBuffer(
3071  VmaAllocator allocator,
3072  const VkBufferCreateInfo* pBufferCreateInfo,
3073  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3074  VkBuffer* pBuffer,
3075  VmaAllocation* pAllocation,
3076  VmaAllocationInfo* pAllocationInfo);
3077 
3089 void vmaDestroyBuffer(
3090  VmaAllocator allocator,
3091  VkBuffer buffer,
3092  VmaAllocation allocation);
3093 
3095 VkResult vmaCreateImage(
3096  VmaAllocator allocator,
3097  const VkImageCreateInfo* pImageCreateInfo,
3098  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3099  VkImage* pImage,
3100  VmaAllocation* pAllocation,
3101  VmaAllocationInfo* pAllocationInfo);
3102 
3114 void vmaDestroyImage(
3115  VmaAllocator allocator,
3116  VkImage image,
3117  VmaAllocation allocation);
3118 
3119 #ifdef __cplusplus
3120 }
3121 #endif
3122 
3123 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3124 
3125 // For Visual Studio IntelliSense.
3126 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3127 #define VMA_IMPLEMENTATION
3128 #endif
3129 
3130 #ifdef VMA_IMPLEMENTATION
3131 #undef VMA_IMPLEMENTATION
3132 
3133 #include <cstdint>
3134 #include <cstdlib>
3135 #include <cstring>
3136 
3137 /*******************************************************************************
3138 CONFIGURATION SECTION
3139 
3140 Define some of these macros before each #include of this header or change them
3141 here if you need other then default behavior depending on your environment.
3142 */
3143 
3144 /*
3145 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3146 internally, like:
3147 
3148  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3149 
3150 Define to 0 if you are going to provide you own pointers to Vulkan functions via
3151 VmaAllocatorCreateInfo::pVulkanFunctions.
3152 */
3153 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3154 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3155 #endif
3156 
3157 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3158 //#define VMA_USE_STL_CONTAINERS 1
3159 
3160 /* Set this macro to 1 to make the library including and using STL containers:
3161 std::pair, std::vector, std::list, std::unordered_map.
3162 
3163 Set it to 0 or undefined to make the library using its own implementation of
3164 the containers.
3165 */
3166 #if VMA_USE_STL_CONTAINERS
3167  #define VMA_USE_STL_VECTOR 1
3168  #define VMA_USE_STL_UNORDERED_MAP 1
3169  #define VMA_USE_STL_LIST 1
3170 #endif
3171 
3172 #ifndef VMA_USE_STL_SHARED_MUTEX
3173  // Compiler conforms to C++17.
3174  #if __cplusplus >= 201703L
3175  #define VMA_USE_STL_SHARED_MUTEX 1
3176  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
3177  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
3178  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
3179  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
3180  #define VMA_USE_STL_SHARED_MUTEX 1
3181  #else
3182  #define VMA_USE_STL_SHARED_MUTEX 0
3183  #endif
3184 #endif
3185 
3186 /*
3187 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
3188 Library has its own container implementation.
3189 */
3190 #if VMA_USE_STL_VECTOR
3191  #include <vector>
3192 #endif
3193 
3194 #if VMA_USE_STL_UNORDERED_MAP
3195  #include <unordered_map>
3196 #endif
3197 
3198 #if VMA_USE_STL_LIST
3199  #include <list>
3200 #endif
3201 
3202 /*
3203 Following headers are used in this CONFIGURATION section only, so feel free to
3204 remove them if not needed.
3205 */
3206 #include <cassert> // for assert
3207 #include <algorithm> // for min, max
3208 #include <mutex>
3209 #include <atomic> // for std::atomic
3210 
3211 #ifndef VMA_NULL
3212  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3213  #define VMA_NULL nullptr
3214 #endif
3215 
3216 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3217 #include <cstdlib>
3218 void *aligned_alloc(size_t alignment, size_t size)
3219 {
3220  // alignment must be >= sizeof(void*)
3221  if(alignment < sizeof(void*))
3222  {
3223  alignment = sizeof(void*);
3224  }
3225 
3226  return memalign(alignment, size);
3227 }
3228 #elif defined(__APPLE__) || defined(__ANDROID__)
3229 #include <cstdlib>
3230 void *aligned_alloc(size_t alignment, size_t size)
3231 {
3232  // alignment must be >= sizeof(void*)
3233  if(alignment < sizeof(void*))
3234  {
3235  alignment = sizeof(void*);
3236  }
3237 
3238  void *pointer;
3239  if(posix_memalign(&pointer, alignment, size) == 0)
3240  return pointer;
3241  return VMA_NULL;
3242 }
3243 #endif
3244 
3245 // If your compiler is not compatible with C++11 and definition of
3246 // aligned_alloc() function is missing, uncommeting following line may help:
3247 
3248 //#include <malloc.h>
3249 
3250 // Normal assert to check for programmer's errors, especially in Debug configuration.
3251 #ifndef VMA_ASSERT
3252  #ifdef _DEBUG
3253  #define VMA_ASSERT(expr) assert(expr)
3254  #else
3255  #define VMA_ASSERT(expr)
3256  #endif
3257 #endif
3258 
3259 // Assert that will be called very often, like inside data structures e.g. operator[].
3260 // Making it non-empty can make program slow.
3261 #ifndef VMA_HEAVY_ASSERT
3262  #ifdef _DEBUG
3263  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3264  #else
3265  #define VMA_HEAVY_ASSERT(expr)
3266  #endif
3267 #endif
3268 
3269 #ifndef VMA_ALIGN_OF
3270  #define VMA_ALIGN_OF(type) (__alignof(type))
3271 #endif
3272 
3273 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3274  #if defined(_WIN32)
3275  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3276  #else
3277  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3278  #endif
3279 #endif
3280 
3281 #ifndef VMA_SYSTEM_FREE
3282  #if defined(_WIN32)
3283  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3284  #else
3285  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3286  #endif
3287 #endif
3288 
3289 #ifndef VMA_MIN
3290  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3291 #endif
3292 
3293 #ifndef VMA_MAX
3294  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3295 #endif
3296 
3297 #ifndef VMA_SWAP
3298  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3299 #endif
3300 
3301 #ifndef VMA_SORT
3302  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3303 #endif
3304 
3305 #ifndef VMA_DEBUG_LOG
3306  #define VMA_DEBUG_LOG(format, ...)
3307  /*
3308  #define VMA_DEBUG_LOG(format, ...) do { \
3309  printf(format, __VA_ARGS__); \
3310  printf("\n"); \
3311  } while(false)
3312  */
3313 #endif
3314 
3315 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3316 #if VMA_STATS_STRING_ENABLED
3317  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3318  {
3319  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3320  }
3321  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3322  {
3323  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3324  }
3325  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3326  {
3327  snprintf(outStr, strLen, "%p", ptr);
3328  }
3329 #endif
3330 
3331 #ifndef VMA_MUTEX
3332  class VmaMutex
3333  {
3334  public:
3335  void Lock() { m_Mutex.lock(); }
3336  void Unlock() { m_Mutex.unlock(); }
3337  private:
3338  std::mutex m_Mutex;
3339  };
3340  #define VMA_MUTEX VmaMutex
3341 #endif
3342 
3343 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3344 #ifndef VMA_RW_MUTEX
3345  #if VMA_USE_STL_SHARED_MUTEX
3346  // Use std::shared_mutex from C++17.
3347  #include <shared_mutex>
3348  class VmaRWMutex
3349  {
3350  public:
3351  void LockRead() { m_Mutex.lock_shared(); }
3352  void UnlockRead() { m_Mutex.unlock_shared(); }
3353  void LockWrite() { m_Mutex.lock(); }
3354  void UnlockWrite() { m_Mutex.unlock(); }
3355  private:
3356  std::shared_mutex m_Mutex;
3357  };
3358  #define VMA_RW_MUTEX VmaRWMutex
3359  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3360  // Use SRWLOCK from WinAPI.
3361  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3362  class VmaRWMutex
3363  {
3364  public:
3365  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3366  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3367  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3368  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3369  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3370  private:
3371  SRWLOCK m_Lock;
3372  };
3373  #define VMA_RW_MUTEX VmaRWMutex
3374  #else
3375  // Less efficient fallback: Use normal mutex.
3376  class VmaRWMutex
3377  {
3378  public:
3379  void LockRead() { m_Mutex.Lock(); }
3380  void UnlockRead() { m_Mutex.Unlock(); }
3381  void LockWrite() { m_Mutex.Lock(); }
3382  void UnlockWrite() { m_Mutex.Unlock(); }
3383  private:
3384  VMA_MUTEX m_Mutex;
3385  };
3386  #define VMA_RW_MUTEX VmaRWMutex
3387  #endif // #if VMA_USE_STL_SHARED_MUTEX
3388 #endif // #ifndef VMA_RW_MUTEX
3389 
3390 /*
3391 If providing your own implementation, you need to implement a subset of std::atomic:
3392 
3393 - Constructor(uint32_t desired)
3394 - uint32_t load() const
3395 - void store(uint32_t desired)
3396 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3397 */
3398 #ifndef VMA_ATOMIC_UINT32
3399  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3400 #endif
3401 
3402 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3403 
3407  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3408 #endif
3409 
3410 #ifndef VMA_DEBUG_ALIGNMENT
3411 
3415  #define VMA_DEBUG_ALIGNMENT (1)
3416 #endif
3417 
3418 #ifndef VMA_DEBUG_MARGIN
3419 
3423  #define VMA_DEBUG_MARGIN (0)
3424 #endif
3425 
3426 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3427 
3431  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3432 #endif
3433 
3434 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3435 
3440  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3441 #endif
3442 
3443 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3444 
3448  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3449 #endif
3450 
3451 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3452 
3456  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3457 #endif
3458 
3459 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3460  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3462 #endif
3463 
3464 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3465  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3467 #endif
3468 
3469 #ifndef VMA_CLASS_NO_COPY
3470  #define VMA_CLASS_NO_COPY(className) \
3471  private: \
3472  className(const className&) = delete; \
3473  className& operator=(const className&) = delete;
3474 #endif
3475 
3476 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3477 
3478 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3479 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3480 
3481 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3482 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3483 
3484 /*******************************************************************************
3485 END OF CONFIGURATION
3486 */
3487 
3488 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3489 
3490 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3491  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3492 
3493 // Returns number of bits set to 1 in (v).
3494 static inline uint32_t VmaCountBitsSet(uint32_t v)
3495 {
3496  uint32_t c = v - ((v >> 1) & 0x55555555);
3497  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3498  c = ((c >> 4) + c) & 0x0F0F0F0F;
3499  c = ((c >> 8) + c) & 0x00FF00FF;
3500  c = ((c >> 16) + c) & 0x0000FFFF;
3501  return c;
3502 }
3503 
3504 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
3505 // Use types like uint32_t, uint64_t as T.
3506 template <typename T>
3507 static inline T VmaAlignUp(T val, T align)
3508 {
3509  return (val + align - 1) / align * align;
3510 }
3511 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
3512 // Use types like uint32_t, uint64_t as T.
3513 template <typename T>
3514 static inline T VmaAlignDown(T val, T align)
3515 {
3516  return val / align * align;
3517 }
3518 
3519 // Division with mathematical rounding to nearest number.
3520 template <typename T>
3521 static inline T VmaRoundDiv(T x, T y)
3522 {
3523  return (x + (y / (T)2)) / y;
3524 }
3525 
3526 /*
3527 Returns true if given number is a power of two.
3528 T must be unsigned integer number or signed integer but always nonnegative.
3529 For 0 returns true.
3530 */
3531 template <typename T>
3532 inline bool VmaIsPow2(T x)
3533 {
3534  return (x & (x-1)) == 0;
3535 }
3536 
3537 // Returns smallest power of 2 greater or equal to v.
3538 static inline uint32_t VmaNextPow2(uint32_t v)
3539 {
3540  v--;
3541  v |= v >> 1;
3542  v |= v >> 2;
3543  v |= v >> 4;
3544  v |= v >> 8;
3545  v |= v >> 16;
3546  v++;
3547  return v;
3548 }
3549 static inline uint64_t VmaNextPow2(uint64_t v)
3550 {
3551  v--;
3552  v |= v >> 1;
3553  v |= v >> 2;
3554  v |= v >> 4;
3555  v |= v >> 8;
3556  v |= v >> 16;
3557  v |= v >> 32;
3558  v++;
3559  return v;
3560 }
3561 
3562 // Returns largest power of 2 less or equal to v.
3563 static inline uint32_t VmaPrevPow2(uint32_t v)
3564 {
3565  v |= v >> 1;
3566  v |= v >> 2;
3567  v |= v >> 4;
3568  v |= v >> 8;
3569  v |= v >> 16;
3570  v = v ^ (v >> 1);
3571  return v;
3572 }
3573 static inline uint64_t VmaPrevPow2(uint64_t v)
3574 {
3575  v |= v >> 1;
3576  v |= v >> 2;
3577  v |= v >> 4;
3578  v |= v >> 8;
3579  v |= v >> 16;
3580  v |= v >> 32;
3581  v = v ^ (v >> 1);
3582  return v;
3583 }
3584 
3585 static inline bool VmaStrIsEmpty(const char* pStr)
3586 {
3587  return pStr == VMA_NULL || *pStr == '\0';
3588 }
3589 
3590 #if VMA_STATS_STRING_ENABLED
3591 
3592 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3593 {
3594  switch(algorithm)
3595  {
3597  return "Linear";
3599  return "Buddy";
3600  case 0:
3601  return "Default";
3602  default:
3603  VMA_ASSERT(0);
3604  return "";
3605  }
3606 }
3607 
3608 #endif // #if VMA_STATS_STRING_ENABLED
3609 
3610 #ifndef VMA_SORT
3611 
3612 template<typename Iterator, typename Compare>
3613 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3614 {
3615  Iterator centerValue = end; --centerValue;
3616  Iterator insertIndex = beg;
3617  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3618  {
3619  if(cmp(*memTypeIndex, *centerValue))
3620  {
3621  if(insertIndex != memTypeIndex)
3622  {
3623  VMA_SWAP(*memTypeIndex, *insertIndex);
3624  }
3625  ++insertIndex;
3626  }
3627  }
3628  if(insertIndex != centerValue)
3629  {
3630  VMA_SWAP(*insertIndex, *centerValue);
3631  }
3632  return insertIndex;
3633 }
3634 
3635 template<typename Iterator, typename Compare>
3636 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3637 {
3638  if(beg < end)
3639  {
3640  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3641  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3642  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3643  }
3644 }
3645 
3646 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3647 
3648 #endif // #ifndef VMA_SORT
3649 
3650 /*
3651 Returns true if two memory blocks occupy overlapping pages.
3652 ResourceA must be in less memory offset than ResourceB.
3653 
3654 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3655 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3656 */
3657 static inline bool VmaBlocksOnSamePage(
3658  VkDeviceSize resourceAOffset,
3659  VkDeviceSize resourceASize,
3660  VkDeviceSize resourceBOffset,
3661  VkDeviceSize pageSize)
3662 {
3663  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3664  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3665  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3666  VkDeviceSize resourceBStart = resourceBOffset;
3667  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3668  return resourceAEndPage == resourceBStartPage;
3669 }
3670 
3671 enum VmaSuballocationType
3672 {
3673  VMA_SUBALLOCATION_TYPE_FREE = 0,
3674  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3675  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3676  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3677  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3678  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3679  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3680 };
3681 
3682 /*
3683 Returns true if given suballocation types could conflict and must respect
3684 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3685 or linear image and another one is optimal image. If type is unknown, behave
3686 conservatively.
3687 */
3688 static inline bool VmaIsBufferImageGranularityConflict(
3689  VmaSuballocationType suballocType1,
3690  VmaSuballocationType suballocType2)
3691 {
3692  if(suballocType1 > suballocType2)
3693  {
3694  VMA_SWAP(suballocType1, suballocType2);
3695  }
3696 
3697  switch(suballocType1)
3698  {
3699  case VMA_SUBALLOCATION_TYPE_FREE:
3700  return false;
3701  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3702  return true;
3703  case VMA_SUBALLOCATION_TYPE_BUFFER:
3704  return
3705  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3706  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3707  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3708  return
3709  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3710  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3711  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3712  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3713  return
3714  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3715  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3716  return false;
3717  default:
3718  VMA_ASSERT(0);
3719  return true;
3720  }
3721 }
3722 
3723 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3724 {
3725  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3726  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3727  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3728  {
3729  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3730  }
3731 }
3732 
3733 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3734 {
3735  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3736  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3737  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3738  {
3739  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3740  {
3741  return false;
3742  }
3743  }
3744  return true;
3745 }
3746 
3747 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3748 struct VmaMutexLock
3749 {
3750  VMA_CLASS_NO_COPY(VmaMutexLock)
3751 public:
3752  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3753  m_pMutex(useMutex ? &mutex : VMA_NULL)
3754  { if(m_pMutex) { m_pMutex->Lock(); } }
3755  ~VmaMutexLock()
3756  { if(m_pMutex) { m_pMutex->Unlock(); } }
3757 private:
3758  VMA_MUTEX* m_pMutex;
3759 };
3760 
3761 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3762 struct VmaMutexLockRead
3763 {
3764  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3765 public:
3766  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3767  m_pMutex(useMutex ? &mutex : VMA_NULL)
3768  { if(m_pMutex) { m_pMutex->LockRead(); } }
3769  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3770 private:
3771  VMA_RW_MUTEX* m_pMutex;
3772 };
3773 
3774 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3775 struct VmaMutexLockWrite
3776 {
3777  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3778 public:
3779  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3780  m_pMutex(useMutex ? &mutex : VMA_NULL)
3781  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3782  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3783 private:
3784  VMA_RW_MUTEX* m_pMutex;
3785 };
3786 
3787 #if VMA_DEBUG_GLOBAL_MUTEX
3788  static VMA_MUTEX gDebugGlobalMutex;
3789  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3790 #else
3791  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3792 #endif
3793 
3794 // Minimum size of a free suballocation to register it in the free suballocation collection.
3795 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3796 
3797 /*
3798 Performs binary search and returns iterator to first element that is greater or
3799 equal to (key), according to comparison (cmp).
3800 
3801 Cmp should return true if first argument is less than second argument.
3802 
3803 Returned value is the found element, if present in the collection or place where
3804 new element with value (key) should be inserted.
3805 */
3806 template <typename CmpLess, typename IterT, typename KeyT>
3807 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3808 {
3809  size_t down = 0, up = (end - beg);
3810  while(down < up)
3811  {
3812  const size_t mid = (down + up) / 2;
3813  if(cmp(*(beg+mid), key))
3814  {
3815  down = mid + 1;
3816  }
3817  else
3818  {
3819  up = mid;
3820  }
3821  }
3822  return beg + down;
3823 }
3824 
3825 /*
3826 Returns true if all pointers in the array are not-null and unique.
3827 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3828 T must be pointer type, e.g. VmaAllocation, VmaPool.
3829 */
3830 template<typename T>
3831 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3832 {
3833  for(uint32_t i = 0; i < count; ++i)
3834  {
3835  const T iPtr = arr[i];
3836  if(iPtr == VMA_NULL)
3837  {
3838  return false;
3839  }
3840  for(uint32_t j = i + 1; j < count; ++j)
3841  {
3842  if(iPtr == arr[j])
3843  {
3844  return false;
3845  }
3846  }
3847  }
3848  return true;
3849 }
3850 
3852 // Memory allocation
3853 
3854 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3855 {
3856  if((pAllocationCallbacks != VMA_NULL) &&
3857  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3858  {
3859  return (*pAllocationCallbacks->pfnAllocation)(
3860  pAllocationCallbacks->pUserData,
3861  size,
3862  alignment,
3863  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3864  }
3865  else
3866  {
3867  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3868  }
3869 }
3870 
3871 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3872 {
3873  if((pAllocationCallbacks != VMA_NULL) &&
3874  (pAllocationCallbacks->pfnFree != VMA_NULL))
3875  {
3876  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3877  }
3878  else
3879  {
3880  VMA_SYSTEM_FREE(ptr);
3881  }
3882 }
3883 
3884 template<typename T>
3885 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3886 {
3887  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3888 }
3889 
3890 template<typename T>
3891 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3892 {
3893  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3894 }
3895 
3896 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3897 
3898 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3899 
3900 template<typename T>
3901 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3902 {
3903  ptr->~T();
3904  VmaFree(pAllocationCallbacks, ptr);
3905 }
3906 
3907 template<typename T>
3908 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3909 {
3910  if(ptr != VMA_NULL)
3911  {
3912  for(size_t i = count; i--; )
3913  {
3914  ptr[i].~T();
3915  }
3916  VmaFree(pAllocationCallbacks, ptr);
3917  }
3918 }
3919 
3920 // STL-compatible allocator.
3921 template<typename T>
3922 class VmaStlAllocator
3923 {
3924 public:
3925  const VkAllocationCallbacks* const m_pCallbacks;
3926  typedef T value_type;
3927 
3928  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3929  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3930 
3931  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3932  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3933 
3934  template<typename U>
3935  bool operator==(const VmaStlAllocator<U>& rhs) const
3936  {
3937  return m_pCallbacks == rhs.m_pCallbacks;
3938  }
3939  template<typename U>
3940  bool operator!=(const VmaStlAllocator<U>& rhs) const
3941  {
3942  return m_pCallbacks != rhs.m_pCallbacks;
3943  }
3944 
3945  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3946 };
3947 
3948 #if VMA_USE_STL_VECTOR
3949 
3950 #define VmaVector std::vector
3951 
3952 template<typename T, typename allocatorT>
3953 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3954 {
3955  vec.insert(vec.begin() + index, item);
3956 }
3957 
3958 template<typename T, typename allocatorT>
3959 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3960 {
3961  vec.erase(vec.begin() + index);
3962 }
3963 
3964 #else // #if VMA_USE_STL_VECTOR
3965 
3966 /* Class with interface compatible with subset of std::vector.
3967 T must be POD because constructors and destructors are not called and memcpy is
3968 used for these objects. */
3969 template<typename T, typename AllocatorT>
3970 class VmaVector
3971 {
3972 public:
3973  typedef T value_type;
3974 
3975  VmaVector(const AllocatorT& allocator) :
3976  m_Allocator(allocator),
3977  m_pArray(VMA_NULL),
3978  m_Count(0),
3979  m_Capacity(0)
3980  {
3981  }
3982 
3983  VmaVector(size_t count, const AllocatorT& allocator) :
3984  m_Allocator(allocator),
3985  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3986  m_Count(count),
3987  m_Capacity(count)
3988  {
3989  }
3990 
3991  VmaVector(const VmaVector<T, AllocatorT>& src) :
3992  m_Allocator(src.m_Allocator),
3993  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3994  m_Count(src.m_Count),
3995  m_Capacity(src.m_Count)
3996  {
3997  if(m_Count != 0)
3998  {
3999  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4000  }
4001  }
4002 
4003  ~VmaVector()
4004  {
4005  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4006  }
4007 
4008  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4009  {
4010  if(&rhs != this)
4011  {
4012  resize(rhs.m_Count);
4013  if(m_Count != 0)
4014  {
4015  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4016  }
4017  }
4018  return *this;
4019  }
4020 
4021  bool empty() const { return m_Count == 0; }
4022  size_t size() const { return m_Count; }
4023  T* data() { return m_pArray; }
4024  const T* data() const { return m_pArray; }
4025 
4026  T& operator[](size_t index)
4027  {
4028  VMA_HEAVY_ASSERT(index < m_Count);
4029  return m_pArray[index];
4030  }
4031  const T& operator[](size_t index) const
4032  {
4033  VMA_HEAVY_ASSERT(index < m_Count);
4034  return m_pArray[index];
4035  }
4036 
4037  T& front()
4038  {
4039  VMA_HEAVY_ASSERT(m_Count > 0);
4040  return m_pArray[0];
4041  }
4042  const T& front() const
4043  {
4044  VMA_HEAVY_ASSERT(m_Count > 0);
4045  return m_pArray[0];
4046  }
4047  T& back()
4048  {
4049  VMA_HEAVY_ASSERT(m_Count > 0);
4050  return m_pArray[m_Count - 1];
4051  }
4052  const T& back() const
4053  {
4054  VMA_HEAVY_ASSERT(m_Count > 0);
4055  return m_pArray[m_Count - 1];
4056  }
4057 
4058  void reserve(size_t newCapacity, bool freeMemory = false)
4059  {
4060  newCapacity = VMA_MAX(newCapacity, m_Count);
4061 
4062  if((newCapacity < m_Capacity) && !freeMemory)
4063  {
4064  newCapacity = m_Capacity;
4065  }
4066 
4067  if(newCapacity != m_Capacity)
4068  {
4069  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4070  if(m_Count != 0)
4071  {
4072  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4073  }
4074  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4075  m_Capacity = newCapacity;
4076  m_pArray = newArray;
4077  }
4078  }
4079 
4080  void resize(size_t newCount, bool freeMemory = false)
4081  {
4082  size_t newCapacity = m_Capacity;
4083  if(newCount > m_Capacity)
4084  {
4085  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4086  }
4087  else if(freeMemory)
4088  {
4089  newCapacity = newCount;
4090  }
4091 
4092  if(newCapacity != m_Capacity)
4093  {
4094  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4095  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4096  if(elementsToCopy != 0)
4097  {
4098  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4099  }
4100  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4101  m_Capacity = newCapacity;
4102  m_pArray = newArray;
4103  }
4104 
4105  m_Count = newCount;
4106  }
4107 
4108  void clear(bool freeMemory = false)
4109  {
4110  resize(0, freeMemory);
4111  }
4112 
4113  void insert(size_t index, const T& src)
4114  {
4115  VMA_HEAVY_ASSERT(index <= m_Count);
4116  const size_t oldCount = size();
4117  resize(oldCount + 1);
4118  if(index < oldCount)
4119  {
4120  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4121  }
4122  m_pArray[index] = src;
4123  }
4124 
4125  void remove(size_t index)
4126  {
4127  VMA_HEAVY_ASSERT(index < m_Count);
4128  const size_t oldCount = size();
4129  if(index < oldCount - 1)
4130  {
4131  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4132  }
4133  resize(oldCount - 1);
4134  }
4135 
4136  void push_back(const T& src)
4137  {
4138  const size_t newIndex = size();
4139  resize(newIndex + 1);
4140  m_pArray[newIndex] = src;
4141  }
4142 
4143  void pop_back()
4144  {
4145  VMA_HEAVY_ASSERT(m_Count > 0);
4146  resize(size() - 1);
4147  }
4148 
4149  void push_front(const T& src)
4150  {
4151  insert(0, src);
4152  }
4153 
4154  void pop_front()
4155  {
4156  VMA_HEAVY_ASSERT(m_Count > 0);
4157  remove(0);
4158  }
4159 
4160  typedef T* iterator;
4161 
4162  iterator begin() { return m_pArray; }
4163  iterator end() { return m_pArray + m_Count; }
4164 
4165 private:
4166  AllocatorT m_Allocator;
4167  T* m_pArray;
4168  size_t m_Count;
4169  size_t m_Capacity;
4170 };
4171 
4172 template<typename T, typename allocatorT>
4173 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4174 {
4175  vec.insert(index, item);
4176 }
4177 
4178 template<typename T, typename allocatorT>
4179 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4180 {
4181  vec.remove(index);
4182 }
4183 
4184 #endif // #if VMA_USE_STL_VECTOR
4185 
4186 template<typename CmpLess, typename VectorT>
4187 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4188 {
4189  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4190  vector.data(),
4191  vector.data() + vector.size(),
4192  value,
4193  CmpLess()) - vector.data();
4194  VmaVectorInsert(vector, indexToInsert, value);
4195  return indexToInsert;
4196 }
4197 
4198 template<typename CmpLess, typename VectorT>
4199 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4200 {
4201  CmpLess comparator;
4202  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4203  vector.begin(),
4204  vector.end(),
4205  value,
4206  comparator);
4207  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4208  {
4209  size_t indexToRemove = it - vector.begin();
4210  VmaVectorRemove(vector, indexToRemove);
4211  return true;
4212  }
4213  return false;
4214 }
4215 
4216 template<typename CmpLess, typename IterT, typename KeyT>
4217 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4218 {
4219  CmpLess comparator;
4220  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4221  beg, end, value, comparator);
4222  if(it == end ||
4223  (!comparator(*it, value) && !comparator(value, *it)))
4224  {
4225  return it;
4226  }
4227  return end;
4228 }
4229 
4231 // class VmaPoolAllocator
4232 
4233 /*
4234 Allocator for objects of type T using a list of arrays (pools) to speed up
4235 allocation. Number of elements that can be allocated is not bounded because
4236 allocator can create multiple blocks.
4237 */
4238 template<typename T>
4239 class VmaPoolAllocator
4240 {
4241  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4242 public:
4243  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4244  ~VmaPoolAllocator();
4245  void Clear();
4246  T* Alloc();
4247  void Free(T* ptr);
4248 
4249 private:
4250  union Item
4251  {
4252  uint32_t NextFreeIndex;
4253  T Value;
4254  };
4255 
4256  struct ItemBlock
4257  {
4258  Item* pItems;
4259  uint32_t Capacity;
4260  uint32_t FirstFreeIndex;
4261  };
4262 
4263  const VkAllocationCallbacks* m_pAllocationCallbacks;
4264  const uint32_t m_FirstBlockCapacity;
4265  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4266 
4267  ItemBlock& CreateNewBlock();
4268 };
4269 
4270 template<typename T>
4271 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4272  m_pAllocationCallbacks(pAllocationCallbacks),
4273  m_FirstBlockCapacity(firstBlockCapacity),
4274  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4275 {
4276  VMA_ASSERT(m_FirstBlockCapacity > 1);
4277 }
4278 
4279 template<typename T>
4280 VmaPoolAllocator<T>::~VmaPoolAllocator()
4281 {
4282  Clear();
4283 }
4284 
4285 template<typename T>
4286 void VmaPoolAllocator<T>::Clear()
4287 {
4288  for(size_t i = m_ItemBlocks.size(); i--; )
4289  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4290  m_ItemBlocks.clear();
4291 }
4292 
4293 template<typename T>
4294 T* VmaPoolAllocator<T>::Alloc()
4295 {
4296  for(size_t i = m_ItemBlocks.size(); i--; )
4297  {
4298  ItemBlock& block = m_ItemBlocks[i];
4299  // This block has some free items: Use first one.
4300  if(block.FirstFreeIndex != UINT32_MAX)
4301  {
4302  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4303  block.FirstFreeIndex = pItem->NextFreeIndex;
4304  return &pItem->Value;
4305  }
4306  }
4307 
4308  // No block has free item: Create new one and use it.
4309  ItemBlock& newBlock = CreateNewBlock();
4310  Item* const pItem = &newBlock.pItems[0];
4311  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4312  return &pItem->Value;
4313 }
4314 
4315 template<typename T>
4316 void VmaPoolAllocator<T>::Free(T* ptr)
4317 {
4318  // Search all memory blocks to find ptr.
4319  for(size_t i = m_ItemBlocks.size(); i--; )
4320  {
4321  ItemBlock& block = m_ItemBlocks[i];
4322 
4323  // Casting to union.
4324  Item* pItemPtr;
4325  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4326 
4327  // Check if pItemPtr is in address range of this block.
4328  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4329  {
4330  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4331  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4332  block.FirstFreeIndex = index;
4333  return;
4334  }
4335  }
4336  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4337 }
4338 
4339 template<typename T>
4340 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4341 {
4342  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4343  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4344 
4345  const ItemBlock newBlock = {
4346  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4347  newBlockCapacity,
4348  0 };
4349 
4350  m_ItemBlocks.push_back(newBlock);
4351 
4352  // Setup singly-linked list of all free items in this block.
4353  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4354  newBlock.pItems[i].NextFreeIndex = i + 1;
4355  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4356  return m_ItemBlocks.back();
4357 }
4358 
4360 // class VmaRawList, VmaList
4361 
4362 #if VMA_USE_STL_LIST
4363 
4364 #define VmaList std::list
4365 
4366 #else // #if VMA_USE_STL_LIST
4367 
4368 template<typename T>
4369 struct VmaListItem
4370 {
4371  VmaListItem* pPrev;
4372  VmaListItem* pNext;
4373  T Value;
4374 };
4375 
4376 // Doubly linked list.
4377 template<typename T>
4378 class VmaRawList
4379 {
4380  VMA_CLASS_NO_COPY(VmaRawList)
4381 public:
4382  typedef VmaListItem<T> ItemType;
4383 
4384  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4385  ~VmaRawList();
4386  void Clear();
4387 
4388  size_t GetCount() const { return m_Count; }
4389  bool IsEmpty() const { return m_Count == 0; }
4390 
4391  ItemType* Front() { return m_pFront; }
4392  const ItemType* Front() const { return m_pFront; }
4393  ItemType* Back() { return m_pBack; }
4394  const ItemType* Back() const { return m_pBack; }
4395 
4396  ItemType* PushBack();
4397  ItemType* PushFront();
4398  ItemType* PushBack(const T& value);
4399  ItemType* PushFront(const T& value);
4400  void PopBack();
4401  void PopFront();
4402 
4403  // Item can be null - it means PushBack.
4404  ItemType* InsertBefore(ItemType* pItem);
4405  // Item can be null - it means PushFront.
4406  ItemType* InsertAfter(ItemType* pItem);
4407 
4408  ItemType* InsertBefore(ItemType* pItem, const T& value);
4409  ItemType* InsertAfter(ItemType* pItem, const T& value);
4410 
4411  void Remove(ItemType* pItem);
4412 
4413 private:
4414  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4415  VmaPoolAllocator<ItemType> m_ItemAllocator;
4416  ItemType* m_pFront;
4417  ItemType* m_pBack;
4418  size_t m_Count;
4419 };
4420 
4421 template<typename T>
4422 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4423  m_pAllocationCallbacks(pAllocationCallbacks),
4424  m_ItemAllocator(pAllocationCallbacks, 128),
4425  m_pFront(VMA_NULL),
4426  m_pBack(VMA_NULL),
4427  m_Count(0)
4428 {
4429 }
4430 
4431 template<typename T>
4432 VmaRawList<T>::~VmaRawList()
4433 {
4434  // Intentionally not calling Clear, because that would be unnecessary
4435  // computations to return all items to m_ItemAllocator as free.
4436 }
4437 
4438 template<typename T>
4439 void VmaRawList<T>::Clear()
4440 {
4441  if(IsEmpty() == false)
4442  {
4443  ItemType* pItem = m_pBack;
4444  while(pItem != VMA_NULL)
4445  {
4446  ItemType* const pPrevItem = pItem->pPrev;
4447  m_ItemAllocator.Free(pItem);
4448  pItem = pPrevItem;
4449  }
4450  m_pFront = VMA_NULL;
4451  m_pBack = VMA_NULL;
4452  m_Count = 0;
4453  }
4454 }
4455 
4456 template<typename T>
4457 VmaListItem<T>* VmaRawList<T>::PushBack()
4458 {
4459  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4460  pNewItem->pNext = VMA_NULL;
4461  if(IsEmpty())
4462  {
4463  pNewItem->pPrev = VMA_NULL;
4464  m_pFront = pNewItem;
4465  m_pBack = pNewItem;
4466  m_Count = 1;
4467  }
4468  else
4469  {
4470  pNewItem->pPrev = m_pBack;
4471  m_pBack->pNext = pNewItem;
4472  m_pBack = pNewItem;
4473  ++m_Count;
4474  }
4475  return pNewItem;
4476 }
4477 
4478 template<typename T>
4479 VmaListItem<T>* VmaRawList<T>::PushFront()
4480 {
4481  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4482  pNewItem->pPrev = VMA_NULL;
4483  if(IsEmpty())
4484  {
4485  pNewItem->pNext = VMA_NULL;
4486  m_pFront = pNewItem;
4487  m_pBack = pNewItem;
4488  m_Count = 1;
4489  }
4490  else
4491  {
4492  pNewItem->pNext = m_pFront;
4493  m_pFront->pPrev = pNewItem;
4494  m_pFront = pNewItem;
4495  ++m_Count;
4496  }
4497  return pNewItem;
4498 }
4499 
4500 template<typename T>
4501 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4502 {
4503  ItemType* const pNewItem = PushBack();
4504  pNewItem->Value = value;
4505  return pNewItem;
4506 }
4507 
4508 template<typename T>
4509 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4510 {
4511  ItemType* const pNewItem = PushFront();
4512  pNewItem->Value = value;
4513  return pNewItem;
4514 }
4515 
4516 template<typename T>
4517 void VmaRawList<T>::PopBack()
4518 {
4519  VMA_HEAVY_ASSERT(m_Count > 0);
4520  ItemType* const pBackItem = m_pBack;
4521  ItemType* const pPrevItem = pBackItem->pPrev;
4522  if(pPrevItem != VMA_NULL)
4523  {
4524  pPrevItem->pNext = VMA_NULL;
4525  }
4526  m_pBack = pPrevItem;
4527  m_ItemAllocator.Free(pBackItem);
4528  --m_Count;
4529 }
4530 
4531 template<typename T>
4532 void VmaRawList<T>::PopFront()
4533 {
4534  VMA_HEAVY_ASSERT(m_Count > 0);
4535  ItemType* const pFrontItem = m_pFront;
4536  ItemType* const pNextItem = pFrontItem->pNext;
4537  if(pNextItem != VMA_NULL)
4538  {
4539  pNextItem->pPrev = VMA_NULL;
4540  }
4541  m_pFront = pNextItem;
4542  m_ItemAllocator.Free(pFrontItem);
4543  --m_Count;
4544 }
4545 
4546 template<typename T>
4547 void VmaRawList<T>::Remove(ItemType* pItem)
4548 {
4549  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4550  VMA_HEAVY_ASSERT(m_Count > 0);
4551 
4552  if(pItem->pPrev != VMA_NULL)
4553  {
4554  pItem->pPrev->pNext = pItem->pNext;
4555  }
4556  else
4557  {
4558  VMA_HEAVY_ASSERT(m_pFront == pItem);
4559  m_pFront = pItem->pNext;
4560  }
4561 
4562  if(pItem->pNext != VMA_NULL)
4563  {
4564  pItem->pNext->pPrev = pItem->pPrev;
4565  }
4566  else
4567  {
4568  VMA_HEAVY_ASSERT(m_pBack == pItem);
4569  m_pBack = pItem->pPrev;
4570  }
4571 
4572  m_ItemAllocator.Free(pItem);
4573  --m_Count;
4574 }
4575 
4576 template<typename T>
4577 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4578 {
4579  if(pItem != VMA_NULL)
4580  {
4581  ItemType* const prevItem = pItem->pPrev;
4582  ItemType* const newItem = m_ItemAllocator.Alloc();
4583  newItem->pPrev = prevItem;
4584  newItem->pNext = pItem;
4585  pItem->pPrev = newItem;
4586  if(prevItem != VMA_NULL)
4587  {
4588  prevItem->pNext = newItem;
4589  }
4590  else
4591  {
4592  VMA_HEAVY_ASSERT(m_pFront == pItem);
4593  m_pFront = newItem;
4594  }
4595  ++m_Count;
4596  return newItem;
4597  }
4598  else
4599  return PushBack();
4600 }
4601 
4602 template<typename T>
4603 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4604 {
4605  if(pItem != VMA_NULL)
4606  {
4607  ItemType* const nextItem = pItem->pNext;
4608  ItemType* const newItem = m_ItemAllocator.Alloc();
4609  newItem->pNext = nextItem;
4610  newItem->pPrev = pItem;
4611  pItem->pNext = newItem;
4612  if(nextItem != VMA_NULL)
4613  {
4614  nextItem->pPrev = newItem;
4615  }
4616  else
4617  {
4618  VMA_HEAVY_ASSERT(m_pBack == pItem);
4619  m_pBack = newItem;
4620  }
4621  ++m_Count;
4622  return newItem;
4623  }
4624  else
4625  return PushFront();
4626 }
4627 
4628 template<typename T>
4629 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4630 {
4631  ItemType* const newItem = InsertBefore(pItem);
4632  newItem->Value = value;
4633  return newItem;
4634 }
4635 
4636 template<typename T>
4637 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4638 {
4639  ItemType* const newItem = InsertAfter(pItem);
4640  newItem->Value = value;
4641  return newItem;
4642 }
4643 
4644 template<typename T, typename AllocatorT>
4645 class VmaList
4646 {
4647  VMA_CLASS_NO_COPY(VmaList)
4648 public:
4649  class iterator
4650  {
4651  public:
4652  iterator() :
4653  m_pList(VMA_NULL),
4654  m_pItem(VMA_NULL)
4655  {
4656  }
4657 
4658  T& operator*() const
4659  {
4660  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4661  return m_pItem->Value;
4662  }
4663  T* operator->() const
4664  {
4665  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4666  return &m_pItem->Value;
4667  }
4668 
4669  iterator& operator++()
4670  {
4671  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4672  m_pItem = m_pItem->pNext;
4673  return *this;
4674  }
4675  iterator& operator--()
4676  {
4677  if(m_pItem != VMA_NULL)
4678  {
4679  m_pItem = m_pItem->pPrev;
4680  }
4681  else
4682  {
4683  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4684  m_pItem = m_pList->Back();
4685  }
4686  return *this;
4687  }
4688 
4689  iterator operator++(int)
4690  {
4691  iterator result = *this;
4692  ++*this;
4693  return result;
4694  }
4695  iterator operator--(int)
4696  {
4697  iterator result = *this;
4698  --*this;
4699  return result;
4700  }
4701 
4702  bool operator==(const iterator& rhs) const
4703  {
4704  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4705  return m_pItem == rhs.m_pItem;
4706  }
4707  bool operator!=(const iterator& rhs) const
4708  {
4709  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4710  return m_pItem != rhs.m_pItem;
4711  }
4712 
4713  private:
4714  VmaRawList<T>* m_pList;
4715  VmaListItem<T>* m_pItem;
4716 
4717  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4718  m_pList(pList),
4719  m_pItem(pItem)
4720  {
4721  }
4722 
4723  friend class VmaList<T, AllocatorT>;
4724  };
4725 
4726  class const_iterator
4727  {
4728  public:
4729  const_iterator() :
4730  m_pList(VMA_NULL),
4731  m_pItem(VMA_NULL)
4732  {
4733  }
4734 
4735  const_iterator(const iterator& src) :
4736  m_pList(src.m_pList),
4737  m_pItem(src.m_pItem)
4738  {
4739  }
4740 
4741  const T& operator*() const
4742  {
4743  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4744  return m_pItem->Value;
4745  }
4746  const T* operator->() const
4747  {
4748  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4749  return &m_pItem->Value;
4750  }
4751 
4752  const_iterator& operator++()
4753  {
4754  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4755  m_pItem = m_pItem->pNext;
4756  return *this;
4757  }
4758  const_iterator& operator--()
4759  {
4760  if(m_pItem != VMA_NULL)
4761  {
4762  m_pItem = m_pItem->pPrev;
4763  }
4764  else
4765  {
4766  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4767  m_pItem = m_pList->Back();
4768  }
4769  return *this;
4770  }
4771 
4772  const_iterator operator++(int)
4773  {
4774  const_iterator result = *this;
4775  ++*this;
4776  return result;
4777  }
4778  const_iterator operator--(int)
4779  {
4780  const_iterator result = *this;
4781  --*this;
4782  return result;
4783  }
4784 
4785  bool operator==(const const_iterator& rhs) const
4786  {
4787  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4788  return m_pItem == rhs.m_pItem;
4789  }
4790  bool operator!=(const const_iterator& rhs) const
4791  {
4792  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4793  return m_pItem != rhs.m_pItem;
4794  }
4795 
4796  private:
4797  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4798  m_pList(pList),
4799  m_pItem(pItem)
4800  {
4801  }
4802 
4803  const VmaRawList<T>* m_pList;
4804  const VmaListItem<T>* m_pItem;
4805 
4806  friend class VmaList<T, AllocatorT>;
4807  };
4808 
4809  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4810 
4811  bool empty() const { return m_RawList.IsEmpty(); }
4812  size_t size() const { return m_RawList.GetCount(); }
4813 
4814  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4815  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4816 
4817  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4818  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4819 
4820  void clear() { m_RawList.Clear(); }
4821  void push_back(const T& value) { m_RawList.PushBack(value); }
4822  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4823  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4824 
4825 private:
4826  VmaRawList<T> m_RawList;
4827 };
4828 
4829 #endif // #if VMA_USE_STL_LIST
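/*
Example - a sketch of typical VmaList usage, mirroring the std::list subset it
implements. It assumes VmaStlAllocator (defined earlier in this file) accepts
null VkAllocationCallbacks; names are illustrative only:

    VmaStlAllocator<int> stlAllocator(VMA_NULL);
    VmaList< int, VmaStlAllocator<int> > list(stlAllocator);
    list.push_back(10);
    list.push_back(30);
    list.insert(list.end(), 40); // insert before end() appends: 10 30 40
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 10, 30, 40 in order.
    }
*/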
4830 
4832 // class VmaMap
4833 
4834 // Unused in this version.
4835 #if 0
4836 
4837 #if VMA_USE_STL_UNORDERED_MAP
4838 
4839 #define VmaPair std::pair
4840 
4841 #define VMA_MAP_TYPE(KeyT, ValueT) \
4842  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4843 
4844 #else // #if VMA_USE_STL_UNORDERED_MAP
4845 
4846 template<typename T1, typename T2>
4847 struct VmaPair
4848 {
4849  T1 first;
4850  T2 second;
4851 
4852  VmaPair() : first(), second() { }
4853  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4854 };
4855 
4856 /* Class compatible with a subset of the interface of std::unordered_map.
4857 KeyT, ValueT must be POD because they will be stored in VmaVector.
4858 */
4859 template<typename KeyT, typename ValueT>
4860 class VmaMap
4861 {
4862 public:
4863  typedef VmaPair<KeyT, ValueT> PairType;
4864  typedef PairType* iterator;
4865 
4866  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4867 
4868  iterator begin() { return m_Vector.begin(); }
4869  iterator end() { return m_Vector.end(); }
4870 
4871  void insert(const PairType& pair);
4872  iterator find(const KeyT& key);
4873  void erase(iterator it);
4874 
4875 private:
4876  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4877 };
4878 
4879 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4880 
4881 template<typename FirstT, typename SecondT>
4882 struct VmaPairFirstLess
4883 {
4884  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4885  {
4886  return lhs.first < rhs.first;
4887  }
4888  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4889  {
4890  return lhs.first < rhsFirst;
4891  }
4892 };
4893 
4894 template<typename KeyT, typename ValueT>
4895 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4896 {
4897  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4898  m_Vector.data(),
4899  m_Vector.data() + m_Vector.size(),
4900  pair,
4901  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4902  VmaVectorInsert(m_Vector, indexToInsert, pair);
4903 }
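// Example of how insert() above keeps m_Vector sorted by key (a sketch; note
// this whole class is compiled out by the surrounding #if 0):
//  - contents sorted by first: (1,a) (5,b) (9,c)
//  - insert((5,d)): VmaBinaryFindFirstNotLess returns index 1, so
//    VmaVectorInsert shifts the tail right: (1,a) (5,d) (5,b) (9,c)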
4904 
4905 template<typename KeyT, typename ValueT>
4906 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4907 {
4908  PairType* it = VmaBinaryFindFirstNotLess(
4909  m_Vector.data(),
4910  m_Vector.data() + m_Vector.size(),
4911  key,
4912  VmaPairFirstLess<KeyT, ValueT>());
4913  if((it != m_Vector.end()) && (it->first == key))
4914  {
4915  return it;
4916  }
4917  else
4918  {
4919  return m_Vector.end();
4920  }
4921 }
4922 
4923 template<typename KeyT, typename ValueT>
4924 void VmaMap<KeyT, ValueT>::erase(iterator it)
4925 {
4926  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4927 }
4928 
4929 #endif // #if VMA_USE_STL_UNORDERED_MAP
4930 
4931 #endif // #if 0
4932 
4934 
4935 class VmaDeviceMemoryBlock;
4936 
4937 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4938 
4939 struct VmaAllocation_T
4940 {
4941 private:
4942  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4943 
4944  enum FLAGS
4945  {
4946  FLAG_USER_DATA_STRING = 0x01,
4947  };
4948 
4949 public:
4950  enum ALLOCATION_TYPE
4951  {
4952  ALLOCATION_TYPE_NONE,
4953  ALLOCATION_TYPE_BLOCK,
4954  ALLOCATION_TYPE_DEDICATED,
4955  };
4956 
4957  /*
4958  This struct cannot have a constructor or a destructor. It must be POD because it is
4959  allocated using VmaPoolAllocator - see the usage sketch after Dtor() below.
4960  */
4961 
4962  void Ctor(uint32_t currentFrameIndex, bool userDataString)
4963  {
4964  m_Alignment = 1;
4965  m_Size = 0;
4966  m_pUserData = VMA_NULL;
4967  m_LastUseFrameIndex = currentFrameIndex;
4968  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
4969  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
4970  m_MapCount = 0;
4971  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
4972 
4973 #if VMA_STATS_STRING_ENABLED
4974  m_CreationFrameIndex = currentFrameIndex;
4975  m_BufferImageUsage = 0;
4976 #endif
4977  }
4978 
4979  void Dtor()
4980  {
4981  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4982 
4983  // Check if owned string was freed.
4984  VMA_ASSERT(m_pUserData == VMA_NULL);
4985  }
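 /*
 Example - the intended lifetime pattern (a sketch; it assumes VmaPoolAllocator,
 defined earlier in this file, is constructed from VkAllocationCallbacks plus an
 items-per-block count and exposes Alloc()/Free(); `currentFrameIndex` is an
 illustrative variable):

     VmaPoolAllocator<VmaAllocation_T> objAllocator(VMA_NULL, 128);
     VmaAllocation_T* alloc = &objAllocator.Alloc(); // raw POD storage, no constructor runs
     alloc->Ctor(currentFrameIndex, false);          // explicit initialization instead
     // ... use the allocation ...
     alloc->Dtor();                                  // explicit cleanup instead of a destructor
     objAllocator.Free(alloc);
 */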
4986 
4987  void InitBlockAllocation(
4988  VmaDeviceMemoryBlock* block,
4989  VkDeviceSize offset,
4990  VkDeviceSize alignment,
4991  VkDeviceSize size,
4992  VmaSuballocationType suballocationType,
4993  bool mapped,
4994  bool canBecomeLost)
4995  {
4996  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4997  VMA_ASSERT(block != VMA_NULL);
4998  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4999  m_Alignment = alignment;
5000  m_Size = size;
5001  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5002  m_SuballocationType = (uint8_t)suballocationType;
5003  m_BlockAllocation.m_Block = block;
5004  m_BlockAllocation.m_Offset = offset;
5005  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5006  }
5007 
5008  void InitLost()
5009  {
5010  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5011  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5012  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5013  m_BlockAllocation.m_Block = VMA_NULL;
5014  m_BlockAllocation.m_Offset = 0;
5015  m_BlockAllocation.m_CanBecomeLost = true;
5016  }
5017 
5018  void ChangeBlockAllocation(
5019  VmaAllocator hAllocator,
5020  VmaDeviceMemoryBlock* block,
5021  VkDeviceSize offset);
5022 
5023  void ChangeSize(VkDeviceSize newSize);
5024  void ChangeOffset(VkDeviceSize newOffset);
5025 
5026  // A non-null pMappedData means the allocation was created with the MAPPED flag.
5027  void InitDedicatedAllocation(
5028  uint32_t memoryTypeIndex,
5029  VkDeviceMemory hMemory,
5030  VmaSuballocationType suballocationType,
5031  void* pMappedData,
5032  VkDeviceSize size)
5033  {
5034  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5035  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5036  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5037  m_Alignment = 0;
5038  m_Size = size;
5039  m_SuballocationType = (uint8_t)suballocationType;
5040  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5041  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5042  m_DedicatedAllocation.m_hMemory = hMemory;
5043  m_DedicatedAllocation.m_pMappedData = pMappedData;
5044  }
5045 
5046  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5047  VkDeviceSize GetAlignment() const { return m_Alignment; }
5048  VkDeviceSize GetSize() const { return m_Size; }
5049  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5050  void* GetUserData() const { return m_pUserData; }
5051  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5052  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5053 
5054  VmaDeviceMemoryBlock* GetBlock() const
5055  {
5056  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5057  return m_BlockAllocation.m_Block;
5058  }
5059  VkDeviceSize GetOffset() const;
5060  VkDeviceMemory GetMemory() const;
5061  uint32_t GetMemoryTypeIndex() const;
5062  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5063  void* GetMappedData() const;
5064  bool CanBecomeLost() const;
5065 
5066  uint32_t GetLastUseFrameIndex() const
5067  {
5068  return m_LastUseFrameIndex.load();
5069  }
5070  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5071  {
5072  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5073  }
5074  /*
5075  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5076  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5077  - Else, returns false.
5078 
5079  If hAllocation is already lost, this function asserts - you should not call it then.
5080  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
5081  */
5082  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
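 // Example: with frameInUseCount = 2 and LastUseFrameIndex = 10, the condition
 // 10 + 2 < currentFrameIndex first holds at currentFrameIndex = 13, so
 // MakeLost(13, 2) sets LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true,
 // while MakeLost(12, 2) leaves the allocation alive and returns false.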
5083 
5084  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5085  {
5086  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5087  outInfo.blockCount = 1;
5088  outInfo.allocationCount = 1;
5089  outInfo.unusedRangeCount = 0;
5090  outInfo.usedBytes = m_Size;
5091  outInfo.unusedBytes = 0;
5092  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5093  outInfo.unusedRangeSizeMin = UINT64_MAX;
5094  outInfo.unusedRangeSizeMax = 0;
5095  }
5096 
5097  void BlockAllocMap();
5098  void BlockAllocUnmap();
5099  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5100  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5101 
5102 #if VMA_STATS_STRING_ENABLED
5103  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5104  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5105 
5106  void InitBufferImageUsage(uint32_t bufferImageUsage)
5107  {
5108  VMA_ASSERT(m_BufferImageUsage == 0);
5109  m_BufferImageUsage = bufferImageUsage;
5110  }
5111 
5112  void PrintParameters(class VmaJsonWriter& json) const;
5113 #endif
5114 
5115 private:
5116  VkDeviceSize m_Alignment;
5117  VkDeviceSize m_Size;
5118  void* m_pUserData;
5119  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5120  uint8_t m_Type; // ALLOCATION_TYPE
5121  uint8_t m_SuballocationType; // VmaSuballocationType
5122  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5123  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5124  uint8_t m_MapCount;
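 // Example: an allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT and then
 // mapped twice more via vmaMapMemory() holds m_MapCount == (0x80 | 2) == 0x82;
 // two vmaUnmapMemory() calls bring it back to 0x80 (still persistently mapped).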
5125  uint8_t m_Flags; // enum FLAGS
5126 
5127  // Allocation out of VmaDeviceMemoryBlock.
5128  struct BlockAllocation
5129  {
5130  VmaDeviceMemoryBlock* m_Block;
5131  VkDeviceSize m_Offset;
5132  bool m_CanBecomeLost;
5133  };
5134 
5135  // Allocation for an object that has its own private VkDeviceMemory.
5136  struct DedicatedAllocation
5137  {
5138  uint32_t m_MemoryTypeIndex;
5139  VkDeviceMemory m_hMemory;
5140  void* m_pMappedData; // Not null means memory is mapped.
5141  };
5142 
5143  union
5144  {
5145  // Allocation out of VmaDeviceMemoryBlock.
5146  BlockAllocation m_BlockAllocation;
5147  // Allocation for an object that has its own private VkDeviceMemory.
5148  DedicatedAllocation m_DedicatedAllocation;
5149  };
5150 
5151 #if VMA_STATS_STRING_ENABLED
5152  uint32_t m_CreationFrameIndex;
5153  uint32_t m_BufferImageUsage; // 0 if unknown.
5154 #endif
5155 
5156  void FreeUserDataString(VmaAllocator hAllocator);
5157 };
5158 
5159 /*
5160 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5161 an allocated memory block, or free.
5162 */
5163 struct VmaSuballocation
5164 {
5165  VkDeviceSize offset;
5166  VkDeviceSize size;
5167  VmaAllocation hAllocation;
5168  VmaSuballocationType type;
5169 };
5170 
5171 // Comparator for offsets.
5172 struct VmaSuballocationOffsetLess
5173 {
5174  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5175  {
5176  return lhs.offset < rhs.offset;
5177  }
5178 };
5179 struct VmaSuballocationOffsetGreater
5180 {
5181  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5182  {
5183  return lhs.offset > rhs.offset;
5184  }
5185 };
5186 
5187 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5188 
5189 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
5190 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5191 
5192 enum class VmaAllocationRequestType
5193 {
5194  Normal,
5195  // Used by "Linear" algorithm.
5196  UpperAddress,
5197  EndOf1st,
5198  EndOf2nd,
5199 };
5200 
5201 /*
5202 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
5203 
5204 If canMakeOtherLost was false:
5205 - item points to a FREE suballocation.
5206 - itemsToMakeLostCount is 0.
5207 
5208 If canMakeOtherLost was true:
5209 - item points to first of sequence of suballocations, which are either FREE,
5210  or point to VmaAllocations that can become lost.
5211 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5212  the requested allocation to succeed.
5213 */
5214 struct VmaAllocationRequest
5215 {
5216  VkDeviceSize offset;
5217  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5218  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5219  VmaSuballocationList::iterator item;
5220  size_t itemsToMakeLostCount;
5221  void* customData;
5222  VmaAllocationRequestType type;
5223 
5224  VkDeviceSize CalcCost() const
5225  {
5226  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5227  }
5228 };
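// Example: a request that overlaps 262144 B (256 KiB) of allocations to make lost,
// with itemsToMakeLostCount == 2, has CalcCost() == 262144 + 2 * 1048576 == 2359296,
// so a lower cost marks the less destructive request when candidates are compared.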
5229 
5230 /*
5231 Data structure used for bookkeeping of allocations and unused ranges of memory
5232 in a single VkDeviceMemory block.
5233 */
5234 class VmaBlockMetadata
5235 {
5236 public:
5237  VmaBlockMetadata(VmaAllocator hAllocator);
5238  virtual ~VmaBlockMetadata() { }
5239  virtual void Init(VkDeviceSize size) { m_Size = size; }
5240 
5241  // Validates all data structures inside this object. If not valid, returns false.
5242  virtual bool Validate() const = 0;
5243  VkDeviceSize GetSize() const { return m_Size; }
5244  virtual size_t GetAllocationCount() const = 0;
5245  virtual VkDeviceSize GetSumFreeSize() const = 0;
5246  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5247  // Returns true if this block is empty, i.e. contains only a single free suballocation.
5248  virtual bool IsEmpty() const = 0;
5249 
5250  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5251  // Shouldn't modify blockCount.
5252  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5253 
5254 #if VMA_STATS_STRING_ENABLED
5255  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5256 #endif
5257 
5258  // Tries to find a place for suballocation with given parameters inside this block.
5259  // If succeeded, fills pAllocationRequest and returns true.
5260  // If failed, returns false.
5261  virtual bool CreateAllocationRequest(
5262  uint32_t currentFrameIndex,
5263  uint32_t frameInUseCount,
5264  VkDeviceSize bufferImageGranularity,
5265  VkDeviceSize allocSize,
5266  VkDeviceSize allocAlignment,
5267  bool upperAddress,
5268  VmaSuballocationType allocType,
5269  bool canMakeOtherLost,
5270  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5271  uint32_t strategy,
5272  VmaAllocationRequest* pAllocationRequest) = 0;
5273 
5274  virtual bool MakeRequestedAllocationsLost(
5275  uint32_t currentFrameIndex,
5276  uint32_t frameInUseCount,
5277  VmaAllocationRequest* pAllocationRequest) = 0;
5278 
5279  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5280 
5281  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5282 
5283  // Makes actual allocation based on request. Request must already be checked and valid.
5284  virtual void Alloc(
5285  const VmaAllocationRequest& request,
5286  VmaSuballocationType type,
5287  VkDeviceSize allocSize,
5288  VmaAllocation hAllocation) = 0;
5289 
5290  // Frees suballocation assigned to given memory region.
5291  virtual void Free(const VmaAllocation allocation) = 0;
5292  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5293 
5294  // Tries to resize (grow or shrink) space for given allocation, in place.
5295  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
5296 
5297 protected:
5298  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5299 
5300 #if VMA_STATS_STRING_ENABLED
5301  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5302  VkDeviceSize unusedBytes,
5303  size_t allocationCount,
5304  size_t unusedRangeCount) const;
5305  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5306  VkDeviceSize offset,
5307  VmaAllocation hAllocation) const;
5308  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5309  VkDeviceSize offset,
5310  VkDeviceSize size) const;
5311  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5312 #endif
5313 
5314 private:
5315  VkDeviceSize m_Size;
5316  const VkAllocationCallbacks* m_pAllocationCallbacks;
5317 };
5318 
5319 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5320  VMA_ASSERT(0 && "Validation failed: " #cond); \
5321  return false; \
5322  } } while(false)
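// Example of intended use inside a Validate() implementation - each failed
// condition asserts in debug builds and makes the enclosing function return false:
//
//     VMA_VALIDATE(m_SumFreeSize <= GetSize());
//
// The do { } while(false) wrapper makes the macro expand to a single statement,
// so it remains safe e.g. in an if/else without braces.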
5323 
5324 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5325 {
5326  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5327 public:
5328  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5329  virtual ~VmaBlockMetadata_Generic();
5330  virtual void Init(VkDeviceSize size);
5331 
5332  virtual bool Validate() const;
5333  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5334  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5335  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5336  virtual bool IsEmpty() const;
5337 
5338  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5339  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5340 
5341 #if VMA_STATS_STRING_ENABLED
5342  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5343 #endif
5344 
5345  virtual bool CreateAllocationRequest(
5346  uint32_t currentFrameIndex,
5347  uint32_t frameInUseCount,
5348  VkDeviceSize bufferImageGranularity,
5349  VkDeviceSize allocSize,
5350  VkDeviceSize allocAlignment,
5351  bool upperAddress,
5352  VmaSuballocationType allocType,
5353  bool canMakeOtherLost,
5354  uint32_t strategy,
5355  VmaAllocationRequest* pAllocationRequest);
5356 
5357  virtual bool MakeRequestedAllocationsLost(
5358  uint32_t currentFrameIndex,
5359  uint32_t frameInUseCount,
5360  VmaAllocationRequest* pAllocationRequest);
5361 
5362  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5363 
5364  virtual VkResult CheckCorruption(const void* pBlockData);
5365 
5366  virtual void Alloc(
5367  const VmaAllocationRequest& request,
5368  VmaSuballocationType type,
5369  VkDeviceSize allocSize,
5370  VmaAllocation hAllocation);
5371 
5372  virtual void Free(const VmaAllocation allocation);
5373  virtual void FreeAtOffset(VkDeviceSize offset);
5374 
5375  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
5376 
5378  // For defragmentation
5379 
5380  bool IsBufferImageGranularityConflictPossible(
5381  VkDeviceSize bufferImageGranularity,
5382  VmaSuballocationType& inOutPrevSuballocType) const;
5383 
5384 private:
5385  friend class VmaDefragmentationAlgorithm_Generic;
5386  friend class VmaDefragmentationAlgorithm_Fast;
5387 
5388  uint32_t m_FreeCount;
5389  VkDeviceSize m_SumFreeSize;
5390  VmaSuballocationList m_Suballocations;
5391  // Suballocations that are free and have size greater than a certain threshold.
5392  // Sorted by size, ascending.
5393  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5394 
5395  bool ValidateFreeSuballocationList() const;
5396 
5397  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
5398  // If yes, fills pOffset and returns true. If no, returns false.
5399  bool CheckAllocation(
5400  uint32_t currentFrameIndex,
5401  uint32_t frameInUseCount,
5402  VkDeviceSize bufferImageGranularity,
5403  VkDeviceSize allocSize,
5404  VkDeviceSize allocAlignment,
5405  VmaSuballocationType allocType,
5406  VmaSuballocationList::const_iterator suballocItem,
5407  bool canMakeOtherLost,
5408  VkDeviceSize* pOffset,
5409  size_t* itemsToMakeLostCount,
5410  VkDeviceSize* pSumFreeSize,
5411  VkDeviceSize* pSumItemSize) const;
5412  // Given a free suballocation, merges it with the following one, which must also be free.
5413  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5414  // Releases given suballocation, making it free.
5415  // Merges it with adjacent free suballocations if applicable.
5416  // Returns iterator to new free suballocation at this place.
5417  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5418  // Given a free suballocation, inserts it into the sorted list
5419  // m_FreeSuballocationsBySize if it is large enough to qualify.
5420  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5421  // Given a free suballocation, removes it from the sorted list
5422  // m_FreeSuballocationsBySize if it was large enough to be registered there.
5423  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5424 };
5425 
5426 /*
5427 Allocations and their references in the internal data structure look like this:
5428 
5429 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5430 
5431  0 +-------+
5432  | |
5433  | |
5434  | |
5435  +-------+
5436  | Alloc | 1st[m_1stNullItemsBeginCount]
5437  +-------+
5438  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5439  +-------+
5440  | ... |
5441  +-------+
5442  | Alloc | 1st[1st.size() - 1]
5443  +-------+
5444  | |
5445  | |
5446  | |
5447 GetSize() +-------+
5448 
5449 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5450 
5451  0 +-------+
5452  | Alloc | 2nd[0]
5453  +-------+
5454  | Alloc | 2nd[1]
5455  +-------+
5456  | ... |
5457  +-------+
5458  | Alloc | 2nd[2nd.size() - 1]
5459  +-------+
5460  | |
5461  | |
5462  | |
5463  +-------+
5464  | Alloc | 1st[m_1stNullItemsBeginCount]
5465  +-------+
5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5467  +-------+
5468  | ... |
5469  +-------+
5470  | Alloc | 1st[1st.size() - 1]
5471  +-------+
5472  | |
5473 GetSize() +-------+
5474 
5475 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5476 
5477  0 +-------+
5478  | |
5479  | |
5480  | |
5481  +-------+
5482  | Alloc | 1st[m_1stNullItemsBeginCount]
5483  +-------+
5484  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5485  +-------+
5486  | ... |
5487  +-------+
5488  | Alloc | 1st[1st.size() - 1]
5489  +-------+
5490  | |
5491  | |
5492  | |
5493  +-------+
5494  | Alloc | 2nd[2nd.size() - 1]
5495  +-------+
5496  | ... |
5497  +-------+
5498  | Alloc | 2nd[1]
5499  +-------+
5500  | Alloc | 2nd[0]
5501 GetSize() +-------+
5502 
5503 */
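/*
Example - how this metadata is reached through the public API (a sketch; it
assumes `allocator` is a valid VmaAllocator and `memTypeIndex` was chosen e.g.
with vmaFindMemoryTypeIndex()):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 16 * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 1; // single block, used as a stack or ring buffer

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

Freeing from the front while allocating at the back then produces the
SECOND_VECTOR_RING_BUFFER layout pictured above.
*/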
5504 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5505 {
5506  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5507 public:
5508  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5509  virtual ~VmaBlockMetadata_Linear();
5510  virtual void Init(VkDeviceSize size);
5511 
5512  virtual bool Validate() const;
5513  virtual size_t GetAllocationCount() const;
5514  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5515  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5516  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5517 
5518  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5519  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5520 
5521 #if VMA_STATS_STRING_ENABLED
5522  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5523 #endif
5524 
5525  virtual bool CreateAllocationRequest(
5526  uint32_t currentFrameIndex,
5527  uint32_t frameInUseCount,
5528  VkDeviceSize bufferImageGranularity,
5529  VkDeviceSize allocSize,
5530  VkDeviceSize allocAlignment,
5531  bool upperAddress,
5532  VmaSuballocationType allocType,
5533  bool canMakeOtherLost,
5534  uint32_t strategy,
5535  VmaAllocationRequest* pAllocationRequest);
5536 
5537  virtual bool MakeRequestedAllocationsLost(
5538  uint32_t currentFrameIndex,
5539  uint32_t frameInUseCount,
5540  VmaAllocationRequest* pAllocationRequest);
5541 
5542  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5543 
5544  virtual VkResult CheckCorruption(const void* pBlockData);
5545 
5546  virtual void Alloc(
5547  const VmaAllocationRequest& request,
5548  VmaSuballocationType type,
5549  VkDeviceSize allocSize,
5550  VmaAllocation hAllocation);
5551 
5552  virtual void Free(const VmaAllocation allocation);
5553  virtual void FreeAtOffset(VkDeviceSize offset);
5554 
5555 private:
5556  /*
5557  There are two suballocation vectors, used in ping-pong fashion.
5558  The one with index m_1stVectorIndex is called 1st.
5559  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5560  2nd can be non-empty only when 1st is not empty.
5561  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5562  */
5563  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5564 
5565  enum SECOND_VECTOR_MODE
5566  {
5567  SECOND_VECTOR_EMPTY,
5568  /*
5569  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
5570  all have smaller offsets.
5571  */
5572  SECOND_VECTOR_RING_BUFFER,
5573  /*
5574  Suballocations in the 2nd vector form the upper side of a double stack.
5575  They all have offsets higher than those in the 1st vector.
5576  The top of this stack means smaller offsets, but higher indices in this vector.
5577  */
5578  SECOND_VECTOR_DOUBLE_STACK,
5579  };
5580 
5581  VkDeviceSize m_SumFreeSize;
5582  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5583  uint32_t m_1stVectorIndex;
5584  SECOND_VECTOR_MODE m_2ndVectorMode;
5585 
5586  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5587  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5588  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5589  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5590 
5591  // Number of items in 1st vector with hAllocation = null at the beginning.
5592  size_t m_1stNullItemsBeginCount;
5593  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5594  size_t m_1stNullItemsMiddleCount;
5595  // Number of items in 2nd vector with hAllocation = null.
5596  size_t m_2ndNullItemsCount;
5597 
5598  bool ShouldCompact1st() const;
5599  void CleanupAfterFree();
5600 
5601  bool CreateAllocationRequest_LowerAddress(
5602  uint32_t currentFrameIndex,
5603  uint32_t frameInUseCount,
5604  VkDeviceSize bufferImageGranularity,
5605  VkDeviceSize allocSize,
5606  VkDeviceSize allocAlignment,
5607  VmaSuballocationType allocType,
5608  bool canMakeOtherLost,
5609  uint32_t strategy,
5610  VmaAllocationRequest* pAllocationRequest);
5611  bool CreateAllocationRequest_UpperAddress(
5612  uint32_t currentFrameIndex,
5613  uint32_t frameInUseCount,
5614  VkDeviceSize bufferImageGranularity,
5615  VkDeviceSize allocSize,
5616  VkDeviceSize allocAlignment,
5617  VmaSuballocationType allocType,
5618  bool canMakeOtherLost,
5619  uint32_t strategy,
5620  VmaAllocationRequest* pAllocationRequest);
5621 };
5622 
5623 /*
5624 - GetSize() is the original size of the allocated memory block.
5625 - m_UsableSize is this size aligned down to a power of two.
5626  All allocations and calculations happen relative to m_UsableSize.
5627 - GetUnusableSize() is the difference between them.
5628  It is reported as a separate, unused range, not available for allocations.
5629 
5630 A node at level 0 has size = m_UsableSize.
5631 Each next level contains nodes with a size 2 times smaller than the current level.
5632 m_LevelCount is the maximum number of levels to use in the current object.
5633 */
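/*
Example: for a block of 10 MiB, m_UsableSize = 8 MiB (aligned down to a power of
two) and GetUnusableSize() = 2 MiB. Node sizes follow
LevelToNodeSize(level) = m_UsableSize >> level:

    level 0: 8 MiB (the root)
    level 1: 4 MiB
    level 2: 2 MiB
    ...

An allocation is placed in a node of the deepest level whose node size still
fits it; the padding wasted inside that node is internal fragmentation, which
per the comment on m_SumFreeSize below is still counted as free space.
*/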
5634 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5635 {
5636  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5637 public:
5638  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5639  virtual ~VmaBlockMetadata_Buddy();
5640  virtual void Init(VkDeviceSize size);
5641 
5642  virtual bool Validate() const;
5643  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5644  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5645  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5646  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5647 
5648  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5649  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5650 
5651 #if VMA_STATS_STRING_ENABLED
5652  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5653 #endif
5654 
5655  virtual bool CreateAllocationRequest(
5656  uint32_t currentFrameIndex,
5657  uint32_t frameInUseCount,
5658  VkDeviceSize bufferImageGranularity,
5659  VkDeviceSize allocSize,
5660  VkDeviceSize allocAlignment,
5661  bool upperAddress,
5662  VmaSuballocationType allocType,
5663  bool canMakeOtherLost,
5664  uint32_t strategy,
5665  VmaAllocationRequest* pAllocationRequest);
5666 
5667  virtual bool MakeRequestedAllocationsLost(
5668  uint32_t currentFrameIndex,
5669  uint32_t frameInUseCount,
5670  VmaAllocationRequest* pAllocationRequest);
5671 
5672  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5673 
5674  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5675 
5676  virtual void Alloc(
5677  const VmaAllocationRequest& request,
5678  VmaSuballocationType type,
5679  VkDeviceSize allocSize,
5680  VmaAllocation hAllocation);
5681 
5682  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5683  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5684 
5685 private:
5686  static const VkDeviceSize MIN_NODE_SIZE = 32;
5687  static const size_t MAX_LEVELS = 30;
5688 
5689  struct ValidationContext
5690  {
5691  size_t calculatedAllocationCount;
5692  size_t calculatedFreeCount;
5693  VkDeviceSize calculatedSumFreeSize;
5694 
5695  ValidationContext() :
5696  calculatedAllocationCount(0),
5697  calculatedFreeCount(0),
5698  calculatedSumFreeSize(0) { }
5699  };
5700 
5701  struct Node
5702  {
5703  VkDeviceSize offset;
5704  enum TYPE
5705  {
5706  TYPE_FREE,
5707  TYPE_ALLOCATION,
5708  TYPE_SPLIT,
5709  TYPE_COUNT
5710  } type;
5711  Node* parent;
5712  Node* buddy;
5713 
5714  union
5715  {
5716  struct
5717  {
5718  Node* prev;
5719  Node* next;
5720  } free;
5721  struct
5722  {
5723  VmaAllocation alloc;
5724  } allocation;
5725  struct
5726  {
5727  Node* leftChild;
5728  } split;
5729  };
5730  };
5731 
5732  // Size of the memory block aligned down to a power of two.
5733  VkDeviceSize m_UsableSize;
5734  uint32_t m_LevelCount;
5735 
5736  Node* m_Root;
5737  struct {
5738  Node* front;
5739  Node* back;
5740  } m_FreeList[MAX_LEVELS];
5741  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5742  size_t m_AllocationCount;
5743  // Number of nodes in the tree with type == TYPE_FREE.
5744  size_t m_FreeCount;
5745  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5746  VkDeviceSize m_SumFreeSize;
5747 
5748  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5749  void DeleteNode(Node* node);
5750  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5751  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5752  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5753  // Alloc passed just for validation. Can be null.
5754  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5755  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5756  // Adds node to the front of FreeList at given level.
5757  // node->type must be FREE.
5758  // node->free.prev, next can be undefined.
5759  void AddToFreeListFront(uint32_t level, Node* node);
5760  // Removes node from FreeList at given level.
5761  // node->type must be FREE.
5762  // node->free.prev, next stay untouched.
5763  void RemoveFromFreeList(uint32_t level, Node* node);
5764 
5765 #if VMA_STATS_STRING_ENABLED
5766  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5767 #endif
5768 };
5769 
5770 /*
5771 Represents a single block of device memory (`VkDeviceMemory`) with all the
5772 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5773 
5774 Thread-safety: This class must be externally synchronized.
5775 */
5776 class VmaDeviceMemoryBlock
5777 {
5778  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5779 public:
5780  VmaBlockMetadata* m_pMetadata;
5781 
5782  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5783 
5784  ~VmaDeviceMemoryBlock()
5785  {
5786  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5787  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5788  }
5789 
5790  // Always call after construction.
5791  void Init(
5792  VmaAllocator hAllocator,
5793  VmaPool hParentPool,
5794  uint32_t newMemoryTypeIndex,
5795  VkDeviceMemory newMemory,
5796  VkDeviceSize newSize,
5797  uint32_t id,
5798  uint32_t algorithm);
5799  // Always call before destruction.
5800  void Destroy(VmaAllocator allocator);
5801 
5802  VmaPool GetParentPool() const { return m_hParentPool; }
5803  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5804  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5805  uint32_t GetId() const { return m_Id; }
5806  void* GetMappedData() const { return m_pMappedData; }
5807 
5808  // Validates all data structures inside this object. If not valid, returns false.
5809  bool Validate() const;
5810 
5811  VkResult CheckCorruption(VmaAllocator hAllocator);
5812 
5813  // ppData can be null.
5814  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5815  void Unmap(VmaAllocator hAllocator, uint32_t count);
5816 
5817  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5818  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5819 
5820  VkResult BindBufferMemory(
5821  const VmaAllocator hAllocator,
5822  const VmaAllocation hAllocation,
5823  VkBuffer hBuffer);
5824  VkResult BindImageMemory(
5825  const VmaAllocator hAllocator,
5826  const VmaAllocation hAllocation,
5827  VkImage hImage);
5828 
5829 private:
5830  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
5831  uint32_t m_MemoryTypeIndex;
5832  uint32_t m_Id;
5833  VkDeviceMemory m_hMemory;
5834 
5835  /*
5836  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5837  Also protects m_MapCount, m_pMappedData.
5838  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
5839  */
5840  VMA_MUTEX m_Mutex;
5841  uint32_t m_MapCount;
5842  void* m_pMappedData;
5843 };
5844 
5845 struct VmaPointerLess
5846 {
5847  bool operator()(const void* lhs, const void* rhs) const
5848  {
5849  return lhs < rhs;
5850  }
5851 };
5852 
5853 struct VmaDefragmentationMove
5854 {
5855  size_t srcBlockIndex;
5856  size_t dstBlockIndex;
5857  VkDeviceSize srcOffset;
5858  VkDeviceSize dstOffset;
5859  VkDeviceSize size;
5860 };
5861 
5862 class VmaDefragmentationAlgorithm;
5863 
5864 /*
5865 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5866 Vulkan memory type.
5867 
5868 Synchronized internally with a mutex.
5869 */
5870 struct VmaBlockVector
5871 {
5872  VMA_CLASS_NO_COPY(VmaBlockVector)
5873 public:
5874  VmaBlockVector(
5875  VmaAllocator hAllocator,
5876  VmaPool hParentPool,
5877  uint32_t memoryTypeIndex,
5878  VkDeviceSize preferredBlockSize,
5879  size_t minBlockCount,
5880  size_t maxBlockCount,
5881  VkDeviceSize bufferImageGranularity,
5882  uint32_t frameInUseCount,
5883  bool isCustomPool,
5884  bool explicitBlockSize,
5885  uint32_t algorithm);
5886  ~VmaBlockVector();
5887 
5888  VkResult CreateMinBlocks();
5889 
5890  VmaPool GetParentPool() const { return m_hParentPool; }
5891  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5892  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5893  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5894  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5895  uint32_t GetAlgorithm() const { return m_Algorithm; }
5896 
5897  void GetPoolStats(VmaPoolStats* pStats);
5898 
5899  bool IsEmpty() const { return m_Blocks.empty(); }
5900  bool IsCorruptionDetectionEnabled() const;
5901 
5902  VkResult Allocate(
5903  uint32_t currentFrameIndex,
5904  VkDeviceSize size,
5905  VkDeviceSize alignment,
5906  const VmaAllocationCreateInfo& createInfo,
5907  VmaSuballocationType suballocType,
5908  size_t allocationCount,
5909  VmaAllocation* pAllocations);
5910 
5911  void Free(
5912  VmaAllocation hAllocation);
5913 
5914  // Adds statistics of this BlockVector to pStats.
5915  void AddStats(VmaStats* pStats);
5916 
5917 #if VMA_STATS_STRING_ENABLED
5918  void PrintDetailedMap(class VmaJsonWriter& json);
5919 #endif
5920 
5921  void MakePoolAllocationsLost(
5922  uint32_t currentFrameIndex,
5923  size_t* pLostAllocationCount);
5924  VkResult CheckCorruption();
5925 
5926  // Saves results in pCtx->res.
5927  void Defragment(
5928  class VmaBlockVectorDefragmentationContext* pCtx,
5929  VmaDefragmentationStats* pStats,
5930  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
5931  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
5932  VkCommandBuffer commandBuffer);
5933  void DefragmentationEnd(
5934  class VmaBlockVectorDefragmentationContext* pCtx,
5935  VmaDefragmentationStats* pStats);
5936 
5938  // To be used only while the m_Mutex is locked. Used during defragmentation.
5939 
5940  size_t GetBlockCount() const { return m_Blocks.size(); }
5941  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
5942  size_t CalcAllocationCount() const;
5943  bool IsBufferImageGranularityConflictPossible() const;
5944 
5945 private:
5946  friend class VmaDefragmentationAlgorithm_Generic;
5947 
5948  const VmaAllocator m_hAllocator;
5949  const VmaPool m_hParentPool;
5950  const uint32_t m_MemoryTypeIndex;
5951  const VkDeviceSize m_PreferredBlockSize;
5952  const size_t m_MinBlockCount;
5953  const size_t m_MaxBlockCount;
5954  const VkDeviceSize m_BufferImageGranularity;
5955  const uint32_t m_FrameInUseCount;
5956  const bool m_IsCustomPool;
5957  const bool m_ExplicitBlockSize;
5958  const uint32_t m_Algorithm;
5959  /* There can be at most one block that is completely empty - a
5960  hysteresis to avoid the pessimistic case of alternating creation and
5961  destruction of a VkDeviceMemory. */
5962  bool m_HasEmptyBlock;
5963  VMA_RW_MUTEX m_Mutex;
5964  // Incrementally sorted by sumFreeSize, ascending.
5965  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5966  uint32_t m_NextBlockId;
5967 
5968  VkDeviceSize CalcMaxBlockSize() const;
5969 
5970  // Finds and removes given block from vector.
5971  void Remove(VmaDeviceMemoryBlock* pBlock);
5972 
5973  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5974  // after this call.
5975  void IncrementallySortBlocks();
5976 
5977  VkResult AllocatePage(
5978  uint32_t currentFrameIndex,
5979  VkDeviceSize size,
5980  VkDeviceSize alignment,
5981  const VmaAllocationCreateInfo& createInfo,
5982  VmaSuballocationType suballocType,
5983  VmaAllocation* pAllocation);
5984 
5985  // To be used only without CAN_MAKE_OTHER_LOST flag.
5986  VkResult AllocateFromBlock(
5987  VmaDeviceMemoryBlock* pBlock,
5988  uint32_t currentFrameIndex,
5989  VkDeviceSize size,
5990  VkDeviceSize alignment,
5991  VmaAllocationCreateFlags allocFlags,
5992  void* pUserData,
5993  VmaSuballocationType suballocType,
5994  uint32_t strategy,
5995  VmaAllocation* pAllocation);
5996 
5997  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5998 
5999  // Saves result to pCtx->res.
6000  void ApplyDefragmentationMovesCpu(
6001  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6002  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6003  // Saves result to pCtx->res.
6004  void ApplyDefragmentationMovesGpu(
6005  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6006  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6007  VkCommandBuffer commandBuffer);
6008 
6009  /*
6010  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6011  - updated with new data.
6012  */
6013  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6014 };
6015 
6016 struct VmaPool_T
6017 {
6018  VMA_CLASS_NO_COPY(VmaPool_T)
6019 public:
6020  VmaBlockVector m_BlockVector;
6021 
6022  VmaPool_T(
6023  VmaAllocator hAllocator,
6024  const VmaPoolCreateInfo& createInfo,
6025  VkDeviceSize preferredBlockSize);
6026  ~VmaPool_T();
6027 
6028  uint32_t GetId() const { return m_Id; }
6029  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6030 
6031 #if VMA_STATS_STRING_ENABLED
6032  //void PrintDetailedMap(class VmaStringBuilder& sb);
6033 #endif
6034 
6035 private:
6036  uint32_t m_Id;
6037 };
6038 
6039 /*
6040 Performs defragmentation:
6041 
6042 - Updates `pBlockVector->m_pMetadata`.
6043 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6044 - Does not move actual data, only returns requested moves as `moves`.
6045 */
6046 class VmaDefragmentationAlgorithm
6047 {
6048  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6049 public:
6050  VmaDefragmentationAlgorithm(
6051  VmaAllocator hAllocator,
6052  VmaBlockVector* pBlockVector,
6053  uint32_t currentFrameIndex) :
6054  m_hAllocator(hAllocator),
6055  m_pBlockVector(pBlockVector),
6056  m_CurrentFrameIndex(currentFrameIndex)
6057  {
6058  }
6059  virtual ~VmaDefragmentationAlgorithm()
6060  {
6061  }
6062 
6063  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6064  virtual void AddAll() = 0;
6065 
6066  virtual VkResult Defragment(
6067  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6068  VkDeviceSize maxBytesToMove,
6069  uint32_t maxAllocationsToMove) = 0;
6070 
6071  virtual VkDeviceSize GetBytesMoved() const = 0;
6072  virtual uint32_t GetAllocationsMoved() const = 0;
6073 
6074 protected:
6075  VmaAllocator const m_hAllocator;
6076  VmaBlockVector* const m_pBlockVector;
6077  const uint32_t m_CurrentFrameIndex;
6078 
6079  struct AllocationInfo
6080  {
6081  VmaAllocation m_hAllocation;
6082  VkBool32* m_pChanged;
6083 
6084  AllocationInfo() :
6085  m_hAllocation(VK_NULL_HANDLE),
6086  m_pChanged(VMA_NULL)
6087  {
6088  }
6089  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6090  m_hAllocation(hAlloc),
6091  m_pChanged(pChanged)
6092  {
6093  }
6094  };
6095 };
6096 
6097 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6098 {
6099  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6100 public:
6101  VmaDefragmentationAlgorithm_Generic(
6102  VmaAllocator hAllocator,
6103  VmaBlockVector* pBlockVector,
6104  uint32_t currentFrameIndex,
6105  bool overlappingMoveSupported);
6106  virtual ~VmaDefragmentationAlgorithm_Generic();
6107 
6108  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6109  virtual void AddAll() { m_AllAllocations = true; }
6110 
6111  virtual VkResult Defragment(
6112  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6113  VkDeviceSize maxBytesToMove,
6114  uint32_t maxAllocationsToMove);
6115 
6116  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6117  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6118 
6119 private:
6120  uint32_t m_AllocationCount;
6121  bool m_AllAllocations;
6122 
6123  VkDeviceSize m_BytesMoved;
6124  uint32_t m_AllocationsMoved;
6125 
6126  struct AllocationInfoSizeGreater
6127  {
6128  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6129  {
6130  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6131  }
6132  };
6133 
6134  struct AllocationInfoOffsetGreater
6135  {
6136  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6137  {
6138  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6139  }
6140  };
6141 
6142  struct BlockInfo
6143  {
6144  size_t m_OriginalBlockIndex;
6145  VmaDeviceMemoryBlock* m_pBlock;
6146  bool m_HasNonMovableAllocations;
6147  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6148 
6149  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6150  m_OriginalBlockIndex(SIZE_MAX),
6151  m_pBlock(VMA_NULL),
6152  m_HasNonMovableAllocations(true),
6153  m_Allocations(pAllocationCallbacks)
6154  {
6155  }
6156 
6157  void CalcHasNonMovableAllocations()
6158  {
6159  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6160  const size_t defragmentAllocCount = m_Allocations.size();
6161  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6162  }
6163 
6164  void SortAllocationsBySizeDescending()
6165  {
6166  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6167  }
6168 
6169  void SortAllocationsByOffsetDescending()
6170  {
6171  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6172  }
6173  };
6174 
6175  struct BlockPointerLess
6176  {
6177  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6178  {
6179  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6180  }
6181  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6182  {
6183  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6184  }
6185  };
6186 
6187  // 1. Blocks with some non-movable allocations go first.
6188  // 2. Blocks with smaller sumFreeSize go first.
6189  struct BlockInfoCompareMoveDestination
6190  {
6191  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6192  {
6193  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6194  {
6195  return true;
6196  }
6197  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6198  {
6199  return false;
6200  }
6201  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6202  {
6203  return true;
6204  }
6205  return false;
6206  }
6207  };
6208 
6209  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6210  BlockInfoVector m_Blocks;
6211 
6212  VkResult DefragmentRound(
6213  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6214  VkDeviceSize maxBytesToMove,
6215  uint32_t maxAllocationsToMove);
6216 
6217  size_t CalcBlocksWithNonMovableCount() const;
6218 
6219  static bool MoveMakesSense(
6220  size_t dstBlockIndex, VkDeviceSize dstOffset,
6221  size_t srcBlockIndex, VkDeviceSize srcOffset);
6222 };
6223 
6224 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6225 {
6226  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6227 public:
6228  VmaDefragmentationAlgorithm_Fast(
6229  VmaAllocator hAllocator,
6230  VmaBlockVector* pBlockVector,
6231  uint32_t currentFrameIndex,
6232  bool overlappingMoveSupported);
6233  virtual ~VmaDefragmentationAlgorithm_Fast();
6234 
6235  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6236  virtual void AddAll() { m_AllAllocations = true; }
6237 
6238  virtual VkResult Defragment(
6239  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6240  VkDeviceSize maxBytesToMove,
6241  uint32_t maxAllocationsToMove);
6242 
6243  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6244  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6245 
6246 private:
6247  struct BlockInfo
6248  {
6249  size_t origBlockIndex;
6250  };
6251 
6252  class FreeSpaceDatabase
6253  {
6254  public:
6255  FreeSpaceDatabase()
6256  {
6257  FreeSpace s = {};
6258  s.blockInfoIndex = SIZE_MAX;
6259  for(size_t i = 0; i < MAX_COUNT; ++i)
6260  {
6261  m_FreeSpaces[i] = s;
6262  }
6263  }
6264 
6265  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6266  {
6267  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6268  {
6269  return;
6270  }
6271 
6272  // Find the first unused slot or, failing that, the smallest registered free space that is smaller than the new one.
6273  size_t bestIndex = SIZE_MAX;
6274  for(size_t i = 0; i < MAX_COUNT; ++i)
6275  {
6276  // Empty structure.
6277  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6278  {
6279  bestIndex = i;
6280  break;
6281  }
6282  if(m_FreeSpaces[i].size < size &&
6283  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6284  {
6285  bestIndex = i;
6286  }
6287  }
6288 
6289  if(bestIndex != SIZE_MAX)
6290  {
6291  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6292  m_FreeSpaces[bestIndex].offset = offset;
6293  m_FreeSpaces[bestIndex].size = size;
6294  }
6295  }
6296 
6297  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6298  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6299  {
6300  size_t bestIndex = SIZE_MAX;
6301  VkDeviceSize bestFreeSpaceAfter = 0;
6302  for(size_t i = 0; i < MAX_COUNT; ++i)
6303  {
6304  // Structure is valid.
6305  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6306  {
6307  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6308  // Allocation fits into this structure.
6309  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6310  {
6311  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6312  (dstOffset + size);
6313  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6314  {
6315  bestIndex = i;
6316  bestFreeSpaceAfter = freeSpaceAfter;
6317  }
6318  }
6319  }
6320  }
6321 
6322  if(bestIndex != SIZE_MAX)
6323  {
6324  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6325  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6326 
6327  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6328  {
6329  // Leave this structure for remaining empty space.
6330  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6331  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6332  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6333  }
6334  else
6335  {
6336  // This structure becomes invalid.
6337  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6338  }
6339 
6340  return true;
6341  }
6342 
6343  return false;
6344  }
6345 
6346  private:
6347  static const size_t MAX_COUNT = 4;
6348 
6349  struct FreeSpace
6350  {
6351  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6352  VkDeviceSize offset;
6353  VkDeviceSize size;
6354  } m_FreeSpaces[MAX_COUNT];
6355  };
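 /*
 Example of the Fetch() arithmetic above (a sketch; the concrete value of
 VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER is defined earlier in this file):

     Registered free space: offset = 100, size = 200 (bytes 100..300).
     Fetch(alignment = 64, size = 100):
         dstOffset = VmaAlignUp(100, 64) = 128; 128 + 100 <= 300, so it fits;
         freeSpaceAfter = 300 - 228 = 72.
     If 72 >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, the slot is shrunk to
     offset = 228, size = 72; otherwise it is invalidated (blockInfoIndex = SIZE_MAX).
 */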
6356 
6357  const bool m_OverlappingMoveSupported;
6358 
6359  uint32_t m_AllocationCount;
6360  bool m_AllAllocations;
6361 
6362  VkDeviceSize m_BytesMoved;
6363  uint32_t m_AllocationsMoved;
6364 
6365  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6366 
6367  void PreprocessMetadata();
6368  void PostprocessMetadata();
6369  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6370 };
6371 
6372 struct VmaBlockDefragmentationContext
6373 {
6374  enum BLOCK_FLAG
6375  {
6376  BLOCK_FLAG_USED = 0x00000001,
6377  };
6378  uint32_t flags;
6379  VkBuffer hBuffer;
6380 
6381  VmaBlockDefragmentationContext() :
6382  flags(0),
6383  hBuffer(VK_NULL_HANDLE)
6384  {
6385  }
6386 };
6387 
6388 class VmaBlockVectorDefragmentationContext
6389 {
6390  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6391 public:
6392  VkResult res;
6393  bool mutexLocked;
6394  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6395 
6396  VmaBlockVectorDefragmentationContext(
6397  VmaAllocator hAllocator,
6398  VmaPool hCustomPool, // Optional.
6399  VmaBlockVector* pBlockVector,
6400  uint32_t currFrameIndex,
6401  uint32_t flags);
6402  ~VmaBlockVectorDefragmentationContext();
6403 
6404  VmaPool GetCustomPool() const { return m_hCustomPool; }
6405  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6406  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6407 
6408  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6409  void AddAll() { m_AllAllocations = true; }
6410 
6411  void Begin(bool overlappingMoveSupported);
6412 
6413 private:
6414  const VmaAllocator m_hAllocator;
6415  // Null if not from custom pool.
6416  const VmaPool m_hCustomPool;
6417  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6418  VmaBlockVector* const m_pBlockVector;
6419  const uint32_t m_CurrFrameIndex;
6420  const uint32_t m_AlgorithmFlags;
6421  // Owner of this object.
6422  VmaDefragmentationAlgorithm* m_pAlgorithm;
6423 
6424  struct AllocInfo
6425  {
6426  VmaAllocation hAlloc;
6427  VkBool32* pChanged;
6428  };
6429  // Used between constructor and Begin.
6430  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6431  bool m_AllAllocations;
6432 };
6433 
6434 struct VmaDefragmentationContext_T
6435 {
6436 private:
6437  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6438 public:
6439  VmaDefragmentationContext_T(
6440  VmaAllocator hAllocator,
6441  uint32_t currFrameIndex,
6442  uint32_t flags,
6443  VmaDefragmentationStats* pStats);
6444  ~VmaDefragmentationContext_T();
6445 
6446  void AddPools(uint32_t poolCount, VmaPool* pPools);
6447  void AddAllocations(
6448  uint32_t allocationCount,
6449  VmaAllocation* pAllocations,
6450  VkBool32* pAllocationsChanged);
6451 
6452  /*
6453  Returns:
6454  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6455  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6456  - Negative value if an error occurred and object can be destroyed immediately.
6457  */
6458  VkResult Defragment(
6459  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6460  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6461  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6462 
6463 private:
6464  const VmaAllocator m_hAllocator;
6465  const uint32_t m_CurrFrameIndex;
6466  const uint32_t m_Flags;
6467  VmaDefragmentationStats* const m_pStats;
6468  // Owner of these objects.
6469  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6470  // Owner of these objects.
6471  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6472 };
6473 
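// Illustrative lifecycle of this context as driven by the public API - a
// sketch, not library code:
//
//   VmaDefragmentationContext ctx;
//   vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
//   // ... if VK_NOT_READY was returned, submit and wait on the command
//   // buffer before ending ...
//   vmaDefragmentationEnd(allocator, ctx);
//
// Begin creates a VmaDefragmentationContext_T, feeds it the requested pools
// and allocations through AddPools()/AddAllocations(), and calls Defragment()
// with the CPU/GPU byte and allocation limits from VmaDefragmentationInfo2.
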
6474 #if VMA_RECORDING_ENABLED
6475 
6476 class VmaRecorder
6477 {
6478 public:
6479  VmaRecorder();
6480  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6481  void WriteConfiguration(
6482  const VkPhysicalDeviceProperties& devProps,
6483  const VkPhysicalDeviceMemoryProperties& memProps,
6484  bool dedicatedAllocationExtensionEnabled);
6485  ~VmaRecorder();
6486 
6487  void RecordCreateAllocator(uint32_t frameIndex);
6488  void RecordDestroyAllocator(uint32_t frameIndex);
6489  void RecordCreatePool(uint32_t frameIndex,
6490  const VmaPoolCreateInfo& createInfo,
6491  VmaPool pool);
6492  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6493  void RecordAllocateMemory(uint32_t frameIndex,
6494  const VkMemoryRequirements& vkMemReq,
6495  const VmaAllocationCreateInfo& createInfo,
6496  VmaAllocation allocation);
6497  void RecordAllocateMemoryPages(uint32_t frameIndex,
6498  const VkMemoryRequirements& vkMemReq,
6499  const VmaAllocationCreateInfo& createInfo,
6500  uint64_t allocationCount,
6501  const VmaAllocation* pAllocations);
6502  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6503  const VkMemoryRequirements& vkMemReq,
6504  bool requiresDedicatedAllocation,
6505  bool prefersDedicatedAllocation,
6506  const VmaAllocationCreateInfo& createInfo,
6507  VmaAllocation allocation);
6508  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6509  const VkMemoryRequirements& vkMemReq,
6510  bool requiresDedicatedAllocation,
6511  bool prefersDedicatedAllocation,
6512  const VmaAllocationCreateInfo& createInfo,
6513  VmaAllocation allocation);
6514  void RecordFreeMemory(uint32_t frameIndex,
6515  VmaAllocation allocation);
6516  void RecordFreeMemoryPages(uint32_t frameIndex,
6517  uint64_t allocationCount,
6518  const VmaAllocation* pAllocations);
6519  void RecordResizeAllocation(
6520  uint32_t frameIndex,
6521  VmaAllocation allocation,
6522  VkDeviceSize newSize);
6523  void RecordSetAllocationUserData(uint32_t frameIndex,
6524  VmaAllocation allocation,
6525  const void* pUserData);
6526  void RecordCreateLostAllocation(uint32_t frameIndex,
6527  VmaAllocation allocation);
6528  void RecordMapMemory(uint32_t frameIndex,
6529  VmaAllocation allocation);
6530  void RecordUnmapMemory(uint32_t frameIndex,
6531  VmaAllocation allocation);
6532  void RecordFlushAllocation(uint32_t frameIndex,
6533  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6534  void RecordInvalidateAllocation(uint32_t frameIndex,
6535  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6536  void RecordCreateBuffer(uint32_t frameIndex,
6537  const VkBufferCreateInfo& bufCreateInfo,
6538  const VmaAllocationCreateInfo& allocCreateInfo,
6539  VmaAllocation allocation);
6540  void RecordCreateImage(uint32_t frameIndex,
6541  const VkImageCreateInfo& imageCreateInfo,
6542  const VmaAllocationCreateInfo& allocCreateInfo,
6543  VmaAllocation allocation);
6544  void RecordDestroyBuffer(uint32_t frameIndex,
6545  VmaAllocation allocation);
6546  void RecordDestroyImage(uint32_t frameIndex,
6547  VmaAllocation allocation);
6548  void RecordTouchAllocation(uint32_t frameIndex,
6549  VmaAllocation allocation);
6550  void RecordGetAllocationInfo(uint32_t frameIndex,
6551  VmaAllocation allocation);
6552  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6553  VmaPool pool);
6554  void RecordDefragmentationBegin(uint32_t frameIndex,
6555  const VmaDefragmentationInfo2& info,
6556  VmaDefragmentationContext ctx);
6557  void RecordDefragmentationEnd(uint32_t frameIndex,
6558  VmaDefragmentationContext ctx);
6559 
6560 private:
6561  struct CallParams
6562  {
6563  uint32_t threadId;
6564  double time;
6565  };
6566 
6567  class UserDataString
6568  {
6569  public:
6570  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6571  const char* GetString() const { return m_Str; }
6572 
6573  private:
6574  char m_PtrStr[17];
6575  const char* m_Str;
6576  };
6577 
6578  bool m_UseMutex;
6579  VmaRecordFlags m_Flags;
6580  FILE* m_File;
6581  VMA_MUTEX m_FileMutex;
6582  int64_t m_Freq;
6583  int64_t m_StartCounter;
6584 
6585  void GetBasicParams(CallParams& outParams);
6586 
6587  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6588  template<typename T>
6589  void PrintPointerList(uint64_t count, const T* pItems)
6590  {
6591  if(count)
6592  {
6593  fprintf(m_File, "%p", pItems[0]);
6594  for(uint64_t i = 1; i < count; ++i)
6595  {
6596  fprintf(m_File, " %p", pItems[i]);
6597  }
6598  }
6599  }
6600 
6601  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6602  void Flush();
6603 };
6604 
6605 #endif // #if VMA_RECORDING_ENABLED
6606 
6607 /*
6608 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6609 */
6610 class VmaAllocationObjectAllocator
6611 {
6612  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6613 public:
6614  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6615 
6616  VmaAllocation Allocate();
6617  void Free(VmaAllocation hAlloc);
6618 
6619 private:
6620  VMA_MUTEX m_Mutex;
6621  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6622 };
6623 
6624 // Main allocator object.
6625 struct VmaAllocator_T
6626 {
6627  VMA_CLASS_NO_COPY(VmaAllocator_T)
6628 public:
6629  bool m_UseMutex;
6630  bool m_UseKhrDedicatedAllocation;
6631  VkDevice m_hDevice;
6632  bool m_AllocationCallbacksSpecified;
6633  VkAllocationCallbacks m_AllocationCallbacks;
6634  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6635  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6636 
6637  // Number of bytes still free within the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
6638  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
6639  VMA_MUTEX m_HeapSizeLimitMutex;
6640 
6641  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6642  VkPhysicalDeviceMemoryProperties m_MemProps;
6643 
6644  // Default pools.
6645  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6646 
6647  // Each vector is sorted by memory (handle value).
6648  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6649  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6650  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6651 
6652  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6653  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6654  ~VmaAllocator_T();
6655 
6656  const VkAllocationCallbacks* GetAllocationCallbacks() const
6657  {
6658  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6659  }
6660  const VmaVulkanFunctions& GetVulkanFunctions() const
6661  {
6662  return m_VulkanFunctions;
6663  }
6664 
6665  VkDeviceSize GetBufferImageGranularity() const
6666  {
6667  return VMA_MAX(
6668  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6669  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6670  }
6671 
6672  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6673  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6674 
6675  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6676  {
6677  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6678  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6679  }
6680  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6681  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6682  {
6683  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6684  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6685  }
6686  // Minimum alignment for all allocations in specific memory type.
6687  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6688  {
6689  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6690  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6691  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6692  }
6693 
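 // Example of the rule above: with VMA_DEBUG_ALIGNMENT == 1 and a device
 // whose nonCoherentAtomSize is 64, allocations in a HOST_VISIBLE but
 // non-HOST_COHERENT memory type get 64-byte minimum alignment, so flush /
 // invalidate ranges rounded to atom boundaries cannot touch a neighboring
 // allocation.
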
6694  bool IsIntegratedGpu() const
6695  {
6696  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6697  }
6698 
6699 #if VMA_RECORDING_ENABLED
6700  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6701 #endif
6702 
6703  void GetBufferMemoryRequirements(
6704  VkBuffer hBuffer,
6705  VkMemoryRequirements& memReq,
6706  bool& requiresDedicatedAllocation,
6707  bool& prefersDedicatedAllocation) const;
6708  void GetImageMemoryRequirements(
6709  VkImage hImage,
6710  VkMemoryRequirements& memReq,
6711  bool& requiresDedicatedAllocation,
6712  bool& prefersDedicatedAllocation) const;
6713 
6714  // Main allocation function.
6715  VkResult AllocateMemory(
6716  const VkMemoryRequirements& vkMemReq,
6717  bool requiresDedicatedAllocation,
6718  bool prefersDedicatedAllocation,
6719  VkBuffer dedicatedBuffer,
6720  VkImage dedicatedImage,
6721  const VmaAllocationCreateInfo& createInfo,
6722  VmaSuballocationType suballocType,
6723  size_t allocationCount,
6724  VmaAllocation* pAllocations);
6725 
6726  // Main deallocation function.
6727  void FreeMemory(
6728  size_t allocationCount,
6729  const VmaAllocation* pAllocations);
6730 
6731  VkResult ResizeAllocation(
6732  const VmaAllocation alloc,
6733  VkDeviceSize newSize);
6734 
6735  void CalculateStats(VmaStats* pStats);
6736 
6737 #if VMA_STATS_STRING_ENABLED
6738  void PrintDetailedMap(class VmaJsonWriter& json);
6739 #endif
6740 
6741  VkResult DefragmentationBegin(
6742  const VmaDefragmentationInfo2& info,
6743  VmaDefragmentationStats* pStats,
6744  VmaDefragmentationContext* pContext);
6745  VkResult DefragmentationEnd(
6746  VmaDefragmentationContext context);
6747 
6748  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
6749  bool TouchAllocation(VmaAllocation hAllocation);
6750 
6751  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
6752  void DestroyPool(VmaPool pool);
6753  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
6754 
6755  void SetCurrentFrameIndex(uint32_t frameIndex);
6756  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
6757 
6758  void MakePoolAllocationsLost(
6759  VmaPool hPool,
6760  size_t* pLostAllocationCount);
6761  VkResult CheckPoolCorruption(VmaPool hPool);
6762  VkResult CheckCorruption(uint32_t memoryTypeBits);
6763 
6764  void CreateLostAllocation(VmaAllocation* pAllocation);
6765 
6766  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
6767  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
6768 
6769  VkResult Map(VmaAllocation hAllocation, void** ppData);
6770  void Unmap(VmaAllocation hAllocation);
6771 
6772  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
6773  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
6774 
6775  void FlushOrInvalidateAllocation(
6776  VmaAllocation hAllocation,
6777  VkDeviceSize offset, VkDeviceSize size,
6778  VMA_CACHE_OPERATION op);
6779 
6780  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
6781 
6782 private:
6783  VkDeviceSize m_PreferredLargeHeapBlockSize;
6784 
6785  VkPhysicalDevice m_PhysicalDevice;
6786  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
6787 
6788  VMA_RW_MUTEX m_PoolsMutex;
6789  // Protected by m_PoolsMutex. Sorted by pointer value.
6790  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
6791  uint32_t m_NextPoolId;
6792 
6793  VmaVulkanFunctions m_VulkanFunctions;
6794 
6795 #if VMA_RECORDING_ENABLED
6796  VmaRecorder* m_pRecorder;
6797 #endif
6798 
6799  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
6800 
6801  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
6802 
6803  VkResult AllocateMemoryOfType(
6804  VkDeviceSize size,
6805  VkDeviceSize alignment,
6806  bool dedicatedAllocation,
6807  VkBuffer dedicatedBuffer,
6808  VkImage dedicatedImage,
6809  const VmaAllocationCreateInfo& createInfo,
6810  uint32_t memTypeIndex,
6811  VmaSuballocationType suballocType,
6812  size_t allocationCount,
6813  VmaAllocation* pAllocations);
6814 
6815  // Helper function only to be used inside AllocateDedicatedMemory.
6816  VkResult AllocateDedicatedMemoryPage(
6817  VkDeviceSize size,
6818  VmaSuballocationType suballocType,
6819  uint32_t memTypeIndex,
6820  const VkMemoryAllocateInfo& allocInfo,
6821  bool map,
6822  bool isUserDataString,
6823  void* pUserData,
6824  VmaAllocation* pAllocation);
6825 
6826  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
6827  VkResult AllocateDedicatedMemory(
6828  VkDeviceSize size,
6829  VmaSuballocationType suballocType,
6830  uint32_t memTypeIndex,
6831  bool map,
6832  bool isUserDataString,
6833  void* pUserData,
6834  VkBuffer dedicatedBuffer,
6835  VkImage dedicatedImage,
6836  size_t allocationCount,
6837  VmaAllocation* pAllocations);
6838 
6839  // Unregisters given dedicated allocation and frees its device memory.
6840  void FreeDedicatedMemory(VmaAllocation allocation);
6841 };
6842 
6844 // Memory allocation #2 after VmaAllocator_T definition
6845 
6846 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6847 {
6848  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6849 }
6850 
6851 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6852 {
6853  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6854 }
6855 
6856 template<typename T>
6857 static T* VmaAllocate(VmaAllocator hAllocator)
6858 {
6859  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6860 }
6861 
6862 template<typename T>
6863 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6864 {
6865  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6866 }
6867 
6868 template<typename T>
6869 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6870 {
6871  if(ptr != VMA_NULL)
6872  {
6873  ptr->~T();
6874  VmaFree(hAllocator, ptr);
6875  }
6876 }
6877 
6878 template<typename T>
6879 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6880 {
6881  if(ptr != VMA_NULL)
6882  {
6883  for(size_t i = count; i--; )
6884  ptr[i].~T();
6885  VmaFree(hAllocator, ptr);
6886  }
6887 }
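
// Illustrative pairing of the helpers above (a sketch, not library code;
// MyType is a placeholder):
//
//   MyType* p = VmaAllocate<MyType>(hAllocator); // raw, suitably aligned storage
//   new(p) MyType();                             // construct in place
//   ...
//   vma_delete(hAllocator, p);                   // ~MyType() + VmaFree
//
// vma_delete_array destroys elements in reverse order before freeing,
// matching the usual C++ array-destruction convention.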
6888 
6890 // VmaStringBuilder
6891 
6892 #if VMA_STATS_STRING_ENABLED
6893 
6894 class VmaStringBuilder
6895 {
6896 public:
6897  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6898  size_t GetLength() const { return m_Data.size(); }
6899  const char* GetData() const { return m_Data.data(); }
6900 
6901  void Add(char ch) { m_Data.push_back(ch); }
6902  void Add(const char* pStr);
6903  void AddNewLine() { Add('\n'); }
6904  void AddNumber(uint32_t num);
6905  void AddNumber(uint64_t num);
6906  void AddPointer(const void* ptr);
6907 
6908 private:
6909  VmaVector< char, VmaStlAllocator<char> > m_Data;
6910 };
6911 
6912 void VmaStringBuilder::Add(const char* pStr)
6913 {
6914  const size_t strLen = strlen(pStr);
6915  if(strLen > 0)
6916  {
6917  const size_t oldCount = m_Data.size();
6918  m_Data.resize(oldCount + strLen);
6919  memcpy(m_Data.data() + oldCount, pStr, strLen);
6920  }
6921 }
6922 
6923 void VmaStringBuilder::AddNumber(uint32_t num)
6924 {
6925  char buf[11]; // Max 10 decimal digits of uint32_t + terminating null.
6926  VmaUint32ToStr(buf, sizeof(buf), num);
6927  Add(buf);
6928 }
6929 
6930 void VmaStringBuilder::AddNumber(uint64_t num)
6931 {
6932  char buf[21]; // Max 20 decimal digits of uint64_t + terminating null.
6933  VmaUint64ToStr(buf, sizeof(buf), num);
6934  Add(buf);
6935 }
6936 
6937 void VmaStringBuilder::AddPointer(const void* ptr)
6938 {
6939  char buf[21]; // Large enough for a 64-bit pointer representation + terminating null.
6940  VmaPtrToStr(buf, sizeof(buf), ptr);
6941  Add(buf);
6942 }
6943 
6944 #endif // #if VMA_STATS_STRING_ENABLED
6945 
6947 // VmaJsonWriter
6948 
6949 #if VMA_STATS_STRING_ENABLED
6950 
6951 class VmaJsonWriter
6952 {
6953  VMA_CLASS_NO_COPY(VmaJsonWriter)
6954 public:
6955  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6956  ~VmaJsonWriter();
6957 
6958  void BeginObject(bool singleLine = false);
6959  void EndObject();
6960 
6961  void BeginArray(bool singleLine = false);
6962  void EndArray();
6963 
6964  void WriteString(const char* pStr);
6965  void BeginString(const char* pStr = VMA_NULL);
6966  void ContinueString(const char* pStr);
6967  void ContinueString(uint32_t n);
6968  void ContinueString(uint64_t n);
6969  void ContinueString_Pointer(const void* ptr);
6970  void EndString(const char* pStr = VMA_NULL);
6971 
6972  void WriteNumber(uint32_t n);
6973  void WriteNumber(uint64_t n);
6974  void WriteBool(bool b);
6975  void WriteNull();
6976 
6977 private:
6978  static const char* const INDENT;
6979 
6980  enum COLLECTION_TYPE
6981  {
6982  COLLECTION_TYPE_OBJECT,
6983  COLLECTION_TYPE_ARRAY,
6984  };
6985  struct StackItem
6986  {
6987  COLLECTION_TYPE type;
6988  uint32_t valueCount;
6989  bool singleLineMode;
6990  };
6991 
6992  VmaStringBuilder& m_SB;
6993  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6994  bool m_InsideString;
6995 
6996  void BeginValue(bool isString);
6997  void WriteIndent(bool oneLess = false);
6998 };
6999 
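// Illustrative usage of VmaJsonWriter (a sketch, not library code):
//
//   VmaStringBuilder sb(allocator);
//   VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//   json.BeginObject();
//   json.WriteString("Count"); // key - inside an object, keys must be strings
//   json.WriteNumber(42u);     // value
//   json.EndObject();
//
// sb then holds roughly: {\n  "Count": 42\n}
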
7000 const char* const VmaJsonWriter::INDENT = "  ";
7001 
7002 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7003  m_SB(sb),
7004  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7005  m_InsideString(false)
7006 {
7007 }
7008 
7009 VmaJsonWriter::~VmaJsonWriter()
7010 {
7011  VMA_ASSERT(!m_InsideString);
7012  VMA_ASSERT(m_Stack.empty());
7013 }
7014 
7015 void VmaJsonWriter::BeginObject(bool singleLine)
7016 {
7017  VMA_ASSERT(!m_InsideString);
7018 
7019  BeginValue(false);
7020  m_SB.Add('{');
7021 
7022  StackItem item;
7023  item.type = COLLECTION_TYPE_OBJECT;
7024  item.valueCount = 0;
7025  item.singleLineMode = singleLine;
7026  m_Stack.push_back(item);
7027 }
7028 
7029 void VmaJsonWriter::EndObject()
7030 {
7031  VMA_ASSERT(!m_InsideString);
7032 
7033  WriteIndent(true);
7034  m_SB.Add('}');
7035 
7036  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7037  m_Stack.pop_back();
7038 }
7039 
7040 void VmaJsonWriter::BeginArray(bool singleLine)
7041 {
7042  VMA_ASSERT(!m_InsideString);
7043 
7044  BeginValue(false);
7045  m_SB.Add('[');
7046 
7047  StackItem item;
7048  item.type = COLLECTION_TYPE_ARRAY;
7049  item.valueCount = 0;
7050  item.singleLineMode = singleLine;
7051  m_Stack.push_back(item);
7052 }
7053 
7054 void VmaJsonWriter::EndArray()
7055 {
7056  VMA_ASSERT(!m_InsideString);
7057 
7058  WriteIndent(true);
7059  m_SB.Add(']');
7060 
7061  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7062  m_Stack.pop_back();
7063 }
7064 
7065 void VmaJsonWriter::WriteString(const char* pStr)
7066 {
7067  BeginString(pStr);
7068  EndString();
7069 }
7070 
7071 void VmaJsonWriter::BeginString(const char* pStr)
7072 {
7073  VMA_ASSERT(!m_InsideString);
7074 
7075  BeginValue(true);
7076  m_SB.Add('"');
7077  m_InsideString = true;
7078  if(pStr != VMA_NULL && pStr[0] != '\0')
7079  {
7080  ContinueString(pStr);
7081  }
7082 }
7083 
7084 void VmaJsonWriter::ContinueString(const char* pStr)
7085 {
7086  VMA_ASSERT(m_InsideString);
7087 
7088  const size_t strLen = strlen(pStr);
7089  for(size_t i = 0; i < strLen; ++i)
7090  {
7091  char ch = pStr[i];
7092  if(ch == '\\')
7093  {
7094  m_SB.Add("\\\\");
7095  }
7096  else if(ch == '"')
7097  {
7098  m_SB.Add("\\\"");
7099  }
7100  else if(ch >= 32)
7101  {
7102  m_SB.Add(ch);
7103  }
7104  else switch(ch)
7105  {
7106  case '\b':
7107  m_SB.Add("\\b");
7108  break;
7109  case '\f':
7110  m_SB.Add("\\f");
7111  break;
7112  case '\n':
7113  m_SB.Add("\\n");
7114  break;
7115  case '\r':
7116  m_SB.Add("\\r");
7117  break;
7118  case '\t':
7119  m_SB.Add("\\t");
7120  break;
7121  default:
7122  VMA_ASSERT(0 && "Character not currently supported.");
7123  break;
7124  }
7125  }
7126 }
7127 
7128 void VmaJsonWriter::ContinueString(uint32_t n)
7129 {
7130  VMA_ASSERT(m_InsideString);
7131  m_SB.AddNumber(n);
7132 }
7133 
7134 void VmaJsonWriter::ContinueString(uint64_t n)
7135 {
7136  VMA_ASSERT(m_InsideString);
7137  m_SB.AddNumber(n);
7138 }
7139 
7140 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7141 {
7142  VMA_ASSERT(m_InsideString);
7143  m_SB.AddPointer(ptr);
7144 }
7145 
7146 void VmaJsonWriter::EndString(const char* pStr)
7147 {
7148  VMA_ASSERT(m_InsideString);
7149  if(pStr != VMA_NULL && pStr[0] != '\0')
7150  {
7151  ContinueString(pStr);
7152  }
7153  m_SB.Add('"');
7154  m_InsideString = false;
7155 }
7156 
7157 void VmaJsonWriter::WriteNumber(uint32_t n)
7158 {
7159  VMA_ASSERT(!m_InsideString);
7160  BeginValue(false);
7161  m_SB.AddNumber(n);
7162 }
7163 
7164 void VmaJsonWriter::WriteNumber(uint64_t n)
7165 {
7166  VMA_ASSERT(!m_InsideString);
7167  BeginValue(false);
7168  m_SB.AddNumber(n);
7169 }
7170 
7171 void VmaJsonWriter::WriteBool(bool b)
7172 {
7173  VMA_ASSERT(!m_InsideString);
7174  BeginValue(false);
7175  m_SB.Add(b ? "true" : "false");
7176 }
7177 
7178 void VmaJsonWriter::WriteNull()
7179 {
7180  VMA_ASSERT(!m_InsideString);
7181  BeginValue(false);
7182  m_SB.Add("null");
7183 }
7184 
7185 void VmaJsonWriter::BeginValue(bool isString)
7186 {
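 // Inside an object, entries alternate key/value: an even valueCount means a
 // key is expected (and must be a string), an odd one means the matching
 // value, which is prefixed with ": ". Array items are separated with ", ".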
7187  if(!m_Stack.empty())
7188  {
7189  StackItem& currItem = m_Stack.back();
7190  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7191  currItem.valueCount % 2 == 0)
7192  {
7193  VMA_ASSERT(isString);
7194  }
7195 
7196  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7197  currItem.valueCount % 2 != 0)
7198  {
7199  m_SB.Add(": ");
7200  }
7201  else if(currItem.valueCount > 0)
7202  {
7203  m_SB.Add(", ");
7204  WriteIndent();
7205  }
7206  else
7207  {
7208  WriteIndent();
7209  }
7210  ++currItem.valueCount;
7211  }
7212 }
7213 
7214 void VmaJsonWriter::WriteIndent(bool oneLess)
7215 {
7216  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7217  {
7218  m_SB.AddNewLine();
7219 
7220  size_t count = m_Stack.size();
7221  if(count > 0 && oneLess)
7222  {
7223  --count;
7224  }
7225  for(size_t i = 0; i < count; ++i)
7226  {
7227  m_SB.Add(INDENT);
7228  }
7229  }
7230 }
7231 
7232 #endif // #if VMA_STATS_STRING_ENABLED
7233 
7235 
7236 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7237 {
7238  if(IsUserDataString())
7239  {
7240  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7241 
7242  FreeUserDataString(hAllocator);
7243 
7244  if(pUserData != VMA_NULL)
7245  {
7246  const char* const newStrSrc = (char*)pUserData;
7247  const size_t newStrLen = strlen(newStrSrc);
7248  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7249  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7250  m_pUserData = newStrDst;
7251  }
7252  }
7253  else
7254  {
7255  m_pUserData = pUserData;
7256  }
7257 }
7258 
7259 void VmaAllocation_T::ChangeBlockAllocation(
7260  VmaAllocator hAllocator,
7261  VmaDeviceMemoryBlock* block,
7262  VkDeviceSize offset)
7263 {
7264  VMA_ASSERT(block != VMA_NULL);
7265  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7266 
7267  // Move mapping reference counter from old block to new block.
7268  if(block != m_BlockAllocation.m_Block)
7269  {
7270  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7271  if(IsPersistentMap())
7272  ++mapRefCount;
7273  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7274  block->Map(hAllocator, mapRefCount, VMA_NULL);
7275  }
7276 
7277  m_BlockAllocation.m_Block = block;
7278  m_BlockAllocation.m_Offset = offset;
7279 }
7280 
7281 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
7282 {
7283  VMA_ASSERT(newSize > 0);
7284  m_Size = newSize;
7285 }
7286 
7287 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7288 {
7289  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7290  m_BlockAllocation.m_Offset = newOffset;
7291 }
7292 
7293 VkDeviceSize VmaAllocation_T::GetOffset() const
7294 {
7295  switch(m_Type)
7296  {
7297  case ALLOCATION_TYPE_BLOCK:
7298  return m_BlockAllocation.m_Offset;
7299  case ALLOCATION_TYPE_DEDICATED:
7300  return 0;
7301  default:
7302  VMA_ASSERT(0);
7303  return 0;
7304  }
7305 }
7306 
7307 VkDeviceMemory VmaAllocation_T::GetMemory() const
7308 {
7309  switch(m_Type)
7310  {
7311  case ALLOCATION_TYPE_BLOCK:
7312  return m_BlockAllocation.m_Block->GetDeviceMemory();
7313  case ALLOCATION_TYPE_DEDICATED:
7314  return m_DedicatedAllocation.m_hMemory;
7315  default:
7316  VMA_ASSERT(0);
7317  return VK_NULL_HANDLE;
7318  }
7319 }
7320 
7321 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7322 {
7323  switch(m_Type)
7324  {
7325  case ALLOCATION_TYPE_BLOCK:
7326  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7327  case ALLOCATION_TYPE_DEDICATED:
7328  return m_DedicatedAllocation.m_MemoryTypeIndex;
7329  default:
7330  VMA_ASSERT(0);
7331  return UINT32_MAX;
7332  }
7333 }
7334 
7335 void* VmaAllocation_T::GetMappedData() const
7336 {
7337  switch(m_Type)
7338  {
7339  case ALLOCATION_TYPE_BLOCK:
7340  if(m_MapCount != 0)
7341  {
7342  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7343  VMA_ASSERT(pBlockData != VMA_NULL);
7344  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7345  }
7346  else
7347  {
7348  return VMA_NULL;
7349  }
7350  break;
7351  case ALLOCATION_TYPE_DEDICATED:
7352  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7353  return m_DedicatedAllocation.m_pMappedData;
7354  default:
7355  VMA_ASSERT(0);
7356  return VMA_NULL;
7357  }
7358 }
7359 
7360 bool VmaAllocation_T::CanBecomeLost() const
7361 {
7362  switch(m_Type)
7363  {
7364  case ALLOCATION_TYPE_BLOCK:
7365  return m_BlockAllocation.m_CanBecomeLost;
7366  case ALLOCATION_TYPE_DEDICATED:
7367  return false;
7368  default:
7369  VMA_ASSERT(0);
7370  return false;
7371  }
7372 }
7373 
7374 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7375 {
7376  VMA_ASSERT(CanBecomeLost());
7377 
7378  /*
7379  Warning: This is a carefully designed algorithm.
7380  Do not modify unless you really know what you're doing :)
7381  */
7382  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7383  for(;;)
7384  {
7385  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7386  {
7387  VMA_ASSERT(0);
7388  return false;
7389  }
7390  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7391  {
7392  return false;
7393  }
7394  else // Last use time earlier than current time.
7395  {
7396  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7397  {
7398  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7399  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7400  return true;
7401  }
7402  }
7403  }
7404 }
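
// Worked example of the check above: with frameInUseCount = 2 and
// currentFrameIndex = 10, an allocation last used in frame 8 is still
// protected (8 + 2 >= 10), while one last used in frame 7 can be retired to
// VMA_FRAME_INDEX_LOST. The compare-exchange loop simply retries if another
// thread updated the last-use frame index between the read and the swap.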
7405 
7406 #if VMA_STATS_STRING_ENABLED
7407 
7408 // Correspond to values of enum VmaSuballocationType.
7409 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7410  "FREE",
7411  "UNKNOWN",
7412  "BUFFER",
7413  "IMAGE_UNKNOWN",
7414  "IMAGE_LINEAR",
7415  "IMAGE_OPTIMAL",
7416 };
7417 
7418 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7419 {
7420  json.WriteString("Type");
7421  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7422 
7423  json.WriteString("Size");
7424  json.WriteNumber(m_Size);
7425 
7426  if(m_pUserData != VMA_NULL)
7427  {
7428  json.WriteString("UserData");
7429  if(IsUserDataString())
7430  {
7431  json.WriteString((const char*)m_pUserData);
7432  }
7433  else
7434  {
7435  json.BeginString();
7436  json.ContinueString_Pointer(m_pUserData);
7437  json.EndString();
7438  }
7439  }
7440 
7441  json.WriteString("CreationFrameIndex");
7442  json.WriteNumber(m_CreationFrameIndex);
7443 
7444  json.WriteString("LastUseFrameIndex");
7445  json.WriteNumber(GetLastUseFrameIndex());
7446 
7447  if(m_BufferImageUsage != 0)
7448  {
7449  json.WriteString("Usage");
7450  json.WriteNumber(m_BufferImageUsage);
7451  }
7452 }
7453 
7454 #endif
7455 
7456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7457 {
7458  VMA_ASSERT(IsUserDataString());
7459  if(m_pUserData != VMA_NULL)
7460  {
7461  char* const oldStr = (char*)m_pUserData;
7462  const size_t oldStrLen = strlen(oldStr);
7463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7464  m_pUserData = VMA_NULL;
7465  }
7466 }
7467 
7468 void VmaAllocation_T::BlockAllocMap()
7469 {
7470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7471 
7472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7473  {
7474  ++m_MapCount;
7475  }
7476  else
7477  {
7478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7479  }
7480 }
7481 
7482 void VmaAllocation_T::BlockAllocUnmap()
7483 {
7484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7485 
7486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7487  {
7488  --m_MapCount;
7489  }
7490  else
7491  {
7492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7493  }
7494 }
7495 
7496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7497 {
7498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7499 
7500  if(m_MapCount != 0)
7501  {
7502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7503  {
7504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7505  *ppData = m_DedicatedAllocation.m_pMappedData;
7506  ++m_MapCount;
7507  return VK_SUCCESS;
7508  }
7509  else
7510  {
7511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7512  return VK_ERROR_MEMORY_MAP_FAILED;
7513  }
7514  }
7515  else
7516  {
7517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7518  hAllocator->m_hDevice,
7519  m_DedicatedAllocation.m_hMemory,
7520  0, // offset
7521  VK_WHOLE_SIZE,
7522  0, // flags
7523  ppData);
7524  if(result == VK_SUCCESS)
7525  {
7526  m_DedicatedAllocation.m_pMappedData = *ppData;
7527  m_MapCount = 1;
7528  }
7529  return result;
7530  }
7531 }
7532 
7533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7534 {
7535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7536 
7537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7538  {
7539  --m_MapCount;
7540  if(m_MapCount == 0)
7541  {
7542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7544  hAllocator->m_hDevice,
7545  m_DedicatedAllocation.m_hMemory);
7546  }
7547  }
7548  else
7549  {
7550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7551  }
7552 }
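
// Note on m_MapCount, shared by the Map/Unmap methods above: the low bits
// hold a reference count capped at 0x7F, while MAP_COUNT_FLAG_PERSISTENT_MAP
// is kept as a separate flag bit for allocations created persistently mapped.
// With that flag set the counter can never reach zero, so the memory stays
// mapped for the allocation's whole lifetime.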
7553 
7554 #if VMA_STATS_STRING_ENABLED
7555 
7556 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7557 {
7558  json.BeginObject();
7559 
7560  json.WriteString("Blocks");
7561  json.WriteNumber(stat.blockCount);
7562 
7563  json.WriteString("Allocations");
7564  json.WriteNumber(stat.allocationCount);
7565 
7566  json.WriteString("UnusedRanges");
7567  json.WriteNumber(stat.unusedRangeCount);
7568 
7569  json.WriteString("UsedBytes");
7570  json.WriteNumber(stat.usedBytes);
7571 
7572  json.WriteString("UnusedBytes");
7573  json.WriteNumber(stat.unusedBytes);
7574 
7575  if(stat.allocationCount > 1)
7576  {
7577  json.WriteString("AllocationSize");
7578  json.BeginObject(true);
7579  json.WriteString("Min");
7580  json.WriteNumber(stat.allocationSizeMin);
7581  json.WriteString("Avg");
7582  json.WriteNumber(stat.allocationSizeAvg);
7583  json.WriteString("Max");
7584  json.WriteNumber(stat.allocationSizeMax);
7585  json.EndObject();
7586  }
7587 
7588  if(stat.unusedRangeCount > 1)
7589  {
7590  json.WriteString("UnusedRangeSize");
7591  json.BeginObject(true);
7592  json.WriteString("Min");
7593  json.WriteNumber(stat.unusedRangeSizeMin);
7594  json.WriteString("Avg");
7595  json.WriteNumber(stat.unusedRangeSizeAvg);
7596  json.WriteString("Max");
7597  json.WriteNumber(stat.unusedRangeSizeMax);
7598  json.EndObject();
7599  }
7600 
7601  json.EndObject();
7602 }
7603 
7604 #endif // #if VMA_STATS_STRING_ENABLED
7605 
7606 struct VmaSuballocationItemSizeLess
7607 {
7608  bool operator()(
7609  const VmaSuballocationList::iterator lhs,
7610  const VmaSuballocationList::iterator rhs) const
7611  {
7612  return lhs->size < rhs->size;
7613  }
7614  bool operator()(
7615  const VmaSuballocationList::iterator lhs,
7616  VkDeviceSize rhsSize) const
7617  {
7618  return lhs->size < rhsSize;
7619  }
7620 };
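
// The two overloads above allow heterogeneous comparison: the same comparator
// orders suballocation iterators among themselves and against a plain
// VkDeviceSize key, which is what the binary search in
// VmaBlockMetadata_Generic::CreateAllocationRequest below relies on.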
7621 
7622 
7624 // class VmaBlockMetadata
7625 
7626 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7627  m_Size(0),
7628  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7629 {
7630 }
7631 
7632 #if VMA_STATS_STRING_ENABLED
7633 
7634 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7635  VkDeviceSize unusedBytes,
7636  size_t allocationCount,
7637  size_t unusedRangeCount) const
7638 {
7639  json.BeginObject();
7640 
7641  json.WriteString("TotalBytes");
7642  json.WriteNumber(GetSize());
7643 
7644  json.WriteString("UnusedBytes");
7645  json.WriteNumber(unusedBytes);
7646 
7647  json.WriteString("Allocations");
7648  json.WriteNumber((uint64_t)allocationCount);
7649 
7650  json.WriteString("UnusedRanges");
7651  json.WriteNumber((uint64_t)unusedRangeCount);
7652 
7653  json.WriteString("Suballocations");
7654  json.BeginArray();
7655 }
7656 
7657 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
7658  VkDeviceSize offset,
7659  VmaAllocation hAllocation) const
7660 {
7661  json.BeginObject(true);
7662 
7663  json.WriteString("Offset");
7664  json.WriteNumber(offset);
7665 
7666  hAllocation->PrintParameters(json);
7667 
7668  json.EndObject();
7669 }
7670 
7671 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
7672  VkDeviceSize offset,
7673  VkDeviceSize size) const
7674 {
7675  json.BeginObject(true);
7676 
7677  json.WriteString("Offset");
7678  json.WriteNumber(offset);
7679 
7680  json.WriteString("Type");
7681  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
7682 
7683  json.WriteString("Size");
7684  json.WriteNumber(size);
7685 
7686  json.EndObject();
7687 }
7688 
7689 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
7690 {
7691  json.EndArray();
7692  json.EndObject();
7693 }
7694 
7695 #endif // #if VMA_STATS_STRING_ENABLED
7696 
7698 // class VmaBlockMetadata_Generic
7699 
7700 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
7701  VmaBlockMetadata(hAllocator),
7702  m_FreeCount(0),
7703  m_SumFreeSize(0),
7704  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7705  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
7706 {
7707 }
7708 
7709 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
7710 {
7711 }
7712 
7713 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
7714 {
7715  VmaBlockMetadata::Init(size);
7716 
7717  m_FreeCount = 1;
7718  m_SumFreeSize = size;
7719 
7720  VmaSuballocation suballoc = {};
7721  suballoc.offset = 0;
7722  suballoc.size = size;
7723  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7724  suballoc.hAllocation = VK_NULL_HANDLE;
7725 
7726  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7727  m_Suballocations.push_back(suballoc);
7728  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
7729  --suballocItem;
7730  m_FreeSuballocationsBySize.push_back(suballocItem);
7731 }
7732 
7733 bool VmaBlockMetadata_Generic::Validate() const
7734 {
7735  VMA_VALIDATE(!m_Suballocations.empty());
7736 
7737  // Expected offset of new suballocation as calculated from previous ones.
7738  VkDeviceSize calculatedOffset = 0;
7739  // Expected number of free suballocations as calculated from traversing their list.
7740  uint32_t calculatedFreeCount = 0;
7741  // Expected sum size of free suballocations as calculated from traversing their list.
7742  VkDeviceSize calculatedSumFreeSize = 0;
7743  // Expected number of free suballocations that should be registered in
7744  // m_FreeSuballocationsBySize calculated from traversing their list.
7745  size_t freeSuballocationsToRegister = 0;
7746  // True if previous visited suballocation was free.
7747  bool prevFree = false;
7748 
7749  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7750  suballocItem != m_Suballocations.cend();
7751  ++suballocItem)
7752  {
7753  const VmaSuballocation& subAlloc = *suballocItem;
7754 
7755  // Actual offset of this suballocation doesn't match expected one.
7756  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
7757 
7758  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
7759  // Two adjacent free suballocations are invalid. They should be merged.
7760  VMA_VALIDATE(!prevFree || !currFree);
7761 
7762  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
7763 
7764  if(currFree)
7765  {
7766  calculatedSumFreeSize += subAlloc.size;
7767  ++calculatedFreeCount;
7768  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7769  {
7770  ++freeSuballocationsToRegister;
7771  }
7772 
7773  // Margin required between allocations - every free space must be at least that large.
7774  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
7775  }
7776  else
7777  {
7778  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
7779  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
7780 
7781  // Margin required between allocations - previous allocation must be free.
7782  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
7783  }
7784 
7785  calculatedOffset += subAlloc.size;
7786  prevFree = currFree;
7787  }
7788 
7789  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
7790  // match expected one.
7791  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
7792 
7793  VkDeviceSize lastSize = 0;
7794  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
7795  {
7796  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
7797 
7798  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
7799  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7800  // They must be sorted by size ascending.
7801  VMA_VALIDATE(suballocItem->size >= lastSize);
7802 
7803  lastSize = suballocItem->size;
7804  }
7805 
7806  // Check if totals match calculated values.
7807  VMA_VALIDATE(ValidateFreeSuballocationList());
7808  VMA_VALIDATE(calculatedOffset == GetSize());
7809  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
7810  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
7811 
7812  return true;
7813 }
7814 
7815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7816 {
7817  if(!m_FreeSuballocationsBySize.empty())
7818  {
7819  return m_FreeSuballocationsBySize.back()->size;
7820  }
7821  else
7822  {
7823  return 0;
7824  }
7825 }
7826 
7827 bool VmaBlockMetadata_Generic::IsEmpty() const
7828 {
7829  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7830 }
7831 
7832 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7833 {
7834  outInfo.blockCount = 1;
7835 
7836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7837  outInfo.allocationCount = rangeCount - m_FreeCount;
7838  outInfo.unusedRangeCount = m_FreeCount;
7839 
7840  outInfo.unusedBytes = m_SumFreeSize;
7841  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
7842 
7843  outInfo.allocationSizeMin = UINT64_MAX;
7844  outInfo.allocationSizeMax = 0;
7845  outInfo.unusedRangeSizeMin = UINT64_MAX;
7846  outInfo.unusedRangeSizeMax = 0;
7847 
7848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7849  suballocItem != m_Suballocations.cend();
7850  ++suballocItem)
7851  {
7852  const VmaSuballocation& suballoc = *suballocItem;
7853  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
7854  {
7855  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7856  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7857  }
7858  else
7859  {
7860  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
7861  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
7862  }
7863  }
7864 }
7865 
7866 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
7867 {
7868  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
7869 
7870  inoutStats.size += GetSize();
7871  inoutStats.unusedSize += m_SumFreeSize;
7872  inoutStats.allocationCount += rangeCount - m_FreeCount;
7873  inoutStats.unusedRangeCount += m_FreeCount;
7874  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
7875 }
7876 
7877 #if VMA_STATS_STRING_ENABLED
7878 
7879 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7880 {
7881  PrintDetailedMap_Begin(json,
7882  m_SumFreeSize, // unusedBytes
7883  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7884  m_FreeCount); // unusedRangeCount
7885 
7886  size_t i = 0;
7887  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7888  suballocItem != m_Suballocations.cend();
7889  ++suballocItem, ++i)
7890  {
7891  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7892  {
7893  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7894  }
7895  else
7896  {
7897  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7898  }
7899  }
7900 
7901  PrintDetailedMap_End(json);
7902 }
7903 
7904 #endif // #if VMA_STATS_STRING_ENABLED
7905 
7906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7907  uint32_t currentFrameIndex,
7908  uint32_t frameInUseCount,
7909  VkDeviceSize bufferImageGranularity,
7910  VkDeviceSize allocSize,
7911  VkDeviceSize allocAlignment,
7912  bool upperAddress,
7913  VmaSuballocationType allocType,
7914  bool canMakeOtherLost,
7915  uint32_t strategy,
7916  VmaAllocationRequest* pAllocationRequest)
7917 {
7918  VMA_ASSERT(allocSize > 0);
7919  VMA_ASSERT(!upperAddress);
7920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7922  VMA_HEAVY_ASSERT(Validate());
7923 
7924  pAllocationRequest->type = VmaAllocationRequestType::Normal;
7925 
7926  // There is not enough total free space in this block to fulfill the request: Early return.
7927  if(canMakeOtherLost == false &&
7928  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7929  {
7930  return false;
7931  }
7932 
7933  // New algorithm, efficiently searching freeSuballocationsBySize.
7934  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7935  if(freeSuballocCount > 0)
7936  {
7937  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7938  {
7939  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7940  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7941  m_FreeSuballocationsBySize.data(),
7942  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7943  allocSize + 2 * VMA_DEBUG_MARGIN,
7944  VmaSuballocationItemSizeLess());
7945  size_t index = it - m_FreeSuballocationsBySize.data();
7946  for(; index < freeSuballocCount; ++index)
7947  {
7948  if(CheckAllocation(
7949  currentFrameIndex,
7950  frameInUseCount,
7951  bufferImageGranularity,
7952  allocSize,
7953  allocAlignment,
7954  allocType,
7955  m_FreeSuballocationsBySize[index],
7956  false, // canMakeOtherLost
7957  &pAllocationRequest->offset,
7958  &pAllocationRequest->itemsToMakeLostCount,
7959  &pAllocationRequest->sumFreeSize,
7960  &pAllocationRequest->sumItemSize))
7961  {
7962  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7963  return true;
7964  }
7965  }
7966  }
7967  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7968  {
7969  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7970  it != m_Suballocations.end();
7971  ++it)
7972  {
7973  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7974  currentFrameIndex,
7975  frameInUseCount,
7976  bufferImageGranularity,
7977  allocSize,
7978  allocAlignment,
7979  allocType,
7980  it,
7981  false, // canMakeOtherLost
7982  &pAllocationRequest->offset,
7983  &pAllocationRequest->itemsToMakeLostCount,
7984  &pAllocationRequest->sumFreeSize,
7985  &pAllocationRequest->sumItemSize))
7986  {
7987  pAllocationRequest->item = it;
7988  return true;
7989  }
7990  }
7991  }
7992  else // WORST_FIT, FIRST_FIT
7993  {
7994  // Search starting from biggest suballocations.
7995  for(size_t index = freeSuballocCount; index--; )
7996  {
7997  if(CheckAllocation(
7998  currentFrameIndex,
7999  frameInUseCount,
8000  bufferImageGranularity,
8001  allocSize,
8002  allocAlignment,
8003  allocType,
8004  m_FreeSuballocationsBySize[index],
8005  false, // canMakeOtherLost
8006  &pAllocationRequest->offset,
8007  &pAllocationRequest->itemsToMakeLostCount,
8008  &pAllocationRequest->sumFreeSize,
8009  &pAllocationRequest->sumItemSize))
8010  {
8011  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8012  return true;
8013  }
8014  }
8015  }
8016  }
8017 
8018  if(canMakeOtherLost)
8019  {
8020  // Brute-force algorithm. TODO: Come up with something better.
8021 
8022  bool found = false;
8023  VmaAllocationRequest tmpAllocRequest = {};
8024  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8025  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8026  suballocIt != m_Suballocations.end();
8027  ++suballocIt)
8028  {
8029  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8030  suballocIt->hAllocation->CanBecomeLost())
8031  {
8032  if(CheckAllocation(
8033  currentFrameIndex,
8034  frameInUseCount,
8035  bufferImageGranularity,
8036  allocSize,
8037  allocAlignment,
8038  allocType,
8039  suballocIt,
8040  canMakeOtherLost,
8041  &tmpAllocRequest.offset,
8042  &tmpAllocRequest.itemsToMakeLostCount,
8043  &tmpAllocRequest.sumFreeSize,
8044  &tmpAllocRequest.sumItemSize))
8045  {
8046  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8047  {
8048  *pAllocationRequest = tmpAllocRequest;
8049  pAllocationRequest->item = suballocIt;
8050  break;
8051  }
8052  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8053  {
8054  *pAllocationRequest = tmpAllocRequest;
8055  pAllocationRequest->item = suballocIt;
8056  found = true;
8057  }
8058  }
8059  }
8060  }
8061 
8062  return found;
8063  }
8064 
8065  return false;
8066 }
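
// Summary of the search above: BEST_FIT binary-searches
// m_FreeSuballocationsBySize (kept sorted ascending by size, see
// ValidateFreeSuballocationList) for the first range that fits, in the spirit
// of std::lower_bound; MIN_OFFSET walks suballocations in address order and
// takes the first fit; the default path walks the by-size array from the
// largest range down. Only with canMakeOtherLost does it fall back to the
// brute-force scan that compares candidates by CalcCost().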
8067 
8068 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8069  uint32_t currentFrameIndex,
8070  uint32_t frameInUseCount,
8071  VmaAllocationRequest* pAllocationRequest)
8072 {
8073  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8074 
8075  while(pAllocationRequest->itemsToMakeLostCount > 0)
8076  {
8077  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8078  {
8079  ++pAllocationRequest->item;
8080  }
8081  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8082  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8083  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8084  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8085  {
8086  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8087  --pAllocationRequest->itemsToMakeLostCount;
8088  }
8089  else
8090  {
8091  return false;
8092  }
8093  }
8094 
8095  VMA_HEAVY_ASSERT(Validate());
8096  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8097  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8098 
8099  return true;
8100 }
8101 
8102 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8103 {
8104  uint32_t lostAllocationCount = 0;
8105  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8106  it != m_Suballocations.end();
8107  ++it)
8108  {
8109  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8110  it->hAllocation->CanBecomeLost() &&
8111  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8112  {
8113  it = FreeSuballocation(it);
8114  ++lostAllocationCount;
8115  }
8116  }
8117  return lostAllocationCount;
8118 }
8119 
8120 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8121 {
8122  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8123  it != m_Suballocations.end();
8124  ++it)
8125  {
8126  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8127  {
8128  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8129  {
8130  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8131  return VK_ERROR_VALIDATION_FAILED_EXT;
8132  }
8133  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8134  {
8135  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8136  return VK_ERROR_VALIDATION_FAILED_EXT;
8137  }
8138  }
8139  }
8140 
8141  return VK_SUCCESS;
8142 }
8143 
8144 void VmaBlockMetadata_Generic::Alloc(
8145  const VmaAllocationRequest& request,
8146  VmaSuballocationType type,
8147  VkDeviceSize allocSize,
8148  VmaAllocation hAllocation)
8149 {
8150  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8151  VMA_ASSERT(request.item != m_Suballocations.end());
8152  VmaSuballocation& suballoc = *request.item;
8153  // Given suballocation is a free block.
8154  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8155  // Given offset is inside this suballocation.
8156  VMA_ASSERT(request.offset >= suballoc.offset);
8157  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8158  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8159  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8160 
8161  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8162  // it to become used.
8163  UnregisterFreeSuballocation(request.item);
8164 
8165  suballoc.offset = request.offset;
8166  suballoc.size = allocSize;
8167  suballoc.type = type;
8168  suballoc.hAllocation = hAllocation;
8169 
8170  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8171  if(paddingEnd)
8172  {
8173  VmaSuballocation paddingSuballoc = {};
8174  paddingSuballoc.offset = request.offset + allocSize;
8175  paddingSuballoc.size = paddingEnd;
8176  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8177  VmaSuballocationList::iterator next = request.item;
8178  ++next;
8179  const VmaSuballocationList::iterator paddingEndItem =
8180  m_Suballocations.insert(next, paddingSuballoc);
8181  RegisterFreeSuballocation(paddingEndItem);
8182  }
8183 
8184  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8185  if(paddingBegin)
8186  {
8187  VmaSuballocation paddingSuballoc = {};
8188  paddingSuballoc.offset = request.offset - paddingBegin;
8189  paddingSuballoc.size = paddingBegin;
8190  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8191  const VmaSuballocationList::iterator paddingBeginItem =
8192  m_Suballocations.insert(request.item, paddingSuballoc);
8193  RegisterFreeSuballocation(paddingBeginItem);
8194  }
8195 
8196  // Update totals.
8197  m_FreeCount = m_FreeCount - 1;
8198  if(paddingBegin > 0)
8199  {
8200  ++m_FreeCount;
8201  }
8202  if(paddingEnd > 0)
8203  {
8204  ++m_FreeCount;
8205  }
8206  m_SumFreeSize -= allocSize;
8207 }
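
// Worked example of the split above: a free suballocation [100, 400) (size
// 300) serving a request with offset 128 and allocSize 200 yields
// paddingBegin = 28 and paddingEnd = 72, so it becomes free [100, 128),
// used [128, 328) and free [328, 400) - a net +1 to m_FreeCount and -200
// from m_SumFreeSize.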
8208 
8209 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8210 {
8211  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8212  suballocItem != m_Suballocations.end();
8213  ++suballocItem)
8214  {
8215  VmaSuballocation& suballoc = *suballocItem;
8216  if(suballoc.hAllocation == allocation)
8217  {
8218  FreeSuballocation(suballocItem);
8219  VMA_HEAVY_ASSERT(Validate());
8220  return;
8221  }
8222  }
8223  VMA_ASSERT(0 && "Not found!");
8224 }
8225 
8226 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8227 {
8228  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8229  suballocItem != m_Suballocations.end();
8230  ++suballocItem)
8231  {
8232  VmaSuballocation& suballoc = *suballocItem;
8233  if(suballoc.offset == offset)
8234  {
8235  FreeSuballocation(suballocItem);
8236  return;
8237  }
8238  }
8239  VMA_ASSERT(0 && "Not found!");
8240 }
8241 
8242 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
8243 {
8244  typedef VmaSuballocationList::iterator iter_type;
8245  for(iter_type suballocItem = m_Suballocations.begin();
8246  suballocItem != m_Suballocations.end();
8247  ++suballocItem)
8248  {
8249  VmaSuballocation& suballoc = *suballocItem;
8250  if(suballoc.hAllocation == alloc)
8251  {
8252  iter_type nextItem = suballocItem;
8253  ++nextItem;
8254 
8255  // Should have been ensured on higher level.
8256  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
8257 
8258  // Shrinking.
8259  if(newSize < alloc->GetSize())
8260  {
8261  const VkDeviceSize sizeDiff = suballoc.size - newSize;
8262 
8263  // There is next item.
8264  if(nextItem != m_Suballocations.end())
8265  {
8266  // Next item is free.
8267  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8268  {
8269  // Grow this next item backward.
8270  UnregisterFreeSuballocation(nextItem);
8271  nextItem->offset -= sizeDiff;
8272  nextItem->size += sizeDiff;
8273  RegisterFreeSuballocation(nextItem);
8274  }
8275  // Next item is not free.
8276  else
8277  {
8278  // Create free item after current one.
8279  VmaSuballocation newFreeSuballoc;
8280  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8281  newFreeSuballoc.offset = suballoc.offset + newSize;
8282  newFreeSuballoc.size = sizeDiff;
8283  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8284  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
8285  RegisterFreeSuballocation(newFreeSuballocIt);
8286 
8287  ++m_FreeCount;
8288  }
8289  }
8290  // This is the last item.
8291  else
8292  {
8293  // Create free item at the end.
8294  VmaSuballocation newFreeSuballoc;
8295  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
8296  newFreeSuballoc.offset = suballoc.offset + newSize;
8297  newFreeSuballoc.size = sizeDiff;
8298  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8299  m_Suballocations.push_back(newFreeSuballoc);
8300 
8301  iter_type newFreeSuballocIt = m_Suballocations.end();
8302  RegisterFreeSuballocation(--newFreeSuballocIt);
8303 
8304  ++m_FreeCount;
8305  }
8306 
8307  suballoc.size = newSize;
8308  m_SumFreeSize += sizeDiff;
8309  }
8310  // Growing.
8311  else
8312  {
8313  const VkDeviceSize sizeDiff = newSize - suballoc.size;
8314 
8315  // There is next item.
8316  if(nextItem != m_Suballocations.end())
8317  {
8318  // Next item is free.
8319  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8320  {
8321  // There is not enough free space, including margin.
8322  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
8323  {
8324  return false;
8325  }
8326 
8327  // There is more free space than required.
8328  if(nextItem->size > sizeDiff)
8329  {
8330  // Move and shrink this next item.
8331  UnregisterFreeSuballocation(nextItem);
8332  nextItem->offset += sizeDiff;
8333  nextItem->size -= sizeDiff;
8334  RegisterFreeSuballocation(nextItem);
8335  }
8336  // There is exactly the amount of free space required.
8337  else
8338  {
8339  // Remove this next free item.
8340  UnregisterFreeSuballocation(nextItem);
8341  m_Suballocations.erase(nextItem);
8342  --m_FreeCount;
8343  }
8344  }
8345  // Next item is not free - there is no space to grow.
8346  else
8347  {
8348  return false;
8349  }
8350  }
8351  // This is the last item - there is no space to grow.
8352  else
8353  {
8354  return false;
8355  }
8356 
8357  suballoc.size = newSize;
8358  m_SumFreeSize -= sizeDiff;
8359  }
8360 
8361  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
8362  return true;
8363  }
8364  }
8365  VMA_ASSERT(0 && "Not found!");
8366  return false;
8367 }
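// Editorial note: ResizeAllocation() resizes in place. Shrinking carves the
// freed tail into the following free item (or creates a new one); growing
// succeeds only if the immediately following item is free and large enough,
// including VMA_DEBUG_MARGIN. A minimal usage sketch through the public v2.x
// API, assuming `allocator` and `alloc` already exist:
#if 0 // Illustrative sketch only - not part of the original file.
VkResult res = vmaResizeAllocation(allocator, alloc, /*newSize=*/256 * 1024);
if(res != VK_SUCCESS)
{
    // In-place resize failed (e.g. the next suballocation is in use);
    // fall back to creating a new allocation and copying the contents.
}
#endif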
8368 
8369 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8370 {
8371  VkDeviceSize lastSize = 0;
8372  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8373  {
8374  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8375 
8376  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8377  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8378  VMA_VALIDATE(it->size >= lastSize);
8379  lastSize = it->size;
8380  }
8381  return true;
8382 }
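// Editorial note: this validates the invariant behind the best-fit search -
// m_FreeSuballocationsBySize holds only free items of at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, sorted ascending by size.
// The same check restated with <algorithm>, where `bySize` is a hypothetical
// vector of list iterators (not a VMA name):
#if 0 // Illustrative sketch only.
const bool sorted = std::is_sorted(bySize.begin(), bySize.end(),
    [](const auto& lhs, const auto& rhs) { return lhs->size < rhs->size; });
#endif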
8383 
8384 bool VmaBlockMetadata_Generic::CheckAllocation(
8385  uint32_t currentFrameIndex,
8386  uint32_t frameInUseCount,
8387  VkDeviceSize bufferImageGranularity,
8388  VkDeviceSize allocSize,
8389  VkDeviceSize allocAlignment,
8390  VmaSuballocationType allocType,
8391  VmaSuballocationList::const_iterator suballocItem,
8392  bool canMakeOtherLost,
8393  VkDeviceSize* pOffset,
8394  size_t* itemsToMakeLostCount,
8395  VkDeviceSize* pSumFreeSize,
8396  VkDeviceSize* pSumItemSize) const
8397 {
8398  VMA_ASSERT(allocSize > 0);
8399  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8400  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8401  VMA_ASSERT(pOffset != VMA_NULL);
8402 
8403  *itemsToMakeLostCount = 0;
8404  *pSumFreeSize = 0;
8405  *pSumItemSize = 0;
8406 
8407  if(canMakeOtherLost)
8408  {
8409  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8410  {
8411  *pSumFreeSize = suballocItem->size;
8412  }
8413  else
8414  {
8415  if(suballocItem->hAllocation->CanBecomeLost() &&
8416  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8417  {
8418  ++*itemsToMakeLostCount;
8419  *pSumItemSize = suballocItem->size;
8420  }
8421  else
8422  {
8423  return false;
8424  }
8425  }
8426 
8427  // Remaining size is too small for this request: Early return.
8428  if(GetSize() - suballocItem->offset < allocSize)
8429  {
8430  return false;
8431  }
8432 
8433  // Start from offset equal to beginning of this suballocation.
8434  *pOffset = suballocItem->offset;
8435 
8436  // Apply VMA_DEBUG_MARGIN at the beginning.
8437  if(VMA_DEBUG_MARGIN > 0)
8438  {
8439  *pOffset += VMA_DEBUG_MARGIN;
8440  }
8441 
8442  // Apply alignment.
8443  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8444 
8445  // Check previous suballocations for BufferImageGranularity conflicts.
8446  // Make bigger alignment if necessary.
8447  if(bufferImageGranularity > 1)
8448  {
8449  bool bufferImageGranularityConflict = false;
8450  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8451  while(prevSuballocItem != m_Suballocations.cbegin())
8452  {
8453  --prevSuballocItem;
8454  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8455  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8456  {
8457  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8458  {
8459  bufferImageGranularityConflict = true;
8460  break;
8461  }
8462  }
8463  else
8464  // Already on previous page.
8465  break;
8466  }
8467  if(bufferImageGranularityConflict)
8468  {
8469  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8470  }
8471  }
8472 
8473  // Now that we have final *pOffset, check if we are past suballocItem.
8474  // If yes, return false - this function should be called for another suballocItem as starting point.
8475  if(*pOffset >= suballocItem->offset + suballocItem->size)
8476  {
8477  return false;
8478  }
8479 
8480  // Calculate padding at the beginning based on current offset.
8481  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8482 
8483  // Calculate required margin at the end.
8484  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8485 
8486  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8487  // Another early return check.
8488  if(suballocItem->offset + totalSize > GetSize())
8489  {
8490  return false;
8491  }
8492 
8493  // Advance lastSuballocItem until desired size is reached.
8494  // Update itemsToMakeLostCount.
8495  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8496  if(totalSize > suballocItem->size)
8497  {
8498  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8499  while(remainingSize > 0)
8500  {
8501  ++lastSuballocItem;
8502  if(lastSuballocItem == m_Suballocations.cend())
8503  {
8504  return false;
8505  }
8506  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8507  {
8508  *pSumFreeSize += lastSuballocItem->size;
8509  }
8510  else
8511  {
8512  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8513  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8514  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8515  {
8516  ++*itemsToMakeLostCount;
8517  *pSumItemSize += lastSuballocItem->size;
8518  }
8519  else
8520  {
8521  return false;
8522  }
8523  }
8524  remainingSize = (lastSuballocItem->size < remainingSize) ?
8525  remainingSize - lastSuballocItem->size : 0;
8526  }
8527  }
8528 
8529  // Check next suballocations for BufferImageGranularity conflicts.
8530  // If conflict exists, we must mark more allocations lost or fail.
8531  if(bufferImageGranularity > 1)
8532  {
8533  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8534  ++nextSuballocItem;
8535  while(nextSuballocItem != m_Suballocations.cend())
8536  {
8537  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8538  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8539  {
8540  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8541  {
8542  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8543  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8544  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8545  {
8546  ++*itemsToMakeLostCount;
8547  }
8548  else
8549  {
8550  return false;
8551  }
8552  }
8553  }
8554  else
8555  {
8556  // Already on next page.
8557  break;
8558  }
8559  ++nextSuballocItem;
8560  }
8561  }
8562  }
8563  else
8564  {
8565  const VmaSuballocation& suballoc = *suballocItem;
8566  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8567 
8568  *pSumFreeSize = suballoc.size;
8569 
8570  // Size of this suballocation is too small for this request: Early return.
8571  if(suballoc.size < allocSize)
8572  {
8573  return false;
8574  }
8575 
8576  // Start from offset equal to beginning of this suballocation.
8577  *pOffset = suballoc.offset;
8578 
8579  // Apply VMA_DEBUG_MARGIN at the beginning.
8580  if(VMA_DEBUG_MARGIN > 0)
8581  {
8582  *pOffset += VMA_DEBUG_MARGIN;
8583  }
8584 
8585  // Apply alignment.
8586  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8587 
8588  // Check previous suballocations for BufferImageGranularity conflicts.
8589  // Make bigger alignment if necessary.
8590  if(bufferImageGranularity > 1)
8591  {
8592  bool bufferImageGranularityConflict = false;
8593  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8594  while(prevSuballocItem != m_Suballocations.cbegin())
8595  {
8596  --prevSuballocItem;
8597  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8598  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8599  {
8600  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8601  {
8602  bufferImageGranularityConflict = true;
8603  break;
8604  }
8605  }
8606  else
8607  // Already on previous page.
8608  break;
8609  }
8610  if(bufferImageGranularityConflict)
8611  {
8612  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8613  }
8614  }
8615 
8616  // Calculate padding at the beginning based on current offset.
8617  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8618 
8619  // Calculate required margin at the end.
8620  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8621 
8622  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8623  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8624  {
8625  return false;
8626  }
8627 
8628  // Check next suballocations for BufferImageGranularity conflicts.
8629  // If conflict exists, allocation cannot be made here.
8630  if(bufferImageGranularity > 1)
8631  {
8632  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8633  ++nextSuballocItem;
8634  while(nextSuballocItem != m_Suballocations.cend())
8635  {
8636  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8637  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8638  {
8639  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8640  {
8641  return false;
8642  }
8643  }
8644  else
8645  {
8646  // Already on next page.
8647  break;
8648  }
8649  ++nextSuballocItem;
8650  }
8651  }
8652  }
8653 
8654  // All tests passed: Success. pOffset is already filled.
8655  return true;
8656 }
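// Editorial worked example for the offset computation above, with hypothetical
// numbers: suballoc.offset = 1000, VMA_DEBUG_MARGIN = 16, allocAlignment = 256,
// allocSize = 4096, and no granularity conflict:
//   *pOffset = 1000 + 16 = 1016, then VmaAlignUp(1016, 256) = 1024;
//   paddingBegin = 1024 - 1000 = 24;
//   the request fits if 24 + 4096 + 16 <= suballoc.size.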
8657 
8658 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8659 {
8660  VMA_ASSERT(item != m_Suballocations.end());
8661  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8662 
8663  VmaSuballocationList::iterator nextItem = item;
8664  ++nextItem;
8665  VMA_ASSERT(nextItem != m_Suballocations.end());
8666  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8667 
8668  item->size += nextItem->size;
8669  --m_FreeCount;
8670  m_Suballocations.erase(nextItem);
8671 }
8672 
8673 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8674 {
8675  // Change this suballocation to be marked as free.
8676  VmaSuballocation& suballoc = *suballocItem;
8677  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8678  suballoc.hAllocation = VK_NULL_HANDLE;
8679 
8680  // Update totals.
8681  ++m_FreeCount;
8682  m_SumFreeSize += suballoc.size;
8683 
8684  // Merge with previous and/or next suballocation if it's also free.
8685  bool mergeWithNext = false;
8686  bool mergeWithPrev = false;
8687 
8688  VmaSuballocationList::iterator nextItem = suballocItem;
8689  ++nextItem;
8690  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8691  {
8692  mergeWithNext = true;
8693  }
8694 
8695  VmaSuballocationList::iterator prevItem = suballocItem;
8696  if(suballocItem != m_Suballocations.begin())
8697  {
8698  --prevItem;
8699  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8700  {
8701  mergeWithPrev = true;
8702  }
8703  }
8704 
8705  if(mergeWithNext)
8706  {
8707  UnregisterFreeSuballocation(nextItem);
8708  MergeFreeWithNext(suballocItem);
8709  }
8710 
8711  if(mergeWithPrev)
8712  {
8713  UnregisterFreeSuballocation(prevItem);
8714  MergeFreeWithNext(prevItem);
8715  RegisterFreeSuballocation(prevItem);
8716  return prevItem;
8717  }
8718  else
8719  {
8720  RegisterFreeSuballocation(suballocItem);
8721  return suballocItem;
8722  }
8723 }
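// Editorial note: FreeSuballocation() coalesces with free neighbors, so the
// list never contains two adjacent free items. E.g. freeing B in
// [A][B][free] first unregisters the trailing free item, merges it into B
// via MergeFreeWithNext(), then registers the single combined free range.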
8724 
8725 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8726 {
8727  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8728  VMA_ASSERT(item->size > 0);
8729 
8730  // You may want to enable this validation at the beginning or at the end of
8731  // this function, depending on what you want to check.
8732  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8733 
8734  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8735  {
8736  if(m_FreeSuballocationsBySize.empty())
8737  {
8738  m_FreeSuballocationsBySize.push_back(item);
8739  }
8740  else
8741  {
8742  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8743  }
8744  }
8745 
8746  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8747 }
8748 
8749 
8750 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8751 {
8752  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8753  VMA_ASSERT(item->size > 0);
8754 
8755  // You may want to enable this validation at the beginning or at the end of
8756  // this function, depending on what you want to check.
8757  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8758 
8759  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8760  {
8761  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8762  m_FreeSuballocationsBySize.data(),
8763  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8764  item,
8765  VmaSuballocationItemSizeLess());
8766  for(size_t index = it - m_FreeSuballocationsBySize.data();
8767  index < m_FreeSuballocationsBySize.size();
8768  ++index)
8769  {
8770  if(m_FreeSuballocationsBySize[index] == item)
8771  {
8772  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8773  return;
8774  }
8775  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8776  }
8777  VMA_ASSERT(0 && "Not found.");
8778  }
8779 
8780  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8781 }
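// Editorial note: because m_FreeSuballocationsBySize is sorted only by size,
// the binary search lands on the first entry of equal size and the loop then
// walks forward through equal-size entries to find the exact iterator. The
// same pattern with the standard library, where `bySize` is a hypothetical
// vector of list iterators (not a VMA name):
#if 0 // Illustrative sketch only.
auto pos = std::lower_bound(bySize.begin(), bySize.end(), item,
    [](const auto& lhs, const auto& rhs) { return lhs->size < rhs->size; });
for(; pos != bySize.end() && (*pos)->size == item->size; ++pos)
{
    if(*pos == item)
    {
        bySize.erase(pos); // Exact match among equal-size entries.
        break;
    }
}
#endif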
8782 
8783 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8784  VkDeviceSize bufferImageGranularity,
8785  VmaSuballocationType& inOutPrevSuballocType) const
8786 {
8787  if(bufferImageGranularity == 1 || IsEmpty())
8788  {
8789  return false;
8790  }
8791 
8792  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8793  bool typeConflictFound = false;
8794  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8795  it != m_Suballocations.cend();
8796  ++it)
8797  {
8798  const VmaSuballocationType suballocType = it->type;
8799  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8800  {
8801  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8802  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8803  {
8804  typeConflictFound = true;
8805  }
8806  inOutPrevSuballocType = suballocType;
8807  }
8808  }
8809 
8810  return typeConflictFound || minAlignment >= bufferImageGranularity;
8811 }
8812 
8813 ////////////////////////////////////////////////////////////////////////////////
8814 // class VmaBlockMetadata_Linear
8815 
8816 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
8817  VmaBlockMetadata(hAllocator),
8818  m_SumFreeSize(0),
8819  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8820  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8821  m_1stVectorIndex(0),
8822  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
8823  m_1stNullItemsBeginCount(0),
8824  m_1stNullItemsMiddleCount(0),
8825  m_2ndNullItemsCount(0)
8826 {
8827 }
8828 
8829 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
8830 {
8831 }
8832 
8833 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
8834 {
8835  VmaBlockMetadata::Init(size);
8836  m_SumFreeSize = size;
8837 }
8838 
8839 bool VmaBlockMetadata_Linear::Validate() const
8840 {
8841  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8842  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8843 
8844  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
8845  VMA_VALIDATE(!suballocations1st.empty() ||
8846  suballocations2nd.empty() ||
8847  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
8848 
8849  if(!suballocations1st.empty())
8850  {
8851  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
8852  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
8853  // Null item at the end should be just pop_back().
8854  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
8855  }
8856  if(!suballocations2nd.empty())
8857  {
8858  // Null item at the end should be just pop_back().
8859  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
8860  }
8861 
8862  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
8863  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
8864 
8865  VkDeviceSize sumUsedSize = 0;
8866  const size_t suballoc1stCount = suballocations1st.size();
8867  VkDeviceSize offset = VMA_DEBUG_MARGIN;
8868 
8869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8870  {
8871  const size_t suballoc2ndCount = suballocations2nd.size();
8872  size_t nullItem2ndCount = 0;
8873  for(size_t i = 0; i < suballoc2ndCount; ++i)
8874  {
8875  const VmaSuballocation& suballoc = suballocations2nd[i];
8876  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8877 
8878  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8879  VMA_VALIDATE(suballoc.offset >= offset);
8880 
8881  if(!currFree)
8882  {
8883  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8884  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8885  sumUsedSize += suballoc.size;
8886  }
8887  else
8888  {
8889  ++nullItem2ndCount;
8890  }
8891 
8892  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8893  }
8894 
8895  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8896  }
8897 
8898  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
8899  {
8900  const VmaSuballocation& suballoc = suballocations1st[i];
8901  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
8902  suballoc.hAllocation == VK_NULL_HANDLE);
8903  }
8904 
8905  size_t nullItem1stCount = m_1stNullItemsBeginCount;
8906 
8907  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
8908  {
8909  const VmaSuballocation& suballoc = suballocations1st[i];
8910  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8911 
8912  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8913  VMA_VALIDATE(suballoc.offset >= offset);
8914  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
8915 
8916  if(!currFree)
8917  {
8918  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8919  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8920  sumUsedSize += suballoc.size;
8921  }
8922  else
8923  {
8924  ++nullItem1stCount;
8925  }
8926 
8927  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8928  }
8929  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
8930 
8931  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8932  {
8933  const size_t suballoc2ndCount = suballocations2nd.size();
8934  size_t nullItem2ndCount = 0;
8935  for(size_t i = suballoc2ndCount; i--; )
8936  {
8937  const VmaSuballocation& suballoc = suballocations2nd[i];
8938  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8939 
8940  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
8941  VMA_VALIDATE(suballoc.offset >= offset);
8942 
8943  if(!currFree)
8944  {
8945  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
8946  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
8947  sumUsedSize += suballoc.size;
8948  }
8949  else
8950  {
8951  ++nullItem2ndCount;
8952  }
8953 
8954  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
8955  }
8956 
8957  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
8958  }
8959 
8960  VMA_VALIDATE(offset <= GetSize());
8961  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
8962 
8963  return true;
8964 }
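// Editorial note on the layout validated above. The linear metadata keeps two
// suballocation vectors and switches between three modes:
//   SECOND_VECTOR_EMPTY:        |1st --->                          |
//   SECOND_VECTOR_RING_BUFFER:  |2nd --->        1st --->          |  (2nd fills space freed before 1st)
//   SECOND_VECTOR_DOUBLE_STACK: |1st --->             <--- 2nd     |  (2nd grows down from the end)
// Freed ("null") items at the front of 1st are counted in
// m_1stNullItemsBeginCount; other freed items stay in place and are counted in
// m_1stNullItemsMiddleCount and m_2ndNullItemsCount until cleanup.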
8965 
8966 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8967 {
8968  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8969  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8970 }
8971 
8972 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8973 {
8974  const VkDeviceSize size = GetSize();
8975 
8976  /*
8977  We don't consider gaps left by freed allocations inside the vectors, because
8978  they are not suitable for reuse in the linear allocator. We consider only the
8979  space that is available for new allocations.
8980  */
8981  if(IsEmpty())
8982  {
8983  return size;
8984  }
8985 
8986  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8987 
8988  switch(m_2ndVectorMode)
8989  {
8990  case SECOND_VECTOR_EMPTY:
8991  /*
8992  Available space is after end of 1st, as well as before beginning of 1st (which
8993  would make it a ring buffer).
8994  */
8995  {
8996  const size_t suballocations1stCount = suballocations1st.size();
8997  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8998  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8999  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9000  return VMA_MAX(
9001  firstSuballoc.offset,
9002  size - (lastSuballoc.offset + lastSuballoc.size));
9003  }
9004  break;
9005 
9006  case SECOND_VECTOR_RING_BUFFER:
9007  /*
9008  Available space is only between end of 2nd and beginning of 1st.
9009  */
9010  {
9011  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9012  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9013  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9014  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9015  }
9016  break;
9017 
9018  case SECOND_VECTOR_DOUBLE_STACK:
9019  /*
9020  Available space is only between end of 1st and top of 2nd.
9021  */
9022  {
9023  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9024  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9025  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9026  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9027  }
9028  break;
9029 
9030  default:
9031  VMA_ASSERT(0);
9032  return 0;
9033  }
9034 }
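// Editorial worked example with hypothetical numbers: in a 1024-byte block in
// double-stack mode, if the last 1st-vector item ends at offset 600 and the
// 2nd-vector top starts at offset 900, the only space usable for new
// allocations is 900 - 600 = 300 bytes, regardless of any gaps left by freed
// items inside the vectors.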
9035 
9036 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9037 {
9038  const VkDeviceSize size = GetSize();
9039  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9040  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9041  const size_t suballoc1stCount = suballocations1st.size();
9042  const size_t suballoc2ndCount = suballocations2nd.size();
9043 
9044  outInfo.blockCount = 1;
9045  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9046  outInfo.unusedRangeCount = 0;
9047  outInfo.usedBytes = 0;
9048  outInfo.allocationSizeMin = UINT64_MAX;
9049  outInfo.allocationSizeMax = 0;
9050  outInfo.unusedRangeSizeMin = UINT64_MAX;
9051  outInfo.unusedRangeSizeMax = 0;
9052 
9053  VkDeviceSize lastOffset = 0;
9054 
9055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9056  {
9057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9058  size_t nextAlloc2ndIndex = 0;
9059  while(lastOffset < freeSpace2ndTo1stEnd)
9060  {
9061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9062  while(nextAlloc2ndIndex < suballoc2ndCount &&
9063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9064  {
9065  ++nextAlloc2ndIndex;
9066  }
9067 
9068  // Found non-null allocation.
9069  if(nextAlloc2ndIndex < suballoc2ndCount)
9070  {
9071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9072 
9073  // 1. Process free space before this allocation.
9074  if(lastOffset < suballoc.offset)
9075  {
9076  // There is free space from lastOffset to suballoc.offset.
9077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9078  ++outInfo.unusedRangeCount;
9079  outInfo.unusedBytes += unusedRangeSize;
9080  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9081  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9082  }
9083 
9084  // 2. Process this allocation.
9085  // There is allocation with suballoc.offset, suballoc.size.
9086  outInfo.usedBytes += suballoc.size;
9087  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9088  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9089 
9090  // 3. Prepare for next iteration.
9091  lastOffset = suballoc.offset + suballoc.size;
9092  ++nextAlloc2ndIndex;
9093  }
9094  // We are at the end.
9095  else
9096  {
9097  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9098  if(lastOffset < freeSpace2ndTo1stEnd)
9099  {
9100  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9101  ++outInfo.unusedRangeCount;
9102  outInfo.unusedBytes += unusedRangeSize;
9103  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9104  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9105  }
9106 
9107  // End of loop.
9108  lastOffset = freeSpace2ndTo1stEnd;
9109  }
9110  }
9111  }
9112 
9113  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9114  const VkDeviceSize freeSpace1stTo2ndEnd =
9115  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9116  while(lastOffset < freeSpace1stTo2ndEnd)
9117  {
9118  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9119  while(nextAlloc1stIndex < suballoc1stCount &&
9120  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9121  {
9122  ++nextAlloc1stIndex;
9123  }
9124 
9125  // Found non-null allocation.
9126  if(nextAlloc1stIndex < suballoc1stCount)
9127  {
9128  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9129 
9130  // 1. Process free space before this allocation.
9131  if(lastOffset < suballoc.offset)
9132  {
9133  // There is free space from lastOffset to suballoc.offset.
9134  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9135  ++outInfo.unusedRangeCount;
9136  outInfo.unusedBytes += unusedRangeSize;
9137  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9138  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9139  }
9140 
9141  // 2. Process this allocation.
9142  // There is allocation with suballoc.offset, suballoc.size.
9143  outInfo.usedBytes += suballoc.size;
9144  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9145  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9146 
9147  // 3. Prepare for next iteration.
9148  lastOffset = suballoc.offset + suballoc.size;
9149  ++nextAlloc1stIndex;
9150  }
9151  // We are at the end.
9152  else
9153  {
9154  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9155  if(lastOffset < freeSpace1stTo2ndEnd)
9156  {
9157  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9158  ++outInfo.unusedRangeCount;
9159  outInfo.unusedBytes += unusedRangeSize;
9160  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9161  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9162  }
9163 
9164  // End of loop.
9165  lastOffset = freeSpace1stTo2ndEnd;
9166  }
9167  }
9168 
9169  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9170  {
9171  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9172  while(lastOffset < size)
9173  {
9174  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9175  while(nextAlloc2ndIndex != SIZE_MAX &&
9176  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9177  {
9178  --nextAlloc2ndIndex;
9179  }
9180 
9181  // Found non-null allocation.
9182  if(nextAlloc2ndIndex != SIZE_MAX)
9183  {
9184  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9185 
9186  // 1. Process free space before this allocation.
9187  if(lastOffset < suballoc.offset)
9188  {
9189  // There is free space from lastOffset to suballoc.offset.
9190  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9191  ++outInfo.unusedRangeCount;
9192  outInfo.unusedBytes += unusedRangeSize;
9193  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9194  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9195  }
9196 
9197  // 2. Process this allocation.
9198  // There is allocation with suballoc.offset, suballoc.size.
9199  outInfo.usedBytes += suballoc.size;
9200  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9201  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9202 
9203  // 3. Prepare for next iteration.
9204  lastOffset = suballoc.offset + suballoc.size;
9205  --nextAlloc2ndIndex;
9206  }
9207  // We are at the end.
9208  else
9209  {
9210  // There is free space from lastOffset to size.
9211  if(lastOffset < size)
9212  {
9213  const VkDeviceSize unusedRangeSize = size - lastOffset;
9214  ++outInfo.unusedRangeCount;
9215  outInfo.unusedBytes += unusedRangeSize;
9216  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9217  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9218  }
9219 
9220  // End of loop.
9221  lastOffset = size;
9222  }
9223  }
9224  }
9225 
9226  outInfo.unusedBytes = size - outInfo.usedBytes;
9227 }
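// Editorial note: the stat fields use the usual min/max fold - allocationSizeMin
// and unusedRangeSizeMin start at UINT64_MAX, the Max counterparts at 0, and
// each range is folded in with VMA_MIN/VMA_MAX respectively. If a block ends up
// with no allocations or no unused ranges, the Min fields keep UINT64_MAX, so
// consumers of VmaStatInfo may need to special-case empty counts.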
9228 
9229 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9230 {
9231  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9232  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9233  const VkDeviceSize size = GetSize();
9234  const size_t suballoc1stCount = suballocations1st.size();
9235  const size_t suballoc2ndCount = suballocations2nd.size();
9236 
9237  inoutStats.size += size;
9238 
9239  VkDeviceSize lastOffset = 0;
9240 
9241  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9242  {
9243  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9244  size_t nextAlloc2ndIndex = 0;
9245  while(lastOffset < freeSpace2ndTo1stEnd)
9246  {
9247  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9248  while(nextAlloc2ndIndex < suballoc2ndCount &&
9249  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9250  {
9251  ++nextAlloc2ndIndex;
9252  }
9253 
9254  // Found non-null allocation.
9255  if(nextAlloc2ndIndex < suballoc2ndCount)
9256  {
9257  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9258 
9259  // 1. Process free space before this allocation.
9260  if(lastOffset < suballoc.offset)
9261  {
9262  // There is free space from lastOffset to suballoc.offset.
9263  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9264  inoutStats.unusedSize += unusedRangeSize;
9265  ++inoutStats.unusedRangeCount;
9266  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9267  }
9268 
9269  // 2. Process this allocation.
9270  // There is allocation with suballoc.offset, suballoc.size.
9271  ++inoutStats.allocationCount;
9272 
9273  // 3. Prepare for next iteration.
9274  lastOffset = suballoc.offset + suballoc.size;
9275  ++nextAlloc2ndIndex;
9276  }
9277  // We are at the end.
9278  else
9279  {
9280  if(lastOffset < freeSpace2ndTo1stEnd)
9281  {
9282  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9283  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9284  inoutStats.unusedSize += unusedRangeSize;
9285  ++inoutStats.unusedRangeCount;
9286  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9287  }
9288 
9289  // End of loop.
9290  lastOffset = freeSpace2ndTo1stEnd;
9291  }
9292  }
9293  }
9294 
9295  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9296  const VkDeviceSize freeSpace1stTo2ndEnd =
9297  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9298  while(lastOffset < freeSpace1stTo2ndEnd)
9299  {
9300  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9301  while(nextAlloc1stIndex < suballoc1stCount &&
9302  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9303  {
9304  ++nextAlloc1stIndex;
9305  }
9306 
9307  // Found non-null allocation.
9308  if(nextAlloc1stIndex < suballoc1stCount)
9309  {
9310  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9311 
9312  // 1. Process free space before this allocation.
9313  if(lastOffset < suballoc.offset)
9314  {
9315  // There is free space from lastOffset to suballoc.offset.
9316  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9317  inoutStats.unusedSize += unusedRangeSize;
9318  ++inoutStats.unusedRangeCount;
9319  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9320  }
9321 
9322  // 2. Process this allocation.
9323  // There is allocation with suballoc.offset, suballoc.size.
9324  ++inoutStats.allocationCount;
9325 
9326  // 3. Prepare for next iteration.
9327  lastOffset = suballoc.offset + suballoc.size;
9328  ++nextAlloc1stIndex;
9329  }
9330  // We are at the end.
9331  else
9332  {
9333  if(lastOffset < freeSpace1stTo2ndEnd)
9334  {
9335  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9336  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9337  inoutStats.unusedSize += unusedRangeSize;
9338  ++inoutStats.unusedRangeCount;
9339  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9340  }
9341 
9342  // End of loop.
9343  lastOffset = freeSpace1stTo2ndEnd;
9344  }
9345  }
9346 
9347  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9348  {
9349  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9350  while(lastOffset < size)
9351  {
9352  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9353  while(nextAlloc2ndIndex != SIZE_MAX &&
9354  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9355  {
9356  --nextAlloc2ndIndex;
9357  }
9358 
9359  // Found non-null allocation.
9360  if(nextAlloc2ndIndex != SIZE_MAX)
9361  {
9362  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9363 
9364  // 1. Process free space before this allocation.
9365  if(lastOffset < suballoc.offset)
9366  {
9367  // There is free space from lastOffset to suballoc.offset.
9368  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9369  inoutStats.unusedSize += unusedRangeSize;
9370  ++inoutStats.unusedRangeCount;
9371  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9372  }
9373 
9374  // 2. Process this allocation.
9375  // There is allocation with suballoc.offset, suballoc.size.
9376  ++inoutStats.allocationCount;
9377 
9378  // 3. Prepare for next iteration.
9379  lastOffset = suballoc.offset + suballoc.size;
9380  --nextAlloc2ndIndex;
9381  }
9382  // We are at the end.
9383  else
9384  {
9385  if(lastOffset < size)
9386  {
9387  // There is free space from lastOffset to size.
9388  const VkDeviceSize unusedRangeSize = size - lastOffset;
9389  inoutStats.unusedSize += unusedRangeSize;
9390  ++inoutStats.unusedRangeCount;
9391  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9392  }
9393 
9394  // End of loop.
9395  lastOffset = size;
9396  }
9397  }
9398  }
9399 }
9400 
9401 #if VMA_STATS_STRING_ENABLED
9402 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9403 {
9404  const VkDeviceSize size = GetSize();
9405  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9406  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9407  const size_t suballoc1stCount = suballocations1st.size();
9408  const size_t suballoc2ndCount = suballocations2nd.size();
9409 
9410  // FIRST PASS
9411 
9412  size_t unusedRangeCount = 0;
9413  VkDeviceSize usedBytes = 0;
9414 
9415  VkDeviceSize lastOffset = 0;
9416 
9417  size_t alloc2ndCount = 0;
9418  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9419  {
9420  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9421  size_t nextAlloc2ndIndex = 0;
9422  while(lastOffset < freeSpace2ndTo1stEnd)
9423  {
9424  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9425  while(nextAlloc2ndIndex < suballoc2ndCount &&
9426  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9427  {
9428  ++nextAlloc2ndIndex;
9429  }
9430 
9431  // Found non-null allocation.
9432  if(nextAlloc2ndIndex < suballoc2ndCount)
9433  {
9434  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9435 
9436  // 1. Process free space before this allocation.
9437  if(lastOffset < suballoc.offset)
9438  {
9439  // There is free space from lastOffset to suballoc.offset.
9440  ++unusedRangeCount;
9441  }
9442 
9443  // 2. Process this allocation.
9444  // There is allocation with suballoc.offset, suballoc.size.
9445  ++alloc2ndCount;
9446  usedBytes += suballoc.size;
9447 
9448  // 3. Prepare for next iteration.
9449  lastOffset = suballoc.offset + suballoc.size;
9450  ++nextAlloc2ndIndex;
9451  }
9452  // We are at the end.
9453  else
9454  {
9455  if(lastOffset < freeSpace2ndTo1stEnd)
9456  {
9457  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9458  ++unusedRangeCount;
9459  }
9460 
9461  // End of loop.
9462  lastOffset = freeSpace2ndTo1stEnd;
9463  }
9464  }
9465  }
9466 
9467  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9468  size_t alloc1stCount = 0;
9469  const VkDeviceSize freeSpace1stTo2ndEnd =
9470  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9471  while(lastOffset < freeSpace1stTo2ndEnd)
9472  {
9473  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9474  while(nextAlloc1stIndex < suballoc1stCount &&
9475  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9476  {
9477  ++nextAlloc1stIndex;
9478  }
9479 
9480  // Found non-null allocation.
9481  if(nextAlloc1stIndex < suballoc1stCount)
9482  {
9483  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9484 
9485  // 1. Process free space before this allocation.
9486  if(lastOffset < suballoc.offset)
9487  {
9488  // There is free space from lastOffset to suballoc.offset.
9489  ++unusedRangeCount;
9490  }
9491 
9492  // 2. Process this allocation.
9493  // There is allocation with suballoc.offset, suballoc.size.
9494  ++alloc1stCount;
9495  usedBytes += suballoc.size;
9496 
9497  // 3. Prepare for next iteration.
9498  lastOffset = suballoc.offset + suballoc.size;
9499  ++nextAlloc1stIndex;
9500  }
9501  // We are at the end.
9502  else
9503  {
9504  if(lastOffset < freeSpace1stTo2ndEnd)
9505  {
9506  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9507  ++unusedRangeCount;
9508  }
9509 
9510  // End of loop.
9511  lastOffset = freeSpace1stTo2ndEnd;
9512  }
9513  }
9514 
9515  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9516  {
9517  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9518  while(lastOffset < size)
9519  {
9520  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9521  while(nextAlloc2ndIndex != SIZE_MAX &&
9522  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9523  {
9524  --nextAlloc2ndIndex;
9525  }
9526 
9527  // Found non-null allocation.
9528  if(nextAlloc2ndIndex != SIZE_MAX)
9529  {
9530  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9531 
9532  // 1. Process free space before this allocation.
9533  if(lastOffset < suballoc.offset)
9534  {
9535  // There is free space from lastOffset to suballoc.offset.
9536  ++unusedRangeCount;
9537  }
9538 
9539  // 2. Process this allocation.
9540  // There is allocation with suballoc.offset, suballoc.size.
9541  ++alloc2ndCount;
9542  usedBytes += suballoc.size;
9543 
9544  // 3. Prepare for next iteration.
9545  lastOffset = suballoc.offset + suballoc.size;
9546  --nextAlloc2ndIndex;
9547  }
9548  // We are at the end.
9549  else
9550  {
9551  if(lastOffset < size)
9552  {
9553  // There is free space from lastOffset to size.
9554  ++unusedRangeCount;
9555  }
9556 
9557  // End of loop.
9558  lastOffset = size;
9559  }
9560  }
9561  }
9562 
9563  const VkDeviceSize unusedBytes = size - usedBytes;
9564  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9565 
9566  // SECOND PASS
9567  lastOffset = 0;
9568 
9569  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9570  {
9571  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9572  size_t nextAlloc2ndIndex = 0;
9573  while(lastOffset < freeSpace2ndTo1stEnd)
9574  {
9575  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9576  while(nextAlloc2ndIndex < suballoc2ndCount &&
9577  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9578  {
9579  ++nextAlloc2ndIndex;
9580  }
9581 
9582  // Found non-null allocation.
9583  if(nextAlloc2ndIndex < suballoc2ndCount)
9584  {
9585  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9586 
9587  // 1. Process free space before this allocation.
9588  if(lastOffset < suballoc.offset)
9589  {
9590  // There is free space from lastOffset to suballoc.offset.
9591  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9592  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9593  }
9594 
9595  // 2. Process this allocation.
9596  // There is allocation with suballoc.offset, suballoc.size.
9597  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9598 
9599  // 3. Prepare for next iteration.
9600  lastOffset = suballoc.offset + suballoc.size;
9601  ++nextAlloc2ndIndex;
9602  }
9603  // We are at the end.
9604  else
9605  {
9606  if(lastOffset < freeSpace2ndTo1stEnd)
9607  {
9608  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9609  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9610  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9611  }
9612 
9613  // End of loop.
9614  lastOffset = freeSpace2ndTo1stEnd;
9615  }
9616  }
9617  }
9618 
9619  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9620  while(lastOffset < freeSpace1stTo2ndEnd)
9621  {
9622  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9623  while(nextAlloc1stIndex < suballoc1stCount &&
9624  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9625  {
9626  ++nextAlloc1stIndex;
9627  }
9628 
9629  // Found non-null allocation.
9630  if(nextAlloc1stIndex < suballoc1stCount)
9631  {
9632  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9633 
9634  // 1. Process free space before this allocation.
9635  if(lastOffset < suballoc.offset)
9636  {
9637  // There is free space from lastOffset to suballoc.offset.
9638  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9640  }
9641 
9642  // 2. Process this allocation.
9643  // There is allocation with suballoc.offset, suballoc.size.
9644  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9645 
9646  // 3. Prepare for next iteration.
9647  lastOffset = suballoc.offset + suballoc.size;
9648  ++nextAlloc1stIndex;
9649  }
9650  // We are at the end.
9651  else
9652  {
9653  if(lastOffset < freeSpace1stTo2ndEnd)
9654  {
9655  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9656  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9657  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9658  }
9659 
9660  // End of loop.
9661  lastOffset = freeSpace1stTo2ndEnd;
9662  }
9663  }
9664 
9665  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9666  {
9667  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9668  while(lastOffset < size)
9669  {
9670  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9671  while(nextAlloc2ndIndex != SIZE_MAX &&
9672  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9673  {
9674  --nextAlloc2ndIndex;
9675  }
9676 
9677  // Found non-null allocation.
9678  if(nextAlloc2ndIndex != SIZE_MAX)
9679  {
9680  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9681 
9682  // 1. Process free space before this allocation.
9683  if(lastOffset < suballoc.offset)
9684  {
9685  // There is free space from lastOffset to suballoc.offset.
9686  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9687  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9688  }
9689 
9690  // 2. Process this allocation.
9691  // There is allocation with suballoc.offset, suballoc.size.
9692  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9693 
9694  // 3. Prepare for next iteration.
9695  lastOffset = suballoc.offset + suballoc.size;
9696  --nextAlloc2ndIndex;
9697  }
9698  // We are at the end.
9699  else
9700  {
9701  if(lastOffset < size)
9702  {
9703  // There is free space from lastOffset to size.
9704  const VkDeviceSize unusedRangeSize = size - lastOffset;
9705  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9706  }
9707 
9708  // End of loop.
9709  lastOffset = size;
9710  }
9711  }
9712  }
9713 
9714  PrintDetailedMap_End(json);
9715 }
9716 #endif // #if VMA_STATS_STRING_ENABLED
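// Editorial note: this JSON dump is reachable through the public API. A minimal
// sketch, assuming `allocator` exists and VMA_STATS_STRING_ENABLED is 1:
#if 0 // Illustrative sketch only.
char* statsJson = VMA_NULL;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE /*detailedMap*/);
// ... write statsJson to a file for offline inspection ...
vmaFreeStatsString(allocator, statsJson);
#endif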
9717 
9718 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9719  uint32_t currentFrameIndex,
9720  uint32_t frameInUseCount,
9721  VkDeviceSize bufferImageGranularity,
9722  VkDeviceSize allocSize,
9723  VkDeviceSize allocAlignment,
9724  bool upperAddress,
9725  VmaSuballocationType allocType,
9726  bool canMakeOtherLost,
9727  uint32_t strategy,
9728  VmaAllocationRequest* pAllocationRequest)
9729 {
9730  VMA_ASSERT(allocSize > 0);
9731  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9732  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9733  VMA_HEAVY_ASSERT(Validate());
9734  return upperAddress ?
9735  CreateAllocationRequest_UpperAddress(
9736  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9737  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9738  CreateAllocationRequest_LowerAddress(
9739  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9740  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9741 }
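// Editorial note: upperAddress requests reach this dispatch when the caller
// sets VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT on an allocation from a pool
// created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, which turns the block
// into a double stack. A minimal sketch, assuming `pool` exists:
#if 0 // Illustrative sketch only.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
// Pass allocCreateInfo to e.g. vmaCreateBuffer() as usual.
#endif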
9742 
9743 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9744  uint32_t currentFrameIndex,
9745  uint32_t frameInUseCount,
9746  VkDeviceSize bufferImageGranularity,
9747  VkDeviceSize allocSize,
9748  VkDeviceSize allocAlignment,
9749  VmaSuballocationType allocType,
9750  bool canMakeOtherLost,
9751  uint32_t strategy,
9752  VmaAllocationRequest* pAllocationRequest)
9753 {
9754  const VkDeviceSize size = GetSize();
9755  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9756  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9757 
9758  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9759  {
9760  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9761  return false;
9762  }
9763 
9764  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9765  if(allocSize > size)
9766  {
9767  return false;
9768  }
9769  VkDeviceSize resultBaseOffset = size - allocSize;
9770  if(!suballocations2nd.empty())
9771  {
9772  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9773  resultBaseOffset = lastSuballoc.offset - allocSize;
9774  if(allocSize > lastSuballoc.offset)
9775  {
9776  return false;
9777  }
9778  }
9779 
9780  // Start from offset equal to end of free space.
9781  VkDeviceSize resultOffset = resultBaseOffset;
9782 
9783  // Apply VMA_DEBUG_MARGIN at the end.
9784  if(VMA_DEBUG_MARGIN > 0)
9785  {
9786  if(resultOffset < VMA_DEBUG_MARGIN)
9787  {
9788  return false;
9789  }
9790  resultOffset -= VMA_DEBUG_MARGIN;
9791  }
9792 
9793  // Apply alignment.
9794  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
9795 
9796  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
9797  // Make bigger alignment if necessary.
9798  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9799  {
9800  bool bufferImageGranularityConflict = false;
9801  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9802  {
9803  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9804  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9805  {
9806  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
9807  {
9808  bufferImageGranularityConflict = true;
9809  break;
9810  }
9811  }
9812  else
9813  // Already on previous page.
9814  break;
9815  }
9816  if(bufferImageGranularityConflict)
9817  {
9818  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
9819  }
9820  }
9821 
9822  // There is enough free space.
9823  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
9824  suballocations1st.back().offset + suballocations1st.back().size :
9825  0;
9826  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
9827  {
9828  // Check previous suballocations for BufferImageGranularity conflicts.
9829  // If conflict exists, allocation cannot be made here.
9830  if(bufferImageGranularity > 1)
9831  {
9832  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9833  {
9834  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9835  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9836  {
9837  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
9838  {
9839  return false;
9840  }
9841  }
9842  else
9843  {
9844  // Already on next page.
9845  break;
9846  }
9847  }
9848  }
9849 
9850  // All tests passed: Success.
9851  pAllocationRequest->offset = resultOffset;
9852  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
9853  pAllocationRequest->sumItemSize = 0;
9854  // pAllocationRequest->item unused.
9855  pAllocationRequest->itemsToMakeLostCount = 0;
9856  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
9857  return true;
9858  }
9859 
9860  return false;
9861 }
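// Editorial worked example with hypothetical numbers and VMA_DEBUG_MARGIN = 0:
// block size 1024, 2nd.back().offset = 800, allocSize = 100, allocAlignment = 256:
//   resultBaseOffset = 800 - 100 = 700, then VmaAlignDown(700, 256) = 512;
//   if the 1st vector ends at offset 400, 400 <= 512 holds and the request
//   succeeds with offset = 512 (the [612, 800) range stays an unused gap).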
9862 
9863 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
9864  uint32_t currentFrameIndex,
9865  uint32_t frameInUseCount,
9866  VkDeviceSize bufferImageGranularity,
9867  VkDeviceSize allocSize,
9868  VkDeviceSize allocAlignment,
9869  VmaSuballocationType allocType,
9870  bool canMakeOtherLost,
9871  uint32_t strategy,
9872  VmaAllocationRequest* pAllocationRequest)
9873 {
9874  const VkDeviceSize size = GetSize();
9875  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9876  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9877 
9878  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9879  {
9880  // Try to allocate at the end of 1st vector.
9881 
9882  VkDeviceSize resultBaseOffset = 0;
9883  if(!suballocations1st.empty())
9884  {
9885  const VmaSuballocation& lastSuballoc = suballocations1st.back();
9886  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9887  }
9888 
9889  // Start from offset equal to beginning of free space.
9890  VkDeviceSize resultOffset = resultBaseOffset;
9891 
9892  // Apply VMA_DEBUG_MARGIN at the beginning.
9893  if(VMA_DEBUG_MARGIN > 0)
9894  {
9895  resultOffset += VMA_DEBUG_MARGIN;
9896  }
9897 
9898  // Apply alignment.
9899  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9900 
9901  // Check previous suballocations for BufferImageGranularity conflicts.
9902  // Make bigger alignment if necessary.
9903  if(bufferImageGranularity > 1 && !suballocations1st.empty())
9904  {
9905  bool bufferImageGranularityConflict = false;
9906  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
9907  {
9908  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
9909  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
9910  {
9911  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9912  {
9913  bufferImageGranularityConflict = true;
9914  break;
9915  }
9916  }
9917  else
9918  // Already on previous page.
9919  break;
9920  }
9921  if(bufferImageGranularityConflict)
9922  {
9923  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
9924  }
9925  }
9926 
9927  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
9928  suballocations2nd.back().offset : size;
9929 
9930  // There is enough free space at the end after alignment.
9931  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
9932  {
9933  // Check next suballocations for BufferImageGranularity conflicts.
9934  // If conflict exists, allocation cannot be made here.
9935  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9936  {
9937  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
9938  {
9939  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
9940  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9941  {
9942  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9943  {
9944  return false;
9945  }
9946  }
9947  else
9948  {
9949  // Already on previous page.
9950  break;
9951  }
9952  }
9953  }
9954 
9955  // All tests passed: Success.
9956  pAllocationRequest->offset = resultOffset;
9957  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
9958  pAllocationRequest->sumItemSize = 0;
9959  // pAllocationRequest->item, customData unused.
9960  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
9961  pAllocationRequest->itemsToMakeLostCount = 0;
9962  return true;
9963  }
9964  }
9965 
9966  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
9967  // beginning of 1st vector as the end of free space.
9968  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9969  {
9970  VMA_ASSERT(!suballocations1st.empty());
9971 
9972  VkDeviceSize resultBaseOffset = 0;
9973  if(!suballocations2nd.empty())
9974  {
9975  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9976  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
9977  }
9978 
9979  // Start from offset equal to beginning of free space.
9980  VkDeviceSize resultOffset = resultBaseOffset;
9981 
9982  // Apply VMA_DEBUG_MARGIN at the beginning.
9983  if(VMA_DEBUG_MARGIN > 0)
9984  {
9985  resultOffset += VMA_DEBUG_MARGIN;
9986  }
9987 
9988  // Apply alignment.
9989  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
9990 
9991  // Check previous suballocations for BufferImageGranularity conflicts.
9992  // Make bigger alignment if necessary.
9993  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
9994  {
9995  bool bufferImageGranularityConflict = false;
9996  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
9997  {
9998  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
9999  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10000  {
10001  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10002  {
10003  bufferImageGranularityConflict = true;
10004  break;
10005  }
10006  }
10007  else
10008  // Already on previous page.
10009  break;
10010  }
10011  if(bufferImageGranularityConflict)
10012  {
10013  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10014  }
10015  }
10016 
10017  pAllocationRequest->itemsToMakeLostCount = 0;
10018  pAllocationRequest->sumItemSize = 0;
10019  size_t index1st = m_1stNullItemsBeginCount;
10020 
10021  if(canMakeOtherLost)
10022  {
10023  while(index1st < suballocations1st.size() &&
10024  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10025  {
10026  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10027  const VmaSuballocation& suballoc = suballocations1st[index1st];
10028  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10029  {
10030  // No problem.
10031  }
10032  else
10033  {
10034  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10035  if(suballoc.hAllocation->CanBecomeLost() &&
10036  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10037  {
10038  ++pAllocationRequest->itemsToMakeLostCount;
10039  pAllocationRequest->sumItemSize += suballoc.size;
10040  }
10041  else
10042  {
10043  return false;
10044  }
10045  }
10046  ++index1st;
10047  }
10048 
10049  // Check next suballocations for BufferImageGranularity conflicts.
10050  // If conflict exists, we must mark more allocations lost or fail.
10051  if(bufferImageGranularity > 1)
10052  {
10053  while(index1st < suballocations1st.size())
10054  {
10055  const VmaSuballocation& suballoc = suballocations1st[index1st];
10056  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10057  {
10058  if(suballoc.hAllocation != VK_NULL_HANDLE)
10059  {
10060  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10061  if(suballoc.hAllocation->CanBecomeLost() &&
10062  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10063  {
10064  ++pAllocationRequest->itemsToMakeLostCount;
10065  pAllocationRequest->sumItemSize += suballoc.size;
10066  }
10067  else
10068  {
10069  return false;
10070  }
10071  }
10072  }
10073  else
10074  {
10075  // Already on next page.
10076  break;
10077  }
10078  ++index1st;
10079  }
10080  }
10081 
10082  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
10083  if(index1st == suballocations1st.size() &&
10084  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10085  {
10086  // TODO: Known limitation - this case is not implemented yet, so the allocation fails here.
10087  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10088  }
10089  }
10090 
10091  // There is enough free space at the end after alignment.
10092  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10093  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10094  {
10095  // Check next suballocations for BufferImageGranularity conflicts.
10096  // If conflict exists, allocation cannot be made here.
10097  if(bufferImageGranularity > 1)
10098  {
10099  for(size_t nextSuballocIndex = index1st;
10100  nextSuballocIndex < suballocations1st.size();
10101  nextSuballocIndex++)
10102  {
10103  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10104  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10105  {
10106  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10107  {
10108  return false;
10109  }
10110  }
10111  else
10112  {
10113  // Already on next page.
10114  break;
10115  }
10116  }
10117  }
10118 
10119  // All tests passed: Success.
10120  pAllocationRequest->offset = resultOffset;
10121  pAllocationRequest->sumFreeSize =
10122  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10123  - resultBaseOffset
10124  - pAllocationRequest->sumItemSize;
10125  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10126  // pAllocationRequest->item, customData unused.
10127  return true;
10128  }
10129  }
10130 
10131  return false;
10132 }
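
// [Editor's note: illustrative sketch, not part of the original source.] The linear
// metadata keeps two suballocation vectors whose meaning depends on m_2ndVectorMode.
// A rough picture of the block's address space in each mode:
//
//   SECOND_VECTOR_EMPTY:        |--1st-->.............................|
//   SECOND_VECTOR_RING_BUFFER:  |--2nd-->......|--1st-->..............|
//   SECOND_VECTOR_DOUBLE_STACK: |--1st-->......................<--2nd-|
//
// CreateAllocationRequest() above accordingly produces requests of type EndOf1st
// (append after 1st), EndOf2nd (wrap around into the space before 1st), or
// UpperAddress (grow the upper stack downward); Alloc() below dispatches on that type.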
10133 
10134 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10135  uint32_t currentFrameIndex,
10136  uint32_t frameInUseCount,
10137  VmaAllocationRequest* pAllocationRequest)
10138 {
10139  if(pAllocationRequest->itemsToMakeLostCount == 0)
10140  {
10141  return true;
10142  }
10143 
10144  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10145 
10146  // We always start from 1st.
10147  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10148  size_t index = m_1stNullItemsBeginCount;
10149  size_t madeLostCount = 0;
10150  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10151  {
10152  if(index == suballocations->size())
10153  {
10154  index = 0;
10155  // If we get to the end of 1st, we wrap around to the beginning of 2nd (ring buffer) or back to the start of 1st.
10156  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10157  {
10158  suballocations = &AccessSuballocations2nd();
10159  }
10160  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10161  // suballocations continues pointing at AccessSuballocations1st().
10162  VMA_ASSERT(!suballocations->empty());
10163  }
10164  VmaSuballocation& suballoc = (*suballocations)[index];
10165  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10166  {
10167  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10168  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10169  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10170  {
10171  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10172  suballoc.hAllocation = VK_NULL_HANDLE;
10173  m_SumFreeSize += suballoc.size;
10174  if(suballocations == &AccessSuballocations1st())
10175  {
10176  ++m_1stNullItemsMiddleCount;
10177  }
10178  else
10179  {
10180  ++m_2ndNullItemsCount;
10181  }
10182  ++madeLostCount;
10183  }
10184  else
10185  {
10186  return false;
10187  }
10188  }
10189  ++index;
10190  }
10191 
10192  CleanupAfterFree();
10193  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10194 
10195  return true;
10196 }
10197 
10198 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10199 {
10200  uint32_t lostAllocationCount = 0;
10201 
10202  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10203  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10204  {
10205  VmaSuballocation& suballoc = suballocations1st[i];
10206  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10207  suballoc.hAllocation->CanBecomeLost() &&
10208  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10209  {
10210  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10211  suballoc.hAllocation = VK_NULL_HANDLE;
10212  ++m_1stNullItemsMiddleCount;
10213  m_SumFreeSize += suballoc.size;
10214  ++lostAllocationCount;
10215  }
10216  }
10217 
10218  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10219  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10220  {
10221  VmaSuballocation& suballoc = suballocations2nd[i];
10222  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10223  suballoc.hAllocation->CanBecomeLost() &&
10224  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10225  {
10226  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10227  suballoc.hAllocation = VK_NULL_HANDLE;
10228  ++m_2ndNullItemsCount;
10229  m_SumFreeSize += suballoc.size;
10230  ++lostAllocationCount;
10231  }
10232  }
10233 
10234  if(lostAllocationCount)
10235  {
10236  CleanupAfterFree();
10237  }
10238 
10239  return lostAllocationCount;
10240 }
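
// [Editor's note: illustrative sketch, not part of the original source.] From the
// public API, lost allocations are driven by the frame index; a hedged usage
// outline (the allocator and alloc handles are assumed to exist):
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//   {
//       // The allocation was made lost - destroy and recreate the resource.
//   }
//
// As the checks above show, an allocation is only a candidate when its last-use
// frame index plus the pool's frameInUseCount lies behind the current frame.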
10241 
10242 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10243 {
10244  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10245  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10246  {
10247  const VmaSuballocation& suballoc = suballocations1st[i];
10248  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10249  {
10250  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10251  {
10252  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10253  return VK_ERROR_VALIDATION_FAILED_EXT;
10254  }
10255  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10256  {
10257  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10258  return VK_ERROR_VALIDATION_FAILED_EXT;
10259  }
10260  }
10261  }
10262 
10263  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10264  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10265  {
10266  const VmaSuballocation& suballoc = suballocations2nd[i];
10267  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10268  {
10269  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10270  {
10271  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10272  return VK_ERROR_VALIDATION_FAILED_EXT;
10273  }
10274  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10275  {
10276  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10277  return VK_ERROR_VALIDATION_FAILED_EXT;
10278  }
10279  }
10280  }
10281 
10282  return VK_SUCCESS;
10283 }
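
// [Editor's note: illustrative sketch, not part of the original source.] With a
// nonzero VMA_DEBUG_MARGIN, each suballocation is surrounded by margins filled
// with VMA_CORRUPTION_DETECTION_MAGIC_VALUE:
//
//   | ...magic-filled margin... | allocation data | ...magic-filled margin... |
//     offset - VMA_DEBUG_MARGIN   offset            offset + size
//
// CheckCorruption() above re-reads the magic value just before offset and just
// after offset + size; any out-of-bounds write by the application trips the assert.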
10284 
10285 void VmaBlockMetadata_Linear::Alloc(
10286  const VmaAllocationRequest& request,
10287  VmaSuballocationType type,
10288  VkDeviceSize allocSize,
10289  VmaAllocation hAllocation)
10290 {
10291  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10292 
10293  switch(request.type)
10294  {
10295  case VmaAllocationRequestType::UpperAddress:
10296  {
10297  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10298  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10299  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10300  suballocations2nd.push_back(newSuballoc);
10301  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10302  }
10303  break;
10304  case VmaAllocationRequestType::EndOf1st:
10305  {
10306  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10307 
10308  VMA_ASSERT(suballocations1st.empty() ||
10309  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10310  // Check if it fits before the end of the block.
10311  VMA_ASSERT(request.offset + allocSize <= GetSize());
10312 
10313  suballocations1st.push_back(newSuballoc);
10314  }
10315  break;
10316  case VmaAllocationRequestType::EndOf2nd:
10317  {
10318  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10319  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10320  VMA_ASSERT(!suballocations1st.empty() &&
10321  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10322  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10323 
10324  switch(m_2ndVectorMode)
10325  {
10326  case SECOND_VECTOR_EMPTY:
10327  // First allocation from second part ring buffer.
10328  VMA_ASSERT(suballocations2nd.empty());
10329  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10330  break;
10331  case SECOND_VECTOR_RING_BUFFER:
10332  // 2-part ring buffer is already started.
10333  VMA_ASSERT(!suballocations2nd.empty());
10334  break;
10335  case SECOND_VECTOR_DOUBLE_STACK:
10336  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10337  break;
10338  default:
10339  VMA_ASSERT(0);
10340  }
10341 
10342  suballocations2nd.push_back(newSuballoc);
10343  }
10344  break;
10345  default:
10346  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10347  }
10348 
10349  m_SumFreeSize -= newSuballoc.size;
10350 }
10351 
10352 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10353 {
10354  FreeAtOffset(allocation->GetOffset());
10355 }
10356 
10357 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10358 {
10359  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10360  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10361 
10362  if(!suballocations1st.empty())
10363  {
10364  // If it is the first live allocation in 1st vector: mark it as the next empty item at the beginning.
10365  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10366  if(firstSuballoc.offset == offset)
10367  {
10368  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10369  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10370  m_SumFreeSize += firstSuballoc.size;
10371  ++m_1stNullItemsBeginCount;
10372  CleanupAfterFree();
10373  return;
10374  }
10375  }
10376 
10377  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10378  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10379  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10380  {
10381  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10382  if(lastSuballoc.offset == offset)
10383  {
10384  m_SumFreeSize += lastSuballoc.size;
10385  suballocations2nd.pop_back();
10386  CleanupAfterFree();
10387  return;
10388  }
10389  }
10390  // Last allocation in 1st vector.
10391  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10392  {
10393  VmaSuballocation& lastSuballoc = suballocations1st.back();
10394  if(lastSuballoc.offset == offset)
10395  {
10396  m_SumFreeSize += lastSuballoc.size;
10397  suballocations1st.pop_back();
10398  CleanupAfterFree();
10399  return;
10400  }
10401  }
10402 
10403  // Item from the middle of 1st vector.
10404  {
10405  VmaSuballocation refSuballoc;
10406  refSuballoc.offset = offset;
10407  // The rest of the members stay intentionally uninitialized for better performance.
10408  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
10409  suballocations1st.begin() + m_1stNullItemsBeginCount,
10410  suballocations1st.end(),
10411  refSuballoc);
10412  if(it != suballocations1st.end())
10413  {
10414  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10415  it->hAllocation = VK_NULL_HANDLE;
10416  ++m_1stNullItemsMiddleCount;
10417  m_SumFreeSize += it->size;
10418  CleanupAfterFree();
10419  return;
10420  }
10421  }
10422 
10423  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10424  {
10425  // Item from the middle of 2nd vector.
10426  VmaSuballocation refSuballoc;
10427  refSuballoc.offset = offset;
10428  // The rest of the members stay intentionally uninitialized for better performance.
10429  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10430  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
10431  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
10432  if(it != suballocations2nd.end())
10433  {
10434  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10435  it->hAllocation = VK_NULL_HANDLE;
10436  ++m_2ndNullItemsCount;
10437  m_SumFreeSize += it->size;
10438  CleanupAfterFree();
10439  return;
10440  }
10441  }
10442 
10443  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10444 }
10445 
10446 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10447 {
10448  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10449  const size_t suballocCount = AccessSuballocations1st().size();
10450  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10451 }
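
// [Editor's note: worked example, not part of the original source.] The heuristic
// reads: compact when null items outnumber live items by at least 3:2 and the
// vector is non-trivial. E.g. with 40 suballocations of which 25 are null:
//   nullItemCount * 2 = 50 >= (40 - 25) * 3 = 45, and 40 > 32  ->  compact.
// With only 10 null items: 20 < 90  ->  keep the holes and skip the copy pass.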
10452 
10453 void VmaBlockMetadata_Linear::CleanupAfterFree()
10454 {
10455  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10456  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10457 
10458  if(IsEmpty())
10459  {
10460  suballocations1st.clear();
10461  suballocations2nd.clear();
10462  m_1stNullItemsBeginCount = 0;
10463  m_1stNullItemsMiddleCount = 0;
10464  m_2ndNullItemsCount = 0;
10465  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10466  }
10467  else
10468  {
10469  const size_t suballoc1stCount = suballocations1st.size();
10470  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10471  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10472 
10473  // Find more null items at the beginning of 1st vector.
10474  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10475  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10476  {
10477  ++m_1stNullItemsBeginCount;
10478  --m_1stNullItemsMiddleCount;
10479  }
10480 
10481  // Find more null items at the end of 1st vector.
10482  while(m_1stNullItemsMiddleCount > 0 &&
10483  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10484  {
10485  --m_1stNullItemsMiddleCount;
10486  suballocations1st.pop_back();
10487  }
10488 
10489  // Find more null items at the end of 2nd vector.
10490  while(m_2ndNullItemsCount > 0 &&
10491  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10492  {
10493  --m_2ndNullItemsCount;
10494  suballocations2nd.pop_back();
10495  }
10496 
10497  // Find more null items at the beginning of 2nd vector.
10498  while(m_2ndNullItemsCount > 0 &&
10499  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10500  {
10501  --m_2ndNullItemsCount;
10502  suballocations2nd.remove(0);
10503  }
10504 
10505  if(ShouldCompact1st())
10506  {
10507  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10508  size_t srcIndex = m_1stNullItemsBeginCount;
10509  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10510  {
10511  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10512  {
10513  ++srcIndex;
10514  }
10515  if(dstIndex != srcIndex)
10516  {
10517  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10518  }
10519  ++srcIndex;
10520  }
10521  suballocations1st.resize(nonNullItemCount);
10522  m_1stNullItemsBeginCount = 0;
10523  m_1stNullItemsMiddleCount = 0;
10524  }
10525 
10526  // 2nd vector became empty.
10527  if(suballocations2nd.empty())
10528  {
10529  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10530  }
10531 
10532  // 1st vector became empty.
10533  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10534  {
10535  suballocations1st.clear();
10536  m_1stNullItemsBeginCount = 0;
10537 
10538  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10539  {
10540  // Swap 1st with 2nd. Now 2nd is empty.
10541  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10542  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10543  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10544  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10545  {
10546  ++m_1stNullItemsBeginCount;
10547  --m_1stNullItemsMiddleCount;
10548  }
10549  m_2ndNullItemsCount = 0;
10550  m_1stVectorIndex ^= 1;
10551  }
10552  }
10553  }
10554 
10555  VMA_HEAVY_ASSERT(Validate());
10556 }
10557 
10558 
10559 ////////////////////////////////////////////////////////////////////////////////
10560 // class VmaBlockMetadata_Buddy
10561 
10562 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10563  VmaBlockMetadata(hAllocator),
10564  m_Root(VMA_NULL),
10565  m_AllocationCount(0),
10566  m_FreeCount(1),
10567  m_SumFreeSize(0)
10568 {
10569  memset(m_FreeList, 0, sizeof(m_FreeList));
10570 }
10571 
10572 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10573 {
10574  DeleteNode(m_Root);
10575 }
10576 
10577 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10578 {
10579  VmaBlockMetadata::Init(size);
10580 
10581  m_UsableSize = VmaPrevPow2(size);
10582  m_SumFreeSize = m_UsableSize;
10583 
10584  // Calculate m_LevelCount.
10585  m_LevelCount = 1;
10586  while(m_LevelCount < MAX_LEVELS &&
10587  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10588  {
10589  ++m_LevelCount;
10590  }
10591 
10592  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10593  rootNode->offset = 0;
10594  rootNode->type = Node::TYPE_FREE;
10595  rootNode->parent = VMA_NULL;
10596  rootNode->buddy = VMA_NULL;
10597 
10598  m_Root = rootNode;
10599  AddToFreeListFront(0, rootNode);
10600 }
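
// [Editor's note: worked example, not part of the original source.] For a block of
// e.g. 1000 bytes, VmaPrevPow2 yields m_UsableSize = 512; the remaining 488 bytes
// are reported separately via GetUnusableSize(). Level 0 then spans 512 bytes,
// level 1 spans 256, level 2 spans 128, and so on, as long as the node size stays
// at or above MIN_NODE_SIZE and the level count stays below MAX_LEVELS.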
10601 
10602 bool VmaBlockMetadata_Buddy::Validate() const
10603 {
10604  // Validate tree.
10605  ValidationContext ctx;
10606  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10607  {
10608  VMA_VALIDATE(false && "ValidateNode failed.");
10609  }
10610  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10611  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10612 
10613  // Validate free node lists.
10614  for(uint32_t level = 0; level < m_LevelCount; ++level)
10615  {
10616  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10617  m_FreeList[level].front->free.prev == VMA_NULL);
10618 
10619  for(Node* node = m_FreeList[level].front;
10620  node != VMA_NULL;
10621  node = node->free.next)
10622  {
10623  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10624 
10625  if(node->free.next == VMA_NULL)
10626  {
10627  VMA_VALIDATE(m_FreeList[level].back == node);
10628  }
10629  else
10630  {
10631  VMA_VALIDATE(node->free.next->free.prev == node);
10632  }
10633  }
10634  }
10635 
10636  // Validate that free lists at higher levels are empty.
10637  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10638  {
10639  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10640  }
10641 
10642  return true;
10643 }
10644 
10645 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10646 {
10647  for(uint32_t level = 0; level < m_LevelCount; ++level)
10648  {
10649  if(m_FreeList[level].front != VMA_NULL)
10650  {
10651  return LevelToNodeSize(level);
10652  }
10653  }
10654  return 0;
10655 }
10656 
10657 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10658 {
10659  const VkDeviceSize unusableSize = GetUnusableSize();
10660 
10661  outInfo.blockCount = 1;
10662 
10663  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10664  outInfo.usedBytes = outInfo.unusedBytes = 0;
10665 
10666  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10667  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10668  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10669 
10670  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10671 
10672  if(unusableSize > 0)
10673  {
10674  ++outInfo.unusedRangeCount;
10675  outInfo.unusedBytes += unusableSize;
10676  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10677  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10678  }
10679 }
10680 
10681 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10682 {
10683  const VkDeviceSize unusableSize = GetUnusableSize();
10684 
10685  inoutStats.size += GetSize();
10686  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10687  inoutStats.allocationCount += m_AllocationCount;
10688  inoutStats.unusedRangeCount += m_FreeCount;
10689  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10690 
10691  if(unusableSize > 0)
10692  {
10693  ++inoutStats.unusedRangeCount;
10694  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10695  }
10696 }
10697 
10698 #if VMA_STATS_STRING_ENABLED
10699 
10700 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10701 {
10702  // TODO optimize
10703  VmaStatInfo stat;
10704  CalcAllocationStatInfo(stat);
10705 
10706  PrintDetailedMap_Begin(
10707  json,
10708  stat.unusedBytes,
10709  stat.allocationCount,
10710  stat.unusedRangeCount);
10711 
10712  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10713 
10714  const VkDeviceSize unusableSize = GetUnusableSize();
10715  if(unusableSize > 0)
10716  {
10717  PrintDetailedMap_UnusedRange(json,
10718  m_UsableSize, // offset
10719  unusableSize); // size
10720  }
10721 
10722  PrintDetailedMap_End(json);
10723 }
10724 
10725 #endif // #if VMA_STATS_STRING_ENABLED
10726 
10727 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10728  uint32_t currentFrameIndex,
10729  uint32_t frameInUseCount,
10730  VkDeviceSize bufferImageGranularity,
10731  VkDeviceSize allocSize,
10732  VkDeviceSize allocAlignment,
10733  bool upperAddress,
10734  VmaSuballocationType allocType,
10735  bool canMakeOtherLost,
10736  uint32_t strategy,
10737  VmaAllocationRequest* pAllocationRequest)
10738 {
10739  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10740 
10741  // Simple way to respect bufferImageGranularity. May be optimized some day.
10742  // Whenever it might be an OPTIMAL image...
10743  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10744  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10745  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10746  {
10747  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10748  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10749  }
10750 
10751  if(allocSize > m_UsableSize)
10752  {
10753  return false;
10754  }
10755 
10756  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10757  for(uint32_t level = targetLevel + 1; level--; )
10758  {
10759  for(Node* freeNode = m_FreeList[level].front;
10760  freeNode != VMA_NULL;
10761  freeNode = freeNode->free.next)
10762  {
10763  if(freeNode->offset % allocAlignment == 0)
10764  {
10765  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10766  pAllocationRequest->offset = freeNode->offset;
10767  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10768  pAllocationRequest->sumItemSize = 0;
10769  pAllocationRequest->itemsToMakeLostCount = 0;
10770  pAllocationRequest->customData = (void*)(uintptr_t)level;
10771  return true;
10772  }
10773  }
10774  }
10775 
10776  return false;
10777 }
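
// [Editor's note: illustrative note, not part of the original source.] The loop
// above starts at targetLevel - the deepest level whose nodes are still large
// enough - and walks up toward level 0, so the smallest adequate free node is
// preferred. E.g. with m_UsableSize = 512 and allocSize = 100, the 128-byte free
// list is tried first; if it is empty, a 256-byte and finally the 512-byte root
// node is considered, to be split down to size later in Alloc().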
10778 
10779 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10780  uint32_t currentFrameIndex,
10781  uint32_t frameInUseCount,
10782  VmaAllocationRequest* pAllocationRequest)
10783 {
10784  /*
10785  Lost allocations are not supported in buddy allocator at the moment.
10786  Support might be added in the future.
10787  */
10788  return pAllocationRequest->itemsToMakeLostCount == 0;
10789 }
10790 
10791 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10792 {
10793  /*
10794  Lost allocations are not supported in buddy allocator at the moment.
10795  Support might be added in the future.
10796  */
10797  return 0;
10798 }
10799 
10800 void VmaBlockMetadata_Buddy::Alloc(
10801  const VmaAllocationRequest& request,
10802  VmaSuballocationType type,
10803  VkDeviceSize allocSize,
10804  VmaAllocation hAllocation)
10805 {
10806  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
10807 
10808  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10809  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
10810 
10811  Node* currNode = m_FreeList[currLevel].front;
10812  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10813  while(currNode->offset != request.offset)
10814  {
10815  currNode = currNode->free.next;
10816  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
10817  }
10818 
10819  // Go down, splitting free nodes.
10820  while(currLevel < targetLevel)
10821  {
10822  // currNode is already first free node at currLevel.
10823  // Remove it from list of free nodes at this currLevel.
10824  RemoveFromFreeList(currLevel, currNode);
10825 
10826  const uint32_t childrenLevel = currLevel + 1;
10827 
10828  // Create two free sub-nodes.
10829  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
10830  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
10831 
10832  leftChild->offset = currNode->offset;
10833  leftChild->type = Node::TYPE_FREE;
10834  leftChild->parent = currNode;
10835  leftChild->buddy = rightChild;
10836 
10837  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
10838  rightChild->type = Node::TYPE_FREE;
10839  rightChild->parent = currNode;
10840  rightChild->buddy = leftChild;
10841 
10842  // Convert current currNode to split type.
10843  currNode->type = Node::TYPE_SPLIT;
10844  currNode->split.leftChild = leftChild;
10845 
10846  // Add child nodes to free list. Order is important!
10847  AddToFreeListFront(childrenLevel, rightChild);
10848  AddToFreeListFront(childrenLevel, leftChild);
10849 
10850  ++m_FreeCount;
10851  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
10852  ++currLevel;
10853  currNode = m_FreeList[currLevel].front;
10854 
10855  /*
10856  We can be sure that currNode, as left child of node previously split,
10857  also fulfills the alignment requirement.
10858  */
10859  }
10860 
10861  // Remove from free list.
10862  VMA_ASSERT(currLevel == targetLevel &&
10863  currNode != VMA_NULL &&
10864  currNode->type == Node::TYPE_FREE);
10865  RemoveFromFreeList(currLevel, currNode);
10866 
10867  // Convert to allocation node.
10868  currNode->type = Node::TYPE_ALLOCATION;
10869  currNode->allocation.alloc = hAllocation;
10870 
10871  ++m_AllocationCount;
10872  --m_FreeCount;
10873  m_SumFreeSize -= allocSize;
10874 }
10875 
10876 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10877 {
10878  if(node->type == Node::TYPE_SPLIT)
10879  {
10880  DeleteNode(node->split.leftChild->buddy);
10881  DeleteNode(node->split.leftChild);
10882  }
10883 
10884  vma_delete(GetAllocationCallbacks(), node);
10885 }
10886 
10887 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10888 {
10889  VMA_VALIDATE(level < m_LevelCount);
10890  VMA_VALIDATE(curr->parent == parent);
10891  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10892  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10893  switch(curr->type)
10894  {
10895  case Node::TYPE_FREE:
10896  // curr->free.prev, next are validated separately.
10897  ctx.calculatedSumFreeSize += levelNodeSize;
10898  ++ctx.calculatedFreeCount;
10899  break;
10900  case Node::TYPE_ALLOCATION:
10901  ++ctx.calculatedAllocationCount;
10902  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10903  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10904  break;
10905  case Node::TYPE_SPLIT:
10906  {
10907  const uint32_t childrenLevel = level + 1;
10908  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10909  const Node* const leftChild = curr->split.leftChild;
10910  VMA_VALIDATE(leftChild != VMA_NULL);
10911  VMA_VALIDATE(leftChild->offset == curr->offset);
10912  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10913  {
10914  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10915  }
10916  const Node* const rightChild = leftChild->buddy;
10917  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10918  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10919  {
10920  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10921  }
10922  }
10923  break;
10924  default:
10925  return false;
10926  }
10927 
10928  return true;
10929 }
10930 
10931 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10932 {
10933  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10934  uint32_t level = 0;
10935  VkDeviceSize currLevelNodeSize = m_UsableSize;
10936  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10937  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10938  {
10939  ++level;
10940  currLevelNodeSize = nextLevelNodeSize;
10941  nextLevelNodeSize = currLevelNodeSize >> 1;
10942  }
10943  return level;
10944 }
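
// [Editor's note: worked example, not part of the original source.] With
// m_UsableSize = 512 and allocSize = 100 the loop runs:
//   level 0: next = 256, 100 <= 256 -> level 1
//   level 1: next = 128, 100 <= 128 -> level 2
//   level 2: next = 64,  100 >  64  -> stop
// and returns level 2, i.e. 128-byte nodes - the smallest size that still fits.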
10945 
10946 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
10947 {
10948  // Find node and level.
10949  Node* node = m_Root;
10950  VkDeviceSize nodeOffset = 0;
10951  uint32_t level = 0;
10952  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
10953  while(node->type == Node::TYPE_SPLIT)
10954  {
10955  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
10956  if(offset < nodeOffset + nextLevelSize)
10957  {
10958  node = node->split.leftChild;
10959  }
10960  else
10961  {
10962  node = node->split.leftChild->buddy;
10963  nodeOffset += nextLevelSize;
10964  }
10965  ++level;
10966  levelNodeSize = nextLevelSize;
10967  }
10968 
10969  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
10970  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
10971 
10972  ++m_FreeCount;
10973  --m_AllocationCount;
10974  m_SumFreeSize += alloc->GetSize();
10975 
10976  node->type = Node::TYPE_FREE;
10977 
10978  // Join free nodes if possible.
10979  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
10980  {
10981  RemoveFromFreeList(level, node->buddy);
10982  Node* const parent = node->parent;
10983 
10984  vma_delete(GetAllocationCallbacks(), node->buddy);
10985  vma_delete(GetAllocationCallbacks(), node);
10986  parent->type = Node::TYPE_FREE;
10987 
10988  node = parent;
10989  --level;
10990  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
10991  --m_FreeCount;
10992  }
10993 
10994  AddToFreeListFront(level, node);
10995 }
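
// [Editor's note: illustrative note, not part of the original source.] The loop
// above is the classic buddy coalescing step: if a freed 64-byte node's buddy is
// also free, the buddy leaves the 64-byte free list, both children are deleted,
// and their 128-byte parent becomes free; this repeats upward until a buddy is
// still in use or the root is reached, so free space re-forms into large nodes.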
10996 
10997 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10998 {
10999  switch(node->type)
11000  {
11001  case Node::TYPE_FREE:
11002  ++outInfo.unusedRangeCount;
11003  outInfo.unusedBytes += levelNodeSize;
11004  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11005  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11006  break;
11007  case Node::TYPE_ALLOCATION:
11008  {
11009  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11010  ++outInfo.allocationCount;
11011  outInfo.usedBytes += allocSize;
11012  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11013  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11014 
11015  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11016  if(unusedRangeSize > 0)
11017  {
11018  ++outInfo.unusedRangeCount;
11019  outInfo.unusedBytes += unusedRangeSize;
11020  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11021  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11022  }
11023  }
11024  break;
11025  case Node::TYPE_SPLIT:
11026  {
11027  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11028  const Node* const leftChild = node->split.leftChild;
11029  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11030  const Node* const rightChild = leftChild->buddy;
11031  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11032  }
11033  break;
11034  default:
11035  VMA_ASSERT(0);
11036  }
11037 }
11038 
11039 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11040 {
11041  VMA_ASSERT(node->type == Node::TYPE_FREE);
11042 
11043  // List is empty.
11044  Node* const frontNode = m_FreeList[level].front;
11045  if(frontNode == VMA_NULL)
11046  {
11047  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11048  node->free.prev = node->free.next = VMA_NULL;
11049  m_FreeList[level].front = m_FreeList[level].back = node;
11050  }
11051  else
11052  {
11053  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11054  node->free.prev = VMA_NULL;
11055  node->free.next = frontNode;
11056  frontNode->free.prev = node;
11057  m_FreeList[level].front = node;
11058  }
11059 }
11060 
11061 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11062 {
11063  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11064 
11065  // It is at the front.
11066  if(node->free.prev == VMA_NULL)
11067  {
11068  VMA_ASSERT(m_FreeList[level].front == node);
11069  m_FreeList[level].front = node->free.next;
11070  }
11071  else
11072  {
11073  Node* const prevFreeNode = node->free.prev;
11074  VMA_ASSERT(prevFreeNode->free.next == node);
11075  prevFreeNode->free.next = node->free.next;
11076  }
11077 
11078  // It is at the back.
11079  if(node->free.next == VMA_NULL)
11080  {
11081  VMA_ASSERT(m_FreeList[level].back == node);
11082  m_FreeList[level].back = node->free.prev;
11083  }
11084  else
11085  {
11086  Node* const nextFreeNode = node->free.next;
11087  VMA_ASSERT(nextFreeNode->free.prev == node);
11088  nextFreeNode->free.prev = node->free.prev;
11089  }
11090 }
11091 
11092 #if VMA_STATS_STRING_ENABLED
11093 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11094 {
11095  switch(node->type)
11096  {
11097  case Node::TYPE_FREE:
11098  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11099  break;
11100  case Node::TYPE_ALLOCATION:
11101  {
11102  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11103  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11104  if(allocSize < levelNodeSize)
11105  {
11106  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11107  }
11108  }
11109  break;
11110  case Node::TYPE_SPLIT:
11111  {
11112  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11113  const Node* const leftChild = node->split.leftChild;
11114  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11115  const Node* const rightChild = leftChild->buddy;
11116  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11117  }
11118  break;
11119  default:
11120  VMA_ASSERT(0);
11121  }
11122 }
11123 #endif // #if VMA_STATS_STRING_ENABLED
11124 
11125 
11126 ////////////////////////////////////////////////////////////////////////////////
11127 // class VmaDeviceMemoryBlock
11128 
11129 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11130  m_pMetadata(VMA_NULL),
11131  m_MemoryTypeIndex(UINT32_MAX),
11132  m_Id(0),
11133  m_hMemory(VK_NULL_HANDLE),
11134  m_MapCount(0),
11135  m_pMappedData(VMA_NULL)
11136 {
11137 }
11138 
11139 void VmaDeviceMemoryBlock::Init(
11140  VmaAllocator hAllocator,
11141  VmaPool hParentPool,
11142  uint32_t newMemoryTypeIndex,
11143  VkDeviceMemory newMemory,
11144  VkDeviceSize newSize,
11145  uint32_t id,
11146  uint32_t algorithm)
11147 {
11148  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11149 
11150  m_hParentPool = hParentPool;
11151  m_MemoryTypeIndex = newMemoryTypeIndex;
11152  m_Id = id;
11153  m_hMemory = newMemory;
11154 
11155  switch(algorithm)
11156  {
11157  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11158  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11159  break;
11160  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11161  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11162  break;
11163  default:
11164  VMA_ASSERT(0);
11165  // Fall-through.
11166  case 0:
11167  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11168  }
11169  m_pMetadata->Init(newSize);
11170 }
11171 
11172 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11173 {
11174  // This is the most important assert in the entire library.
11175  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
11176  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11177 
11178  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11179  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11180  m_hMemory = VK_NULL_HANDLE;
11181 
11182  vma_delete(allocator, m_pMetadata);
11183  m_pMetadata = VMA_NULL;
11184 }
11185 
11186 bool VmaDeviceMemoryBlock::Validate() const
11187 {
11188  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11189  (m_pMetadata->GetSize() != 0));
11190 
11191  return m_pMetadata->Validate();
11192 }
11193 
11194 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11195 {
11196  void* pData = nullptr;
11197  VkResult res = Map(hAllocator, 1, &pData);
11198  if(res != VK_SUCCESS)
11199  {
11200  return res;
11201  }
11202 
11203  res = m_pMetadata->CheckCorruption(pData);
11204 
11205  Unmap(hAllocator, 1);
11206 
11207  return res;
11208 }
11209 
11210 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11211 {
11212  if(count == 0)
11213  {
11214  return VK_SUCCESS;
11215  }
11216 
11217  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11218  if(m_MapCount != 0)
11219  {
11220  m_MapCount += count;
11221  VMA_ASSERT(m_pMappedData != VMA_NULL);
11222  if(ppData != VMA_NULL)
11223  {
11224  *ppData = m_pMappedData;
11225  }
11226  return VK_SUCCESS;
11227  }
11228  else
11229  {
11230  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11231  hAllocator->m_hDevice,
11232  m_hMemory,
11233  0, // offset
11234  VK_WHOLE_SIZE,
11235  0, // flags
11236  &m_pMappedData);
11237  if(result == VK_SUCCESS)
11238  {
11239  if(ppData != VMA_NULL)
11240  {
11241  *ppData = m_pMappedData;
11242  }
11243  m_MapCount = count;
11244  }
11245  return result;
11246  }
11247 }
11248 
11249 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11250 {
11251  if(count == 0)
11252  {
11253  return;
11254  }
11255 
11256  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11257  if(m_MapCount >= count)
11258  {
11259  m_MapCount -= count;
11260  if(m_MapCount == 0)
11261  {
11262  m_pMappedData = VMA_NULL;
11263  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11264  }
11265  }
11266  else
11267  {
11268  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11269  }
11270 }
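
// [Editor's note: illustrative sketch, not part of the original source.] Map and
// Unmap are reference counted per block: only the first Map calls vkMapMemory and
// only the count reaching zero calls vkUnmapMemory. For example:
//
//   block->Map(hAllocator, 1, &p1);   // maps, m_MapCount == 1
//   block->Map(hAllocator, 1, &p2);   // reuses mapping, p2 == p1, count == 2
//   block->Unmap(hAllocator, 1);      // count == 1, memory stays mapped
//   block->Unmap(hAllocator, 1);      // count == 0, vkUnmapMemory is called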
11271 
11272 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11273 {
11274  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11275  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11276 
11277  void* pData;
11278  VkResult res = Map(hAllocator, 1, &pData);
11279  if(res != VK_SUCCESS)
11280  {
11281  return res;
11282  }
11283 
11284  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11285  VmaWriteMagicValue(pData, allocOffset + allocSize);
11286 
11287  Unmap(hAllocator, 1);
11288 
11289  return VK_SUCCESS;
11290 }
11291 
11292 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11293 {
11294  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11295  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11296 
11297  void* pData;
11298  VkResult res = Map(hAllocator, 1, &pData);
11299  if(res != VK_SUCCESS)
11300  {
11301  return res;
11302  }
11303 
11304  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11305  {
11306  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11307  }
11308  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11309  {
11310  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11311  }
11312 
11313  Unmap(hAllocator, 1);
11314 
11315  return VK_SUCCESS;
11316 }
11317 
11318 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11319  const VmaAllocator hAllocator,
11320  const VmaAllocation hAllocation,
11321  VkBuffer hBuffer)
11322 {
11323  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11324  hAllocation->GetBlock() == this);
11325  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11326  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11327  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
11328  hAllocator->m_hDevice,
11329  hBuffer,
11330  m_hMemory,
11331  hAllocation->GetOffset());
11332 }
11333 
11334 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11335  const VmaAllocator hAllocator,
11336  const VmaAllocation hAllocation,
11337  VkImage hImage)
11338 {
11339  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11340  hAllocation->GetBlock() == this);
11341  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11342  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11343  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
11344  hAllocator->m_hDevice,
11345  hImage,
11346  m_hMemory,
11347  hAllocation->GetOffset());
11348 }
11349 
11350 static void InitStatInfo(VmaStatInfo& outInfo)
11351 {
11352  memset(&outInfo, 0, sizeof(outInfo));
11353  outInfo.allocationSizeMin = UINT64_MAX;
11354  outInfo.unusedRangeSizeMin = UINT64_MAX;
11355 }
11356 
11357 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11358 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11359 {
11360  inoutInfo.blockCount += srcInfo.blockCount;
11361  inoutInfo.allocationCount += srcInfo.allocationCount;
11362  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11363  inoutInfo.usedBytes += srcInfo.usedBytes;
11364  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11365  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11366  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11367  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11368  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11369 }
11370 
11371 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11372 {
11373  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11374  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11375  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11376  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11377 }
11378 
11379 VmaPool_T::VmaPool_T(
11380  VmaAllocator hAllocator,
11381  const VmaPoolCreateInfo& createInfo,
11382  VkDeviceSize preferredBlockSize) :
11383  m_BlockVector(
11384  hAllocator,
11385  this, // hParentPool
11386  createInfo.memoryTypeIndex,
11387  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11388  createInfo.minBlockCount,
11389  createInfo.maxBlockCount,
11390  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11391  createInfo.frameInUseCount,
11392  true, // isCustomPool
11393  createInfo.blockSize != 0, // explicitBlockSize
11394  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11395  m_Id(0)
11396 {
11397 }
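
// [Editor's note: illustrative sketch, not part of the original source.] This
// constructor is reached through the public vmaCreatePool(); a hedged usage
// outline, assuming memTypeIndex came from vmaFindMemoryTypeIndex():
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;
//   poolInfo.blockSize = 0; // 0 = use the allocator's preferred block size
//   poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//
// Note that a nonzero blockSize is exactly what sets explicitBlockSize above.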
11398 
11399 VmaPool_T::~VmaPool_T()
11400 {
11401 }
11402 
11403 #if VMA_STATS_STRING_ENABLED
11404 
11405 #endif // #if VMA_STATS_STRING_ENABLED
11406 
11407 VmaBlockVector::VmaBlockVector(
11408  VmaAllocator hAllocator,
11409  VmaPool hParentPool,
11410  uint32_t memoryTypeIndex,
11411  VkDeviceSize preferredBlockSize,
11412  size_t minBlockCount,
11413  size_t maxBlockCount,
11414  VkDeviceSize bufferImageGranularity,
11415  uint32_t frameInUseCount,
11416  bool isCustomPool,
11417  bool explicitBlockSize,
11418  uint32_t algorithm) :
11419  m_hAllocator(hAllocator),
11420  m_hParentPool(hParentPool),
11421  m_MemoryTypeIndex(memoryTypeIndex),
11422  m_PreferredBlockSize(preferredBlockSize),
11423  m_MinBlockCount(minBlockCount),
11424  m_MaxBlockCount(maxBlockCount),
11425  m_BufferImageGranularity(bufferImageGranularity),
11426  m_FrameInUseCount(frameInUseCount),
11427  m_IsCustomPool(isCustomPool),
11428  m_ExplicitBlockSize(explicitBlockSize),
11429  m_Algorithm(algorithm),
11430  m_HasEmptyBlock(false),
11431  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11432  m_NextBlockId(0)
11433 {
11434 }
11435 
11436 VmaBlockVector::~VmaBlockVector()
11437 {
11438  for(size_t i = m_Blocks.size(); i--; )
11439  {
11440  m_Blocks[i]->Destroy(m_hAllocator);
11441  vma_delete(m_hAllocator, m_Blocks[i]);
11442  }
11443 }
11444 
11445 VkResult VmaBlockVector::CreateMinBlocks()
11446 {
11447  for(size_t i = 0; i < m_MinBlockCount; ++i)
11448  {
11449  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11450  if(res != VK_SUCCESS)
11451  {
11452  return res;
11453  }
11454  }
11455  return VK_SUCCESS;
11456 }
11457 
11458 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11459 {
11460  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11461 
11462  const size_t blockCount = m_Blocks.size();
11463 
11464  pStats->size = 0;
11465  pStats->unusedSize = 0;
11466  pStats->allocationCount = 0;
11467  pStats->unusedRangeCount = 0;
11468  pStats->unusedRangeSizeMax = 0;
11469  pStats->blockCount = blockCount;
11470 
11471  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11472  {
11473  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11474  VMA_ASSERT(pBlock);
11475  VMA_HEAVY_ASSERT(pBlock->Validate());
11476  pBlock->m_pMetadata->AddPoolStats(*pStats);
11477  }
11478 }
11479 
11480 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11481 {
11482  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11483  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11484  (VMA_DEBUG_MARGIN > 0) &&
11485  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11486  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11487 }
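
// [Editor's note: illustrative sketch, not part of the original source.] All four
// conditions above are compile-time or pool-setup choices; corruption detection
// is opted into by defining the debug macros before this header is included in
// the translation unit that provides the implementation:
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// The memory type must also be HOST_VISIBLE and HOST_COHERENT, because the magic
// margins are written and validated through a CPU mapping.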
11488 
11489 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11490 
11491 VkResult VmaBlockVector::Allocate(
11492  uint32_t currentFrameIndex,
11493  VkDeviceSize size,
11494  VkDeviceSize alignment,
11495  const VmaAllocationCreateInfo& createInfo,
11496  VmaSuballocationType suballocType,
11497  size_t allocationCount,
11498  VmaAllocation* pAllocations)
11499 {
11500  size_t allocIndex;
11501  VkResult res = VK_SUCCESS;
11502 
11503  if(IsCorruptionDetectionEnabled())
11504  {
11505  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11506  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11507  }
11508 
11509  {
11510  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11511  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11512  {
11513  res = AllocatePage(
11514  currentFrameIndex,
11515  size,
11516  alignment,
11517  createInfo,
11518  suballocType,
11519  pAllocations + allocIndex);
11520  if(res != VK_SUCCESS)
11521  {
11522  break;
11523  }
11524  }
11525  }
11526 
11527  if(res != VK_SUCCESS)
11528  {
11529  // Free all already created allocations.
11530  while(allocIndex--)
11531  {
11532  Free(pAllocations[allocIndex]);
11533  }
11534  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11535  }
11536 
11537  return res;
11538 }
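
// [Editor's note: illustrative note, not part of the original source.] Allocate()
// is all-or-nothing for multi-page requests: if any AllocatePage() call fails,
// every allocation already made in this call is freed again and the output array
// is zeroed, so a caller such as the public vmaAllocateMemoryPages() never
// observes a partially filled result.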
11539 
11540 VkResult VmaBlockVector::AllocatePage(
11541  uint32_t currentFrameIndex,
11542  VkDeviceSize size,
11543  VkDeviceSize alignment,
11544  const VmaAllocationCreateInfo& createInfo,
11545  VmaSuballocationType suballocType,
11546  VmaAllocation* pAllocation)
11547 {
11548  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11549  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11550  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11551  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11552  const bool canCreateNewBlock =
11553  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11554  (m_Blocks.size() < m_MaxBlockCount);
11555  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11556 
11557  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11558  // Which in turn is available only when maxBlockCount = 1.
11559  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11560  {
11561  canMakeOtherLost = false;
11562  }
11563 
11564  // Upper address can only be used with linear allocator and within single memory block.
11565  if(isUpperAddress &&
11566  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11567  {
11568  return VK_ERROR_FEATURE_NOT_PRESENT;
11569  }
11570 
11571  // Validate strategy.
11572  switch(strategy)
11573  {
11574  case 0:
11575  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11576  break;
11577  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11578  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11580  break;
11581  default:
11582  return VK_ERROR_FEATURE_NOT_PRESENT;
11583  }
11584 
11585  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11586  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11587  {
11588  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11589  }
11590 
11591  /*
11592  Under certain conditions, this whole section can be skipped for optimization, so
11593  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11594  e.g. for custom pools with linear algorithm.
11595  */
11596  if(!canMakeOtherLost || canCreateNewBlock)
11597  {
11598  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11599  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11600  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
11601 
11602  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11603  {
11604  // Use only last block.
11605  if(!m_Blocks.empty())
11606  {
11607  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11608  VMA_ASSERT(pCurrBlock);
11609  VkResult res = AllocateFromBlock(
11610  pCurrBlock,
11611  currentFrameIndex,
11612  size,
11613  alignment,
11614  allocFlagsCopy,
11615  createInfo.pUserData,
11616  suballocType,
11617  strategy,
11618  pAllocation);
11619  if(res == VK_SUCCESS)
11620  {
11621  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11622  return VK_SUCCESS;
11623  }
11624  }
11625  }
11626  else
11627  {
11628  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11629  {
11630  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11631  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11632  {
11633  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11634  VMA_ASSERT(pCurrBlock);
11635  VkResult res = AllocateFromBlock(
11636  pCurrBlock,
11637  currentFrameIndex,
11638  size,
11639  alignment,
11640  allocFlagsCopy,
11641  createInfo.pUserData,
11642  suballocType,
11643  strategy,
11644  pAllocation);
11645  if(res == VK_SUCCESS)
11646  {
11647  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11648  return VK_SUCCESS;
11649  }
11650  }
11651  }
11652  else // WORST_FIT, FIRST_FIT
11653  {
11654  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11655  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11656  {
11657  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11658  VMA_ASSERT(pCurrBlock);
11659  VkResult res = AllocateFromBlock(
11660  pCurrBlock,
11661  currentFrameIndex,
11662  size,
11663  alignment,
11664  allocFlagsCopy,
11665  createInfo.pUserData,
11666  suballocType,
11667  strategy,
11668  pAllocation);
11669  if(res == VK_SUCCESS)
11670  {
11671  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11672  return VK_SUCCESS;
11673  }
11674  }
11675  }
11676  }
11677 
11678  // 2. Try to create new block.
11679  if(canCreateNewBlock)
11680  {
11681  // Calculate optimal size for new block.
11682  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11683  uint32_t newBlockSizeShift = 0;
11684  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11685 
11686  if(!m_ExplicitBlockSize)
11687  {
11688  // Allocate 1/8, 1/4, 1/2 as first blocks.
11689  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11690  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11691  {
11692  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11693  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11694  {
11695  newBlockSize = smallerNewBlockSize;
11696  ++newBlockSizeShift;
11697  }
11698  else
11699  {
11700  break;
11701  }
11702  }
11703  }
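// [Illustrative example, assuming the default 256 MiB preferred block size]
// For a 10 MiB request into an empty block vector, the loop above halves the
// block size three times (NEW_BLOCK_SIZE_SHIFT_MAX): 256 -> 128 -> 64 -> 32 MiB,
// because each halved size still satisfies smallerNewBlockSize >= size * 2.
// The first block created is therefore 32 MiB (1/8 of the preferred size).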
11704 
11705  size_t newBlockIndex = 0;
11706  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11707  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11708  if(!m_ExplicitBlockSize)
11709  {
11710  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11711  {
11712  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11713  if(smallerNewBlockSize >= size)
11714  {
11715  newBlockSize = smallerNewBlockSize;
11716  ++newBlockSizeShift;
11717  res = CreateBlock(newBlockSize, &newBlockIndex);
11718  }
11719  else
11720  {
11721  break;
11722  }
11723  }
11724  }
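// [Illustrative example, continued] For a 100 MiB request no pre-shrink happens
// (128 MiB < 2 * 100 MiB), so a 256 MiB block is tried first. If that fails
// with an out-of-memory error, the loop above retries with 128 MiB; 64 MiB is
// never tried because it is smaller than the request itself.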
11725 
11726  if(res == VK_SUCCESS)
11727  {
11728  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11729  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11730 
11731  res = AllocateFromBlock(
11732  pBlock,
11733  currentFrameIndex,
11734  size,
11735  alignment,
11736  allocFlagsCopy,
11737  createInfo.pUserData,
11738  suballocType,
11739  strategy,
11740  pAllocation);
11741  if(res == VK_SUCCESS)
11742  {
11743  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11744  return VK_SUCCESS;
11745  }
11746  else
11747  {
11748  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11749  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11750  }
11751  }
11752  }
11753  }
11754 
11755  // 3. Try to allocate from existing blocks with making other allocations lost.
11756  if(canMakeOtherLost)
11757  {
11758  uint32_t tryIndex = 0;
11759  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11760  {
11761  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11762  VmaAllocationRequest bestRequest = {};
11763  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11764 
11765  // 1. Search existing allocations.
11766  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11767  {
11768  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11769  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11770  {
11771  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11772  VMA_ASSERT(pCurrBlock);
11773  VmaAllocationRequest currRequest = {};
11774  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11775  currentFrameIndex,
11776  m_FrameInUseCount,
11777  m_BufferImageGranularity,
11778  size,
11779  alignment,
11780  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11781  suballocType,
11782  canMakeOtherLost,
11783  strategy,
11784  &currRequest))
11785  {
11786  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11787  if(pBestRequestBlock == VMA_NULL ||
11788  currRequestCost < bestRequestCost)
11789  {
11790  pBestRequestBlock = pCurrBlock;
11791  bestRequest = currRequest;
11792  bestRequestCost = currRequestCost;
11793 
11794  if(bestRequestCost == 0)
11795  {
11796  break;
11797  }
11798  }
11799  }
11800  }
11801  }
11802  else // WORST_FIT, FIRST_FIT
11803  {
11804  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11805  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11806  {
11807  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11808  VMA_ASSERT(pCurrBlock);
11809  VmaAllocationRequest currRequest = {};
11810  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11811  currentFrameIndex,
11812  m_FrameInUseCount,
11813  m_BufferImageGranularity,
11814  size,
11815  alignment,
11816  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11817  suballocType,
11818  canMakeOtherLost,
11819  strategy,
11820  &currRequest))
11821  {
11822  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11823  if(pBestRequestBlock == VMA_NULL ||
11824  currRequestCost < bestRequestCost ||
11825  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11826  {
11827  pBestRequestBlock = pCurrBlock;
11828  bestRequest = currRequest;
11829  bestRequestCost = currRequestCost;
11830 
11831  if(bestRequestCost == 0 ||
11832  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
11833  {
11834  break;
11835  }
11836  }
11837  }
11838  }
11839  }
11840 
11841  if(pBestRequestBlock != VMA_NULL)
11842  {
11843  if(mapped)
11844  {
11845  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11846  if(res != VK_SUCCESS)
11847  {
11848  return res;
11849  }
11850  }
11851 
11852  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11853  currentFrameIndex,
11854  m_FrameInUseCount,
11855  &bestRequest))
11856  {
11857  // We no longer have an empty block.
11858  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11859  {
11860  m_HasEmptyBlock = false;
11861  }
11862  // Allocate from this pBlock.
11863  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
11864  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
11865  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
11866  (*pAllocation)->InitBlockAllocation(
11867  pBestRequestBlock,
11868  bestRequest.offset,
11869  alignment,
11870  size,
11871  suballocType,
11872  mapped,
11873  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11874  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11875  VMA_DEBUG_LOG(" Returned from existing block");
11876  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11877  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11878  {
11879  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11880  }
11881  if(IsCorruptionDetectionEnabled())
11882  {
11883  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11884  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11885  }
11886  return VK_SUCCESS;
11887  }
11888  // else: Some allocations must have been touched while we are here. Next try.
11889  }
11890  else
11891  {
11892  // Could not find place in any of the blocks - break outer loop.
11893  break;
11894  }
11895  }
11896  /* Maximum number of tries exceeded - a very unlikely event when many other
11897  threads are simultaneously touching allocations, making it impossible to make
11898  them lost at the same time as we try to allocate. */
11899  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11900  {
11901  return VK_ERROR_TOO_MANY_OBJECTS;
11902  }
11903  }
11904 
11905  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11906 }
11907 
11908 void VmaBlockVector::Free(
11909  VmaAllocation hAllocation)
11910 {
11911  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11912 
11913  // Scope for lock.
11914  {
11915  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11916 
11917  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11918 
11919  if(IsCorruptionDetectionEnabled())
11920  {
11921  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11922  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11923  }
11924 
11925  if(hAllocation->IsPersistentMap())
11926  {
11927  pBlock->Unmap(m_hAllocator, 1);
11928  }
11929 
11930  pBlock->m_pMetadata->Free(hAllocation);
11931  VMA_HEAVY_ASSERT(pBlock->Validate());
11932 
11933  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
11934 
11935  // pBlock became empty after this deallocation.
11936  if(pBlock->m_pMetadata->IsEmpty())
11937  {
11938  // We already have an empty block. We don't want to have two, so delete this one.
11939  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11940  {
11941  pBlockToDelete = pBlock;
11942  Remove(pBlock);
11943  }
11944  // We now have our first empty block.
11945  else
11946  {
11947  m_HasEmptyBlock = true;
11948  }
11949  }
11950  // pBlock didn't become empty, but we have another empty block - find and free that one.
11951  // (This is optional, heuristics.)
11952  else if(m_HasEmptyBlock)
11953  {
11954  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11955  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11956  {
11957  pBlockToDelete = pLastBlock;
11958  m_Blocks.pop_back();
11959  m_HasEmptyBlock = false;
11960  }
11961  }
11962 
11963  IncrementallySortBlocks();
11964  }
11965 
11966  // Destruction of a free block. Deferred until this point, outside of the mutex
11967  // lock, for performance reasons.
11968  if(pBlockToDelete != VMA_NULL)
11969  {
11970  VMA_DEBUG_LOG(" Deleted empty allocation");
11971  pBlockToDelete->Destroy(m_hAllocator);
11972  vma_delete(m_hAllocator, pBlockToDelete);
11973  }
11974 }
11975 
11976 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11977 {
11978  VkDeviceSize result = 0;
11979  for(size_t i = m_Blocks.size(); i--; )
11980  {
11981  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11982  if(result >= m_PreferredBlockSize)
11983  {
11984  break;
11985  }
11986  }
11987  return result;
11988 }
11989 
11990 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11991 {
11992  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11993  {
11994  if(m_Blocks[blockIndex] == pBlock)
11995  {
11996  VmaVectorRemove(m_Blocks, blockIndex);
11997  return;
11998  }
11999  }
12000  VMA_ASSERT(0);
12001 }
12002 
12003 void VmaBlockVector::IncrementallySortBlocks()
12004 {
12005  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12006  {
12007  // Bubble sort only until first swap.
12008  for(size_t i = 1; i < m_Blocks.size(); ++i)
12009  {
12010  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12011  {
12012  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12013  return;
12014  }
12015  }
12016  }
12017 }
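// [Illustrative note] One call performs at most one swap: with per-block free
// sizes [5, 3, 8], the first out-of-order pair (5, 3) is swapped, giving
// [3, 5, 8]. Calling this after every allocation and free keeps m_Blocks
// approximately sorted by ascending free space at O(n) cost per call.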
12018 
12019 VkResult VmaBlockVector::AllocateFromBlock(
12020  VmaDeviceMemoryBlock* pBlock,
12021  uint32_t currentFrameIndex,
12022  VkDeviceSize size,
12023  VkDeviceSize alignment,
12024  VmaAllocationCreateFlags allocFlags,
12025  void* pUserData,
12026  VmaSuballocationType suballocType,
12027  uint32_t strategy,
12028  VmaAllocation* pAllocation)
12029 {
12030  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12031  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12032  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12033  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12034 
12035  VmaAllocationRequest currRequest = {};
12036  if(pBlock->m_pMetadata->CreateAllocationRequest(
12037  currentFrameIndex,
12038  m_FrameInUseCount,
12039  m_BufferImageGranularity,
12040  size,
12041  alignment,
12042  isUpperAddress,
12043  suballocType,
12044  false, // canMakeOtherLost
12045  strategy,
12046  &currRequest))
12047  {
12048  // Allocate from pCurrBlock.
12049  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12050 
12051  if(mapped)
12052  {
12053  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12054  if(res != VK_SUCCESS)
12055  {
12056  return res;
12057  }
12058  }
12059 
12060  // We no longer have an empty block.
12061  if(pBlock->m_pMetadata->IsEmpty())
12062  {
12063  m_HasEmptyBlock = false;
12064  }
12065 
12066  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12067  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12068  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12069  (*pAllocation)->InitBlockAllocation(
12070  pBlock,
12071  currRequest.offset,
12072  alignment,
12073  size,
12074  suballocType,
12075  mapped,
12076  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12077  VMA_HEAVY_ASSERT(pBlock->Validate());
12078  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12079  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12080  {
12081  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12082  }
12083  if(IsCorruptionDetectionEnabled())
12084  {
12085  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12086  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12087  }
12088  return VK_SUCCESS;
12089  }
12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12091 }
12092 
12093 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12094 {
12095  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12096  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12097  allocInfo.allocationSize = blockSize;
12098  VkDeviceMemory mem = VK_NULL_HANDLE;
12099  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12100  if(res < 0)
12101  {
12102  return res;
12103  }
12104 
12105  // New VkDeviceMemory successfully created.
12106 
12107  // Create a new block object for it.
12108  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12109  pBlock->Init(
12110  m_hAllocator,
12111  m_hParentPool,
12112  m_MemoryTypeIndex,
12113  mem,
12114  allocInfo.allocationSize,
12115  m_NextBlockId++,
12116  m_Algorithm);
12117 
12118  m_Blocks.push_back(pBlock);
12119  if(pNewBlockIndex != VMA_NULL)
12120  {
12121  *pNewBlockIndex = m_Blocks.size() - 1;
12122  }
12123 
12124  return VK_SUCCESS;
12125 }
12126 
12127 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12128  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12129  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12130 {
12131  const size_t blockCount = m_Blocks.size();
12132  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12133 
12134  enum BLOCK_FLAG
12135  {
12136  BLOCK_FLAG_USED = 0x00000001,
12137  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12138  };
12139 
12140  struct BlockInfo
12141  {
12142  uint32_t flags;
12143  void* pMappedData;
12144  };
12145  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12146  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12147  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12148 
12149  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12150  const size_t moveCount = moves.size();
12151  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12152  {
12153  const VmaDefragmentationMove& move = moves[moveIndex];
12154  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12155  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12156  }
12157 
12158  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12159 
12160  // Go over all blocks. Get mapped pointer or map if necessary.
12161  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12162  {
12163  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12164  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12165  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12166  {
12167  currBlockInfo.pMappedData = pBlock->GetMappedData();
12168  // It is not originally mapped - map it.
12169  if(currBlockInfo.pMappedData == VMA_NULL)
12170  {
12171  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12172  if(pDefragCtx->res == VK_SUCCESS)
12173  {
12174  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12175  }
12176  }
12177  }
12178  }
12179 
12180  // Go over all moves. Do actual data transfer.
12181  if(pDefragCtx->res == VK_SUCCESS)
12182  {
12183  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12184  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12185 
12186  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12187  {
12188  const VmaDefragmentationMove& move = moves[moveIndex];
12189 
12190  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12191  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12192 
12193  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12194 
12195  // Invalidate source.
12196  if(isNonCoherent)
12197  {
12198  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12199  memRange.memory = pSrcBlock->GetDeviceMemory();
12200  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12201  memRange.size = VMA_MIN(
12202  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12203  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12204  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12205  }
12206 
12207  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12208  memmove(
12209  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12210  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12211  static_cast<size_t>(move.size));
12212 
12213  if(IsCorruptionDetectionEnabled())
12214  {
12215  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12216  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12217  }
12218 
12219  // Flush destination.
12220  if(isNonCoherent)
12221  {
12222  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12223  memRange.memory = pDstBlock->GetDeviceMemory();
12224  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12225  memRange.size = VMA_MIN(
12226  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12227  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12228  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12229  }
12230  }
12231  }
12232 
12233  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12234  // This is done regardless of whether pCtx->res == VK_SUCCESS.
12235  for(size_t blockIndex = blockCount; blockIndex--; )
12236  {
12237  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12238  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12239  {
12240  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12241  pBlock->Unmap(m_hAllocator, 1);
12242  }
12243  }
12244 }
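// [Illustrative example of the flush/invalidate clamping above] Assuming
// nonCoherentAtomSize = 64, a move with srcOffset = 100 and size = 30 yields
// memRange.offset = VmaAlignDown(100, 64) = 64 and a size of
// VmaAlignUp(30 + (100 - 64), 64) = 128, clamped to the end of the block.
// Vulkan requires mapped-memory ranges on non-host-coherent memory types to
// be aligned this way.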
12245 
12246 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12247  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12248  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12249  VkCommandBuffer commandBuffer)
12250 {
12251  const size_t blockCount = m_Blocks.size();
12252 
12253  pDefragCtx->blockContexts.resize(blockCount);
12254  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12255 
12256  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12257  const size_t moveCount = moves.size();
12258  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12259  {
12260  const VmaDefragmentationMove& move = moves[moveIndex];
12261  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12262  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12263  }
12264 
12265  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12266 
12267  // Go over all blocks. Create and bind buffer for whole block if necessary.
12268  {
12269  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
12270  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
12271  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
12272 
12273  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12274  {
12275  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12276  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12277  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12278  {
12279  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12280  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12281  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12282  if(pDefragCtx->res == VK_SUCCESS)
12283  {
12284  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12285  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12286  }
12287  }
12288  }
12289  }
12290 
12291  // Go over all moves. Post data transfer commands to command buffer.
12292  if(pDefragCtx->res == VK_SUCCESS)
12293  {
12294  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12295  {
12296  const VmaDefragmentationMove& move = moves[moveIndex];
12297 
12298  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12299  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12300 
12301  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12302 
12303  VkBufferCopy region = {
12304  move.srcOffset,
12305  move.dstOffset,
12306  move.size };
12307  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12308  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12309  }
12310  }
12311 
12312  // Save buffers to defrag context for later destruction.
12313  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12314  {
12315  pDefragCtx->res = VK_NOT_READY;
12316  }
12317 }
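// [Illustrative note] VK_NOT_READY acts as a sentinel here: the temporary
// whole-block buffers referenced by the recorded vkCmdCopyBuffer commands must
// outlive the command buffer's execution on the GPU, so they are destroyed
// later, in DefragmentationEnd(), rather than at the end of this function.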
12318 
12319 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12320 {
12321  m_HasEmptyBlock = false;
12322  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12323  {
12324  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12325  if(pBlock->m_pMetadata->IsEmpty())
12326  {
12327  if(m_Blocks.size() > m_MinBlockCount)
12328  {
12329  if(pDefragmentationStats != VMA_NULL)
12330  {
12331  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12332  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12333  }
12334 
12335  VmaVectorRemove(m_Blocks, blockIndex);
12336  pBlock->Destroy(m_hAllocator);
12337  vma_delete(m_hAllocator, pBlock);
12338  }
12339  else
12340  {
12341  m_HasEmptyBlock = true;
12342  }
12343  }
12344  }
12345 }
12346 
12347 #if VMA_STATS_STRING_ENABLED
12348 
12349 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12350 {
12351  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12352 
12353  json.BeginObject();
12354 
12355  if(m_IsCustomPool)
12356  {
12357  json.WriteString("MemoryTypeIndex");
12358  json.WriteNumber(m_MemoryTypeIndex);
12359 
12360  json.WriteString("BlockSize");
12361  json.WriteNumber(m_PreferredBlockSize);
12362 
12363  json.WriteString("BlockCount");
12364  json.BeginObject(true);
12365  if(m_MinBlockCount > 0)
12366  {
12367  json.WriteString("Min");
12368  json.WriteNumber((uint64_t)m_MinBlockCount);
12369  }
12370  if(m_MaxBlockCount < SIZE_MAX)
12371  {
12372  json.WriteString("Max");
12373  json.WriteNumber((uint64_t)m_MaxBlockCount);
12374  }
12375  json.WriteString("Cur");
12376  json.WriteNumber((uint64_t)m_Blocks.size());
12377  json.EndObject();
12378 
12379  if(m_FrameInUseCount > 0)
12380  {
12381  json.WriteString("FrameInUseCount");
12382  json.WriteNumber(m_FrameInUseCount);
12383  }
12384 
12385  if(m_Algorithm != 0)
12386  {
12387  json.WriteString("Algorithm");
12388  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12389  }
12390  }
12391  else
12392  {
12393  json.WriteString("PreferredBlockSize");
12394  json.WriteNumber(m_PreferredBlockSize);
12395  }
12396 
12397  json.WriteString("Blocks");
12398  json.BeginObject();
12399  for(size_t i = 0; i < m_Blocks.size(); ++i)
12400  {
12401  json.BeginString();
12402  json.ContinueString(m_Blocks[i]->GetId());
12403  json.EndString();
12404 
12405  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12406  }
12407  json.EndObject();
12408 
12409  json.EndObject();
12410 }
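// [Illustrative note] For a custom pool with two blocks, the JSON emitted above
// might look roughly like this (all values hypothetical):
//
//   { "MemoryTypeIndex": 2, "BlockSize": 16777216,
//     "BlockCount": { "Min": 1, "Cur": 2 },
//     "Blocks": { "0": { ... }, "1": { ... } } }
//
// where each entry under "Blocks" is keyed by block ID and filled in by the
// block metadata's PrintDetailedMap().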
12411 
12412 #endif // #if VMA_STATS_STRING_ENABLED
12413 
12414 void VmaBlockVector::Defragment(
12415  class VmaBlockVectorDefragmentationContext* pCtx,
12416  VmaDefragmentationStats* pStats,
12417  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12418  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12419  VkCommandBuffer commandBuffer)
12420 {
12421  pCtx->res = VK_SUCCESS;
12422 
12423  const VkMemoryPropertyFlags memPropFlags =
12424  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12425  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12426  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
12427 
12428  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12429  isHostVisible;
12430  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12431  !IsCorruptionDetectionEnabled();
12432 
12433  // There are options to defragment this memory type.
12434  if(canDefragmentOnCpu || canDefragmentOnGpu)
12435  {
12436  bool defragmentOnGpu;
12437  // There is only one option to defragment this memory type.
12438  if(canDefragmentOnGpu != canDefragmentOnCpu)
12439  {
12440  defragmentOnGpu = canDefragmentOnGpu;
12441  }
12442  // Both options are available: use heuristics to choose the better one.
12443  else
12444  {
12445  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12446  m_hAllocator->IsIntegratedGpu();
12447  }
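// [Illustrative note] E.g. a DEVICE_LOCAL | HOST_VISIBLE memory type (common
// on integrated GPUs) can satisfy both canDefragmentOnCpu and
// canDefragmentOnGpu; the heuristic above then picks the GPU path because the
// memory is DEVICE_LOCAL.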
12448 
12449  bool overlappingMoveSupported = !defragmentOnGpu;
12450 
12451  if(m_hAllocator->m_UseMutex)
12452  {
12453  m_Mutex.LockWrite();
12454  pCtx->mutexLocked = true;
12455  }
12456 
12457  pCtx->Begin(overlappingMoveSupported);
12458 
12459  // Defragment.
12460 
12461  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12462  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12463  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12464  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12465  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12466 
12467  // Accumulate statistics.
12468  if(pStats != VMA_NULL)
12469  {
12470  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12471  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12472  pStats->bytesMoved += bytesMoved;
12473  pStats->allocationsMoved += allocationsMoved;
12474  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12475  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12476  if(defragmentOnGpu)
12477  {
12478  maxGpuBytesToMove -= bytesMoved;
12479  maxGpuAllocationsToMove -= allocationsMoved;
12480  }
12481  else
12482  {
12483  maxCpuBytesToMove -= bytesMoved;
12484  maxCpuAllocationsToMove -= allocationsMoved;
12485  }
12486  }
12487 
12488  if(pCtx->res >= VK_SUCCESS)
12489  {
12490  if(defragmentOnGpu)
12491  {
12492  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12493  }
12494  else
12495  {
12496  ApplyDefragmentationMovesCpu(pCtx, moves);
12497  }
12498  }
12499  }
12500 }
12501 
12502 void VmaBlockVector::DefragmentationEnd(
12503  class VmaBlockVectorDefragmentationContext* pCtx,
12504  VmaDefragmentationStats* pStats)
12505 {
12506  // Destroy buffers.
12507  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12508  {
12509  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12510  if(blockCtx.hBuffer)
12511  {
12512  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12513  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12514  }
12515  }
12516 
12517  if(pCtx->res >= VK_SUCCESS)
12518  {
12519  FreeEmptyBlocks(pStats);
12520  }
12521 
12522  if(pCtx->mutexLocked)
12523  {
12524  VMA_ASSERT(m_hAllocator->m_UseMutex);
12525  m_Mutex.UnlockWrite();
12526  }
12527 }
12528 
12529 size_t VmaBlockVector::CalcAllocationCount() const
12530 {
12531  size_t result = 0;
12532  for(size_t i = 0; i < m_Blocks.size(); ++i)
12533  {
12534  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12535  }
12536  return result;
12537 }
12538 
12539 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12540 {
12541  if(m_BufferImageGranularity == 1)
12542  {
12543  return false;
12544  }
12545  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12546  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12547  {
12548  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12549  VMA_ASSERT(m_Algorithm == 0);
12550  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12551  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12552  {
12553  return true;
12554  }
12555  }
12556  return false;
12557 }
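// [Illustrative note] bufferImageGranularity is a VkPhysicalDeviceLimits value:
// when it is greater than 1 (e.g. 4096), a linear resource (buffer) and a
// non-linear resource (optimally tiled image) must not share the same
// granularity-sized "page" of a memory block, which is why adjacent
// suballocations of mixed types have to be checked here.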
12558 
12559 void VmaBlockVector::MakePoolAllocationsLost(
12560  uint32_t currentFrameIndex,
12561  size_t* pLostAllocationCount)
12562 {
12563  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12564  size_t lostAllocationCount = 0;
12565  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12566  {
12567  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12568  VMA_ASSERT(pBlock);
12569  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12570  }
12571  if(pLostAllocationCount != VMA_NULL)
12572  {
12573  *pLostAllocationCount = lostAllocationCount;
12574  }
12575 }
12576 
12577 VkResult VmaBlockVector::CheckCorruption()
12578 {
12579  if(!IsCorruptionDetectionEnabled())
12580  {
12581  return VK_ERROR_FEATURE_NOT_PRESENT;
12582  }
12583 
12584  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12585  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12586  {
12587  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12588  VMA_ASSERT(pBlock);
12589  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12590  if(res != VK_SUCCESS)
12591  {
12592  return res;
12593  }
12594  }
12595  return VK_SUCCESS;
12596 }
12597 
12598 void VmaBlockVector::AddStats(VmaStats* pStats)
12599 {
12600  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12601  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12602 
12603  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12604 
12605  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12606  {
12607  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12608  VMA_ASSERT(pBlock);
12609  VMA_HEAVY_ASSERT(pBlock->Validate());
12610  VmaStatInfo allocationStatInfo;
12611  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12612  VmaAddStatInfo(pStats->total, allocationStatInfo);
12613  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12614  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12615  }
12616 }
12617 
12618 ////////////////////////////////////////////////////////////////////////////////
12619 // VmaDefragmentationAlgorithm_Generic members definition
12620 
12621 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12622  VmaAllocator hAllocator,
12623  VmaBlockVector* pBlockVector,
12624  uint32_t currentFrameIndex,
12625  bool overlappingMoveSupported) :
12626  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12627  m_AllocationCount(0),
12628  m_AllAllocations(false),
12629  m_BytesMoved(0),
12630  m_AllocationsMoved(0),
12631  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12632 {
12633  // Create block info for each block.
12634  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12635  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12636  {
12637  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12638  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12639  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12640  m_Blocks.push_back(pBlockInfo);
12641  }
12642 
12643  // Sort them by m_pBlock pointer value.
12644  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12645 }
12646 
12647 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12648 {
12649  for(size_t i = m_Blocks.size(); i--; )
12650  {
12651  vma_delete(m_hAllocator, m_Blocks[i]);
12652  }
12653 }
12654 
12655 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12656 {
12657  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
12658  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12659  {
12660  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12661  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12662  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12663  {
12664  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12665  (*it)->m_Allocations.push_back(allocInfo);
12666  }
12667  else
12668  {
12669  VMA_ASSERT(0);
12670  }
12671 
12672  ++m_AllocationCount;
12673  }
12674 }
12675 
12676 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12677  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12678  VkDeviceSize maxBytesToMove,
12679  uint32_t maxAllocationsToMove)
12680 {
12681  if(m_Blocks.empty())
12682  {
12683  return VK_SUCCESS;
12684  }
12685 
12686  // This is a choice based on research.
12687  // Option 1:
12688  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12689  // Option 2:
12690  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12691  // Option 3:
12692  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12693 
12694  size_t srcBlockMinIndex = 0;
12695  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
12696  /*
12697  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12698  {
12699  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12700  if(blocksWithNonMovableCount > 0)
12701  {
12702  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12703  }
12704  }
12705  */
12706 
12707  size_t srcBlockIndex = m_Blocks.size() - 1;
12708  size_t srcAllocIndex = SIZE_MAX;
12709  for(;;)
12710  {
12711  // 1. Find next allocation to move.
12712  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12713  // 1.2. Then start from last to first m_Allocations.
12714  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12715  {
12716  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12717  {
12718  // Finished: no more allocations to process.
12719  if(srcBlockIndex == srcBlockMinIndex)
12720  {
12721  return VK_SUCCESS;
12722  }
12723  else
12724  {
12725  --srcBlockIndex;
12726  srcAllocIndex = SIZE_MAX;
12727  }
12728  }
12729  else
12730  {
12731  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12732  }
12733  }
12734 
12735  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12736  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12737 
12738  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12739  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12740  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12741  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12742 
12743  // 2. Try to find new place for this allocation in preceding or current block.
12744  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12745  {
12746  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12747  VmaAllocationRequest dstAllocRequest;
12748  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12749  m_CurrentFrameIndex,
12750  m_pBlockVector->GetFrameInUseCount(),
12751  m_pBlockVector->GetBufferImageGranularity(),
12752  size,
12753  alignment,
12754  false, // upperAddress
12755  suballocType,
12756  false, // canMakeOtherLost
12757  strategy,
12758  &dstAllocRequest) &&
12759  MoveMakesSense(
12760  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
12761  {
12762  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
12763 
12764  // Reached limit on number of allocations or bytes to move.
12765  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
12766  (m_BytesMoved + size > maxBytesToMove))
12767  {
12768  return VK_SUCCESS;
12769  }
12770 
12771  VmaDefragmentationMove move;
12772  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
12773  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
12774  move.srcOffset = srcOffset;
12775  move.dstOffset = dstAllocRequest.offset;
12776  move.size = size;
12777  moves.push_back(move);
12778 
12779  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
12780  dstAllocRequest,
12781  suballocType,
12782  size,
12783  allocInfo.m_hAllocation);
12784  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
12785 
12786  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
12787 
12788  if(allocInfo.m_pChanged != VMA_NULL)
12789  {
12790  *allocInfo.m_pChanged = VK_TRUE;
12791  }
12792 
12793  ++m_AllocationsMoved;
12794  m_BytesMoved += size;
12795 
12796  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
12797 
12798  break;
12799  }
12800  }
12801 
12802  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
12803 
12804  if(srcAllocIndex > 0)
12805  {
12806  --srcAllocIndex;
12807  }
12808  else
12809  {
12810  if(srcBlockIndex > 0)
12811  {
12812  --srcBlockIndex;
12813  srcAllocIndex = SIZE_MAX;
12814  }
12815  else
12816  {
12817  return VK_SUCCESS;
12818  }
12819  }
12820  }
12821 }
12822 
12823 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12824 {
12825  size_t result = 0;
12826  for(size_t i = 0; i < m_Blocks.size(); ++i)
12827  {
12828  if(m_Blocks[i]->m_HasNonMovableAllocations)
12829  {
12830  ++result;
12831  }
12832  }
12833  return result;
12834 }
12835 
12836 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
12837  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12838  VkDeviceSize maxBytesToMove,
12839  uint32_t maxAllocationsToMove)
12840 {
12841  if(!m_AllAllocations && m_AllocationCount == 0)
12842  {
12843  return VK_SUCCESS;
12844  }
12845 
12846  const size_t blockCount = m_Blocks.size();
12847  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12848  {
12849  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
12850 
12851  if(m_AllAllocations)
12852  {
12853  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
12854  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
12855  it != pMetadata->m_Suballocations.end();
12856  ++it)
12857  {
12858  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
12859  {
12860  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
12861  pBlockInfo->m_Allocations.push_back(allocInfo);
12862  }
12863  }
12864  }
12865 
12866  pBlockInfo->CalcHasNonMovableAllocations();
12867 
12868  // This is a choice based on research.
12869  // Option 1:
12870  pBlockInfo->SortAllocationsByOffsetDescending();
12871  // Option 2:
12872  //pBlockInfo->SortAllocationsBySizeDescending();
12873  }
12874 
12875  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
12876  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
12877 
12878  // This is a choice based on research.
12879  const uint32_t roundCount = 2;
12880 
12881  // Execute defragmentation rounds (the main part).
12882  VkResult result = VK_SUCCESS;
12883  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
12884  {
12885  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
12886  }
12887 
12888  return result;
12889 }
12890 
12891 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12892  size_t dstBlockIndex, VkDeviceSize dstOffset,
12893  size_t srcBlockIndex, VkDeviceSize srcOffset)
12894 {
12895  if(dstBlockIndex < srcBlockIndex)
12896  {
12897  return true;
12898  }
12899  if(dstBlockIndex > srcBlockIndex)
12900  {
12901  return false;
12902  }
12903  if(dstOffset < srcOffset)
12904  {
12905  return true;
12906  }
12907  return false;
12908 }
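// [Illustrative note] The comparison above is lexicographic on
// (blockIndex, offset): moving from (block 2, offset 0) to (block 0,
// offset 4096) makes sense because the destination block comes earlier, while
// moving within block 0 from offset 256 to offset 512 does not, because it
// would push the allocation towards the end of the block.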
12909 
12910 ////////////////////////////////////////////////////////////////////////////////
12911 // VmaDefragmentationAlgorithm_Fast
12912 
12913 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
12914  VmaAllocator hAllocator,
12915  VmaBlockVector* pBlockVector,
12916  uint32_t currentFrameIndex,
12917  bool overlappingMoveSupported) :
12918  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12919  m_OverlappingMoveSupported(overlappingMoveSupported),
12920  m_AllocationCount(0),
12921  m_AllAllocations(false),
12922  m_BytesMoved(0),
12923  m_AllocationsMoved(0),
12924  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
12925 {
12926  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
12927 
12928 }
12929 
12930 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
12931 {
12932 }
12933 
12934 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12935  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12936  VkDeviceSize maxBytesToMove,
12937  uint32_t maxAllocationsToMove)
12938 {
12939  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12940 
12941  const size_t blockCount = m_pBlockVector->GetBlockCount();
12942  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12943  {
12944  return VK_SUCCESS;
12945  }
12946 
12947  PreprocessMetadata();
12948 
12949  // Sort blocks in order from most "destination" to most "source".
12950 
12951  m_BlockInfos.resize(blockCount);
12952  for(size_t i = 0; i < blockCount; ++i)
12953  {
12954  m_BlockInfos[i].origBlockIndex = i;
12955  }
12956 
12957  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12958  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12959  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12960  });
12961 
12962  // THE MAIN ALGORITHM
12963 
12964  FreeSpaceDatabase freeSpaceDb;
12965 
12966  size_t dstBlockInfoIndex = 0;
12967  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12968  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12969  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12970  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12971  VkDeviceSize dstOffset = 0;
12972 
12973  bool end = false;
12974  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12975  {
12976  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12977  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12978  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12979  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12980  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12981  {
12982  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12983  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12984  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12985  if(m_AllocationsMoved == maxAllocationsToMove ||
12986  m_BytesMoved + srcAllocSize > maxBytesToMove)
12987  {
12988  end = true;
12989  break;
12990  }
12991  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12992 
12993  // Try to place it in one of the free spaces from the database.
12994  size_t freeSpaceInfoIndex;
12995  VkDeviceSize dstAllocOffset;
12996  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12997  freeSpaceInfoIndex, dstAllocOffset))
12998  {
12999  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13000  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13001  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13002 
13003  // Same block
13004  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13005  {
13006  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13007 
13008  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13009 
13010  VmaSuballocation suballoc = *srcSuballocIt;
13011  suballoc.offset = dstAllocOffset;
13012  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13013  m_BytesMoved += srcAllocSize;
13014  ++m_AllocationsMoved;
13015 
13016  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13017  ++nextSuballocIt;
13018  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13019  srcSuballocIt = nextSuballocIt;
13020 
13021  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13022 
13023  VmaDefragmentationMove move = {
13024  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13025  srcAllocOffset, dstAllocOffset,
13026  srcAllocSize };
13027  moves.push_back(move);
13028  }
13029  // Different block
13030  else
13031  {
13032  // MOVE OPTION 2: Move the allocation to a different block.
13033 
13034  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13035 
13036  VmaSuballocation suballoc = *srcSuballocIt;
13037  suballoc.offset = dstAllocOffset;
13038  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13039  m_BytesMoved += srcAllocSize;
13040  ++m_AllocationsMoved;
13041 
13042  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13043  ++nextSuballocIt;
13044  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13045  srcSuballocIt = nextSuballocIt;
13046 
13047  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13048 
13049  VmaDefragmentationMove move = {
13050  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13051  srcAllocOffset, dstAllocOffset,
13052  srcAllocSize };
13053  moves.push_back(move);
13054  }
13055  }
13056  else
13057  {
13058  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13059 
13060  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
13061  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13062  dstAllocOffset + srcAllocSize > dstBlockSize)
13063  {
13064  // But before that, register remaining free space at the end of dst block.
13065  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13066 
13067  ++dstBlockInfoIndex;
13068  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13069  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13070  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13071  dstBlockSize = pDstMetadata->GetSize();
13072  dstOffset = 0;
13073  dstAllocOffset = 0;
13074  }
13075 
13076  // Same block
13077  if(dstBlockInfoIndex == srcBlockInfoIndex)
13078  {
13079  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13080 
13081  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13082 
13083  bool skipOver = overlap;
13084  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13085  {
13086  // If the destination and source places overlap, skip the move if it would
13087  // shift the allocation by less than 1/64 of its size.
13088  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13089  }
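// [Illustrative note] With srcAllocSize = 64 KiB, the in-place move is skipped
// unless it would shift the allocation towards the front by at least 1 KiB
// (64 KiB / 64); smaller shifts are not considered worth the copy.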
13090 
13091  if(skipOver)
13092  {
13093  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13094 
13095  dstOffset = srcAllocOffset + srcAllocSize;
13096  ++srcSuballocIt;
13097  }
13098  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13099  else
13100  {
13101  srcSuballocIt->offset = dstAllocOffset;
13102  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13103  dstOffset = dstAllocOffset + srcAllocSize;
13104  m_BytesMoved += srcAllocSize;
13105  ++m_AllocationsMoved;
13106  ++srcSuballocIt;
13107  VmaDefragmentationMove move = {
13108  srcOrigBlockIndex, dstOrigBlockIndex,
13109  srcAllocOffset, dstAllocOffset,
13110  srcAllocSize };
13111  moves.push_back(move);
13112  }
13113  }
13114  // Different block
13115  else
13116  {
13117  // MOVE OPTION 2: Move the allocation to a different block.
13118 
13119  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13120  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13121 
13122  VmaSuballocation suballoc = *srcSuballocIt;
13123  suballoc.offset = dstAllocOffset;
13124  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13125  dstOffset = dstAllocOffset + srcAllocSize;
13126  m_BytesMoved += srcAllocSize;
13127  ++m_AllocationsMoved;
13128 
13129  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13130  ++nextSuballocIt;
13131  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13132  srcSuballocIt = nextSuballocIt;
13133 
13134  pDstMetadata->m_Suballocations.push_back(suballoc);
13135 
13136  VmaDefragmentationMove move = {
13137  srcOrigBlockIndex, dstOrigBlockIndex,
13138  srcAllocOffset, dstAllocOffset,
13139  srcAllocSize };
13140  moves.push_back(move);
13141  }
13142  }
13143  }
13144  }
13145 
13146  m_BlockInfos.clear();
13147 
13148  PostprocessMetadata();
13149 
13150  return VK_SUCCESS;
13151 }
13152 
13153 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13154 {
13155  const size_t blockCount = m_pBlockVector->GetBlockCount();
13156  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13157  {
13158  VmaBlockMetadata_Generic* const pMetadata =
13159  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13160  pMetadata->m_FreeCount = 0;
13161  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13162  pMetadata->m_FreeSuballocationsBySize.clear();
13163  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13164  it != pMetadata->m_Suballocations.end(); )
13165  {
13166  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13167  {
13168  VmaSuballocationList::iterator nextIt = it;
13169  ++nextIt;
13170  pMetadata->m_Suballocations.erase(it);
13171  it = nextIt;
13172  }
13173  else
13174  {
13175  ++it;
13176  }
13177  }
13178  }
13179 }
13180 
13181 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13182 {
13183  const size_t blockCount = m_pBlockVector->GetBlockCount();
13184  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13185  {
13186  VmaBlockMetadata_Generic* const pMetadata =
13187  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13188  const VkDeviceSize blockSize = pMetadata->GetSize();
13189 
13190  // No allocations in this block - entire area is free.
13191  if(pMetadata->m_Suballocations.empty())
13192  {
13193  pMetadata->m_FreeCount = 1;
13194  //pMetadata->m_SumFreeSize is already set to blockSize.
13195  VmaSuballocation suballoc = {
13196  0, // offset
13197  blockSize, // size
13198  VMA_NULL, // hAllocation
13199  VMA_SUBALLOCATION_TYPE_FREE };
13200  pMetadata->m_Suballocations.push_back(suballoc);
13201  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13202  }
13203  // There are some allocations in this block.
13204  else
13205  {
13206  VkDeviceSize offset = 0;
13207  VmaSuballocationList::iterator it;
13208  for(it = pMetadata->m_Suballocations.begin();
13209  it != pMetadata->m_Suballocations.end();
13210  ++it)
13211  {
13212  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13213  VMA_ASSERT(it->offset >= offset);
13214 
13215  // Need to insert preceding free space.
13216  if(it->offset > offset)
13217  {
13218  ++pMetadata->m_FreeCount;
13219  const VkDeviceSize freeSize = it->offset - offset;
13220  VmaSuballocation suballoc = {
13221  offset, // offset
13222  freeSize, // size
13223  VMA_NULL, // hAllocation
13224  VMA_SUBALLOCATION_TYPE_FREE };
13225  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13226  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13227  {
13228  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13229  }
13230  }
13231 
13232  pMetadata->m_SumFreeSize -= it->size;
13233  offset = it->offset + it->size;
13234  }
13235 
13236  // Need to insert trailing free space.
13237  if(offset < blockSize)
13238  {
13239  ++pMetadata->m_FreeCount;
13240  const VkDeviceSize freeSize = blockSize - offset;
13241  VmaSuballocation suballoc = {
13242  offset, // offset
13243  freeSize, // size
13244  VMA_NULL, // hAllocation
13245  VMA_SUBALLOCATION_TYPE_FREE };
13246  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13247  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13248  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13249  {
13250  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13251  }
13252  }
13253 
13254  VMA_SORT(
13255  pMetadata->m_FreeSuballocationsBySize.begin(),
13256  pMetadata->m_FreeSuballocationsBySize.end(),
13257  VmaSuballocationItemSizeLess());
13258  }
13259 
13260  VMA_HEAVY_ASSERT(pMetadata->Validate());
13261  }
13262 }
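// [Illustrative example] Assuming blockSize = 100 and remaining allocations
// occupying [10, 20) and [50, 60), the loop above re-inserts free
// suballocations [0, 10), [20, 50) and [60, 100), leaving m_FreeCount = 3 and
// m_SumFreeSize = 100 - 20 = 80.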
13263 
13264 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13265 {
13266  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13267  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13268  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
13269  {
13270  // Advance past suballocations that start before the new one.
13271  ++it;
13272  }
13273 
13274  // Insert before the first suballocation at an equal or higher offset.
13275  pMetadata->m_Suballocations.insert(it, suballoc);
13276 }
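// [Illustrative note] If the list holds suballocations at offsets 0, 100 and
// 200, inserting one with offset = 150 walks past 0 and 100 and places it
// before the entry at 200, keeping the list sorted by offset.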
13277 
13278 ////////////////////////////////////////////////////////////////////////////////
13279 // VmaBlockVectorDefragmentationContext
13280 
13281 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13282  VmaAllocator hAllocator,
13283  VmaPool hCustomPool,
13284  VmaBlockVector* pBlockVector,
13285  uint32_t currFrameIndex,
13286  uint32_t algorithmFlags) :
13287  res(VK_SUCCESS),
13288  mutexLocked(false),
13289  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13290  m_hAllocator(hAllocator),
13291  m_hCustomPool(hCustomPool),
13292  m_pBlockVector(pBlockVector),
13293  m_CurrFrameIndex(currFrameIndex),
13294  m_AlgorithmFlags(algorithmFlags),
13295  m_pAlgorithm(VMA_NULL),
13296  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13297  m_AllAllocations(false)
13298 {
13299 }
13300 
13301 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13302 {
13303  vma_delete(m_hAllocator, m_pAlgorithm);
13304 }
13305 
13306 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13307 {
13308  AllocInfo info = { hAlloc, pChanged };
13309  m_Allocations.push_back(info);
13310 }
13311 
13312 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13313 {
13314  const bool allAllocations = m_AllAllocations ||
13315  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13316 
13317  /********************************
13318  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13319  ********************************/
13320 
13321  /*
13322  Fast algorithm is supported only when certain criteria are met:
13323  - VMA_DEBUG_MARGIN is 0.
13324  - All allocations in this block vector are moveable.
13325  - There is no possibility of image/buffer granularity conflict.
13326  */
13327  if(VMA_DEBUG_MARGIN == 0 &&
13328  allAllocations &&
13329  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13330  {
13331  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13332  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13333  }
13334  else
13335  {
13336  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13337  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13338  }
13339 
13340  if(allAllocations)
13341  {
13342  m_pAlgorithm->AddAll();
13343  }
13344  else
13345  {
13346  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13347  {
13348  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13349  }
13350  }
13351 }
13352 
13353 ////////////////////////////////////////////////////////////////////////////////
13354 // VmaDefragmentationContext
13355 
13356 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13357  VmaAllocator hAllocator,
13358  uint32_t currFrameIndex,
13359  uint32_t flags,
13360  VmaDefragmentationStats* pStats) :
13361  m_hAllocator(hAllocator),
13362  m_CurrFrameIndex(currFrameIndex),
13363  m_Flags(flags),
13364  m_pStats(pStats),
13365  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13366 {
13367  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13368 }
13369 
13370 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13371 {
13372  for(size_t i = m_CustomPoolContexts.size(); i--; )
13373  {
13374  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13375  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13376  vma_delete(m_hAllocator, pBlockVectorCtx);
13377  }
13378  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13379  {
13380  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13381  if(pBlockVectorCtx)
13382  {
13383  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13384  vma_delete(m_hAllocator, pBlockVectorCtx);
13385  }
13386  }
13387 }
13388 
13389 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13390 {
13391  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13392  {
13393  VmaPool pool = pPools[poolIndex];
13394  VMA_ASSERT(pool);
13395  // Pools with algorithm other than default are not defragmented.
13396  if(pool->m_BlockVector.GetAlgorithm() == 0)
13397  {
13398  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13399 
13400  for(size_t i = m_CustomPoolContexts.size(); i--; )
13401  {
13402  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13403  {
13404  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13405  break;
13406  }
13407  }
13408 
13409  if(!pBlockVectorDefragCtx)
13410  {
13411  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13412  m_hAllocator,
13413  pool,
13414  &pool->m_BlockVector,
13415  m_CurrFrameIndex,
13416  m_Flags);
13417  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13418  }
13419 
13420  pBlockVectorDefragCtx->AddAll();
13421  }
13422  }
13423 }
13424 
13425 void VmaDefragmentationContext_T::AddAllocations(
13426  uint32_t allocationCount,
13427  VmaAllocation* pAllocations,
13428  VkBool32* pAllocationsChanged)
13429 {
13430  // Dispatch pAllocations among defragmentators. Create them when necessary.
13431  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13432  {
13433  const VmaAllocation hAlloc = pAllocations[allocIndex];
13434  VMA_ASSERT(hAlloc);
13435  // DedicatedAlloc cannot be defragmented.
13436  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13437  // Lost allocation cannot be defragmented.
13438  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13439  {
13440  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13441 
13442  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13443  // This allocation belongs to custom pool.
13444  if(hAllocPool != VK_NULL_HANDLE)
13445  {
13446  // Pools with algorithm other than default are not defragmented.
13447  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13448  {
13449  for(size_t i = m_CustomPoolContexts.size(); i--; )
13450  {
13451  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13452  {
13453  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13454  break;
13455  }
13456  }
13457  if(!pBlockVectorDefragCtx)
13458  {
13459  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13460  m_hAllocator,
13461  hAllocPool,
13462  &hAllocPool->m_BlockVector,
13463  m_CurrFrameIndex,
13464  m_Flags);
13465  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13466  }
13467  }
13468  }
13469  // This allocation belongs to default pool.
13470  else
13471  {
13472  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13473  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13474  if(!pBlockVectorDefragCtx)
13475  {
13476  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13477  m_hAllocator,
13478  VMA_NULL, // hCustomPool
13479  m_hAllocator->m_pBlockVectors[memTypeIndex],
13480  m_CurrFrameIndex,
13481  m_Flags);
13482  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13483  }
13484  }
13485 
13486  if(pBlockVectorDefragCtx)
13487  {
13488  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13489  &pAllocationsChanged[allocIndex] : VMA_NULL;
13490  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13491  }
13492  }
13493  }
13494 }
13495 
13496 VkResult VmaDefragmentationContext_T::Defragment(
13497  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13498  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13499  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13500 {
13501  if(pStats)
13502  {
13503  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13504  }
13505 
13506  if(commandBuffer == VK_NULL_HANDLE)
13507  {
13508  maxGpuBytesToMove = 0;
13509  maxGpuAllocationsToMove = 0;
13510  }
13511 
13512  VkResult res = VK_SUCCESS;
13513 
13514  // Process default pools.
13515  for(uint32_t memTypeIndex = 0;
13516  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13517  ++memTypeIndex)
13518  {
13519  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13520  if(pBlockVectorCtx)
13521  {
13522  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13523  pBlockVectorCtx->GetBlockVector()->Defragment(
13524  pBlockVectorCtx,
13525  pStats,
13526  maxCpuBytesToMove, maxCpuAllocationsToMove,
13527  maxGpuBytesToMove, maxGpuAllocationsToMove,
13528  commandBuffer);
13529  if(pBlockVectorCtx->res != VK_SUCCESS)
13530  {
13531  res = pBlockVectorCtx->res;
13532  }
13533  }
13534  }
13535 
13536  // Process custom pools.
13537  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13538  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13539  ++customCtxIndex)
13540  {
13541  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13542  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13543  pBlockVectorCtx->GetBlockVector()->Defragment(
13544  pBlockVectorCtx,
13545  pStats,
13546  maxCpuBytesToMove, maxCpuAllocationsToMove,
13547  maxGpuBytesToMove, maxGpuAllocationsToMove,
13548  commandBuffer);
13549  if(pBlockVectorCtx->res != VK_SUCCESS)
13550  {
13551  res = pBlockVectorCtx->res;
13552  }
13553  }
13554 
13555  return res;
13556 }
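// A minimal usage sketch (editorial, not part of the implementation): driving a
// CPU-side defragmentation pass through the public API that ends in
// VmaDefragmentationContext_T::Defragment() above. `allocator`, `allocations`,
// and `allocCount` are hypothetical. Passing no command buffer zeroes the GPU
// limits, as handled at the top of Defragment().
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
defragInfo.commandBuffer = VK_NULL_HANDLE; // CPU-only defragmentation.

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res >= VK_SUCCESS)
{
    vmaDefragmentationEnd(allocator, defragCtx);
}
*/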
13557 
13558 ////////////////////////////////////////////////////////////////////////////////
13559 // VmaRecorder
13560 
13561 #if VMA_RECORDING_ENABLED
13562 
13563 VmaRecorder::VmaRecorder() :
13564  m_UseMutex(true),
13565  m_Flags(0),
13566  m_File(VMA_NULL),
13567  m_Freq(INT64_MAX),
13568  m_StartCounter(INT64_MAX)
13569 {
13570 }
13571 
13572 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13573 {
13574  m_UseMutex = useMutex;
13575  m_Flags = settings.flags;
13576 
13577  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13578  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13579 
13580  // Open file for writing.
13581  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13582  if(err != 0)
13583  {
13584  return VK_ERROR_INITIALIZATION_FAILED;
13585  }
13586 
13587  // Write header.
13588  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13589  fprintf(m_File, "%s\n", "1,5");
13590 
13591  return VK_SUCCESS;
13592 }
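// A usage sketch (editorial): enabling this recorder from the public API. The
// file path is hypothetical; VMA_RECORD_FLUSH_AFTER_CALL_BIT makes Flush()
// below call fflush() after every recorded entry, at a performance cost.
/*
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
recordSettings.pFilePath = "vma_recording.csv"; // hypothetical path

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
allocatorInfo.device = device;                 // assumed to exist
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/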
13593 
13594 VmaRecorder::~VmaRecorder()
13595 {
13596  if(m_File != VMA_NULL)
13597  {
13598  fclose(m_File);
13599  }
13600 }
13601 
13602 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13603 {
13604  CallParams callParams;
13605  GetBasicParams(callParams);
13606 
13607  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13608  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13609  Flush();
13610 }
13611 
13612 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13613 {
13614  CallParams callParams;
13615  GetBasicParams(callParams);
13616 
13617  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13618  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13619  Flush();
13620 }
13621 
13622 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13623 {
13624  CallParams callParams;
13625  GetBasicParams(callParams);
13626 
13627  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13628  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13629  createInfo.memoryTypeIndex,
13630  createInfo.flags,
13631  createInfo.blockSize,
13632  (uint64_t)createInfo.minBlockCount,
13633  (uint64_t)createInfo.maxBlockCount,
13634  createInfo.frameInUseCount,
13635  pool);
13636  Flush();
13637 }
13638 
13639 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13640 {
13641  CallParams callParams;
13642  GetBasicParams(callParams);
13643 
13644  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13645  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13646  pool);
13647  Flush();
13648 }
13649 
13650 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13651  const VkMemoryRequirements& vkMemReq,
13652  const VmaAllocationCreateInfo& createInfo,
13653  VmaAllocation allocation)
13654 {
13655  CallParams callParams;
13656  GetBasicParams(callParams);
13657 
13658  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13659  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13660  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13661  vkMemReq.size,
13662  vkMemReq.alignment,
13663  vkMemReq.memoryTypeBits,
13664  createInfo.flags,
13665  createInfo.usage,
13666  createInfo.requiredFlags,
13667  createInfo.preferredFlags,
13668  createInfo.memoryTypeBits,
13669  createInfo.pool,
13670  allocation,
13671  userDataStr.GetString());
13672  Flush();
13673 }
13674 
13675 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13676  const VkMemoryRequirements& vkMemReq,
13677  const VmaAllocationCreateInfo& createInfo,
13678  uint64_t allocationCount,
13679  const VmaAllocation* pAllocations)
13680 {
13681  CallParams callParams;
13682  GetBasicParams(callParams);
13683 
13684  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13685  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13686  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13687  vkMemReq.size,
13688  vkMemReq.alignment,
13689  vkMemReq.memoryTypeBits,
13690  createInfo.flags,
13691  createInfo.usage,
13692  createInfo.requiredFlags,
13693  createInfo.preferredFlags,
13694  createInfo.memoryTypeBits,
13695  createInfo.pool);
13696  PrintPointerList(allocationCount, pAllocations);
13697  fprintf(m_File, ",%s\n", userDataStr.GetString());
13698  Flush();
13699 }
13700 
13701 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13702  const VkMemoryRequirements& vkMemReq,
13703  bool requiresDedicatedAllocation,
13704  bool prefersDedicatedAllocation,
13705  const VmaAllocationCreateInfo& createInfo,
13706  VmaAllocation allocation)
13707 {
13708  CallParams callParams;
13709  GetBasicParams(callParams);
13710 
13711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13712  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13713  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13714  vkMemReq.size,
13715  vkMemReq.alignment,
13716  vkMemReq.memoryTypeBits,
13717  requiresDedicatedAllocation ? 1 : 0,
13718  prefersDedicatedAllocation ? 1 : 0,
13719  createInfo.flags,
13720  createInfo.usage,
13721  createInfo.requiredFlags,
13722  createInfo.preferredFlags,
13723  createInfo.memoryTypeBits,
13724  createInfo.pool,
13725  allocation,
13726  userDataStr.GetString());
13727  Flush();
13728 }
13729 
13730 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13731  const VkMemoryRequirements& vkMemReq,
13732  bool requiresDedicatedAllocation,
13733  bool prefersDedicatedAllocation,
13734  const VmaAllocationCreateInfo& createInfo,
13735  VmaAllocation allocation)
13736 {
13737  CallParams callParams;
13738  GetBasicParams(callParams);
13739 
13740  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13741  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13742  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13743  vkMemReq.size,
13744  vkMemReq.alignment,
13745  vkMemReq.memoryTypeBits,
13746  requiresDedicatedAllocation ? 1 : 0,
13747  prefersDedicatedAllocation ? 1 : 0,
13748  createInfo.flags,
13749  createInfo.usage,
13750  createInfo.requiredFlags,
13751  createInfo.preferredFlags,
13752  createInfo.memoryTypeBits,
13753  createInfo.pool,
13754  allocation,
13755  userDataStr.GetString());
13756  Flush();
13757 }
13758 
13759 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13760  VmaAllocation allocation)
13761 {
13762  CallParams callParams;
13763  GetBasicParams(callParams);
13764 
13765  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13766  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13767  allocation);
13768  Flush();
13769 }
13770 
13771 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
13772  uint64_t allocationCount,
13773  const VmaAllocation* pAllocations)
13774 {
13775  CallParams callParams;
13776  GetBasicParams(callParams);
13777 
13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13779  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
13780  PrintPointerList(allocationCount, pAllocations);
13781  fprintf(m_File, "\n");
13782  Flush();
13783 }
13784 
13785 void VmaRecorder::RecordResizeAllocation(
13786  uint32_t frameIndex,
13787  VmaAllocation allocation,
13788  VkDeviceSize newSize)
13789 {
13790  CallParams callParams;
13791  GetBasicParams(callParams);
13792 
13793  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13794  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13795  allocation, newSize);
13796  Flush();
13797 }
13798 
13799 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
13800  VmaAllocation allocation,
13801  const void* pUserData)
13802 {
13803  CallParams callParams;
13804  GetBasicParams(callParams);
13805 
13806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13807  UserDataString userDataStr(
13808  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
13809  pUserData);
13810  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13811  allocation,
13812  userDataStr.GetString());
13813  Flush();
13814 }
13815 
13816 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13817  VmaAllocation allocation)
13818 {
13819  CallParams callParams;
13820  GetBasicParams(callParams);
13821 
13822  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13823  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13824  allocation);
13825  Flush();
13826 }
13827 
13828 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13829  VmaAllocation allocation)
13830 {
13831  CallParams callParams;
13832  GetBasicParams(callParams);
13833 
13834  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13835  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13836  allocation);
13837  Flush();
13838 }
13839 
13840 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13841  VmaAllocation allocation)
13842 {
13843  CallParams callParams;
13844  GetBasicParams(callParams);
13845 
13846  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13847  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13848  allocation);
13849  Flush();
13850 }
13851 
13852 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13853  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13854 {
13855  CallParams callParams;
13856  GetBasicParams(callParams);
13857 
13858  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13859  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13860  allocation,
13861  offset,
13862  size);
13863  Flush();
13864 }
13865 
13866 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13867  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13868 {
13869  CallParams callParams;
13870  GetBasicParams(callParams);
13871 
13872  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13873  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13874  allocation,
13875  offset,
13876  size);
13877  Flush();
13878 }
13879 
13880 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
13881  const VkBufferCreateInfo& bufCreateInfo,
13882  const VmaAllocationCreateInfo& allocCreateInfo,
13883  VmaAllocation allocation)
13884 {
13885  CallParams callParams;
13886  GetBasicParams(callParams);
13887 
13888  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13889  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13890  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13891  bufCreateInfo.flags,
13892  bufCreateInfo.size,
13893  bufCreateInfo.usage,
13894  bufCreateInfo.sharingMode,
13895  allocCreateInfo.flags,
13896  allocCreateInfo.usage,
13897  allocCreateInfo.requiredFlags,
13898  allocCreateInfo.preferredFlags,
13899  allocCreateInfo.memoryTypeBits,
13900  allocCreateInfo.pool,
13901  allocation,
13902  userDataStr.GetString());
13903  Flush();
13904 }
13905 
13906 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
13907  const VkImageCreateInfo& imageCreateInfo,
13908  const VmaAllocationCreateInfo& allocCreateInfo,
13909  VmaAllocation allocation)
13910 {
13911  CallParams callParams;
13912  GetBasicParams(callParams);
13913 
13914  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13915  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
13916  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13917  imageCreateInfo.flags,
13918  imageCreateInfo.imageType,
13919  imageCreateInfo.format,
13920  imageCreateInfo.extent.width,
13921  imageCreateInfo.extent.height,
13922  imageCreateInfo.extent.depth,
13923  imageCreateInfo.mipLevels,
13924  imageCreateInfo.arrayLayers,
13925  imageCreateInfo.samples,
13926  imageCreateInfo.tiling,
13927  imageCreateInfo.usage,
13928  imageCreateInfo.sharingMode,
13929  imageCreateInfo.initialLayout,
13930  allocCreateInfo.flags,
13931  allocCreateInfo.usage,
13932  allocCreateInfo.requiredFlags,
13933  allocCreateInfo.preferredFlags,
13934  allocCreateInfo.memoryTypeBits,
13935  allocCreateInfo.pool,
13936  allocation,
13937  userDataStr.GetString());
13938  Flush();
13939 }
13940 
13941 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13942  VmaAllocation allocation)
13943 {
13944  CallParams callParams;
13945  GetBasicParams(callParams);
13946 
13947  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13948  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13949  allocation);
13950  Flush();
13951 }
13952 
13953 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13954  VmaAllocation allocation)
13955 {
13956  CallParams callParams;
13957  GetBasicParams(callParams);
13958 
13959  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13960  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13961  allocation);
13962  Flush();
13963 }
13964 
13965 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13966  VmaAllocation allocation)
13967 {
13968  CallParams callParams;
13969  GetBasicParams(callParams);
13970 
13971  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13972  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13973  allocation);
13974  Flush();
13975 }
13976 
13977 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13978  VmaAllocation allocation)
13979 {
13980  CallParams callParams;
13981  GetBasicParams(callParams);
13982 
13983  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13984  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13985  allocation);
13986  Flush();
13987 }
13988 
13989 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13990  VmaPool pool)
13991 {
13992  CallParams callParams;
13993  GetBasicParams(callParams);
13994 
13995  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13996  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13997  pool);
13998  Flush();
13999 }
14000 
14001 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14002  const VmaDefragmentationInfo2& info,
14003  VmaDefragmentationContext ctx)
14004 {
14005  CallParams callParams;
14006  GetBasicParams(callParams);
14007 
14008  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14009  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14010  info.flags);
14011  PrintPointerList(info.allocationCount, info.pAllocations);
14012  fprintf(m_File, ",");
14013  PrintPointerList(info.poolCount, info.pPools);
14014  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14015  info.maxCpuBytesToMove,
14016  info.maxCpuAllocationsToMove,
14017  info.maxGpuBytesToMove,
14018  info.maxGpuAllocationsToMove,
14019  info.commandBuffer,
14020  ctx);
14021  Flush();
14022 }
14023 
14024 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14025  VmaDefragmentationContext ctx)
14026 {
14027  CallParams callParams;
14028  GetBasicParams(callParams);
14029 
14030  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14031  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14032  ctx);
14033  Flush();
14034 }
14035 
14036 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14037 {
14038  if(pUserData != VMA_NULL)
14039  {
14040  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14041  {
14042  m_Str = (const char*)pUserData;
14043  }
14044  else
14045  {
14046  sprintf_s(m_PtrStr, "%p", pUserData);
14047  m_Str = m_PtrStr;
14048  }
14049  }
14050  else
14051  {
14052  m_Str = "";
14053  }
14054 }
14055 
14056 void VmaRecorder::WriteConfiguration(
14057  const VkPhysicalDeviceProperties& devProps,
14058  const VkPhysicalDeviceMemoryProperties& memProps,
14059  bool dedicatedAllocationExtensionEnabled)
14060 {
14061  fprintf(m_File, "Config,Begin\n");
14062 
14063  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14064  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14065  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14066  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14067  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14068  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14069 
14070  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14071  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14072  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14073 
14074  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14075  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14076  {
14077  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14078  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14079  }
14080  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14081  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14082  {
14083  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14084  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14085  }
14086 
14087  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14088 
14089  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14090  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14091  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14092  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14093  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14094  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14095  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14096  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14097  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14098 
14099  fprintf(m_File, "Config,End\n");
14100 }
14101 
14102 void VmaRecorder::GetBasicParams(CallParams& outParams)
14103 {
14104  outParams.threadId = GetCurrentThreadId();
14105 
14106  LARGE_INTEGER counter;
14107  QueryPerformanceCounter(&counter);
14108  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14109 }
14110 
14111 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14112 {
14113  if(count)
14114  {
14115  fprintf(m_File, "%p", pItems[0]);
14116  for(uint64_t i = 1; i < count; ++i)
14117  {
14118  fprintf(m_File, " %p", pItems[i]);
14119  }
14120  }
14121 }
14122 
14123 void VmaRecorder::Flush()
14124 {
14125  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14126  {
14127  fflush(m_File);
14128  }
14129 }
14130 
14131 #endif // #if VMA_RECORDING_ENABLED
14132 
14133 ////////////////////////////////////////////////////////////////////////////////
14134 // VmaAllocationObjectAllocator
14135 
14136 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14137  m_Allocator(pAllocationCallbacks, 1024)
14138 {
14139 }
14140 
14141 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14142 {
14143  VmaMutexLock mutexLock(m_Mutex);
14144  return m_Allocator.Alloc();
14145 }
14146 
14147 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14148 {
14149  VmaMutexLock mutexLock(m_Mutex);
14150  m_Allocator.Free(hAlloc);
14151 }
14152 
14153 ////////////////////////////////////////////////////////////////////////////////
14154 // VmaAllocator_T
14155 
14156 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14157  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14158  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14159  m_hDevice(pCreateInfo->device),
14160  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14161  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14162  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14163  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14164  m_PreferredLargeHeapBlockSize(0),
14165  m_PhysicalDevice(pCreateInfo->physicalDevice),
14166  m_CurrentFrameIndex(0),
14167  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14168  m_NextPoolId(0)
14169 #if VMA_RECORDING_ENABLED
14170  ,m_pRecorder(VMA_NULL)
14171 #endif
14172 {
14173  if(VMA_DEBUG_DETECT_CORRUPTION)
14174  {
14175  // VMA_DEBUG_MARGIN must be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14176  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14177  }
14178 
14179  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14180 
14181 #if !(VMA_DEDICATED_ALLOCATION)
14182  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14183  {
14184  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14185  }
14186 #endif
14187 
14188  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14189  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14190  memset(&m_MemProps, 0, sizeof(m_MemProps));
14191 
14192  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14193  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14194 
14195  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14196  {
14197  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
14198  }
14199 
14200  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14201  {
14202  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14203  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14204  }
14205 
14206  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14207 
14208  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14209  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14210 
14211  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14212  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14213  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14214  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14215 
14216  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14217  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14218 
14219  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14220  {
14221  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14222  {
14223  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14224  if(limit != VK_WHOLE_SIZE)
14225  {
14226  m_HeapSizeLimit[heapIndex] = limit;
14227  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14228  {
14229  m_MemProps.memoryHeaps[heapIndex].size = limit;
14230  }
14231  }
14232  }
14233  }
14234 
14235  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14236  {
14237  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14238 
14239  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14240  this,
14241  VK_NULL_HANDLE, // hParentPool
14242  memTypeIndex,
14243  preferredBlockSize,
14244  0,
14245  SIZE_MAX,
14246  GetBufferImageGranularity(),
14247  pCreateInfo->frameInUseCount,
14248  false, // isCustomPool
14249  false, // explicitBlockSize
14250  false); // linearAlgorithm
14251  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14252  // because minBlockCount is 0.
14253  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14254 
14255  }
14256 }
14257 
14258 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14259 {
14260  VkResult res = VK_SUCCESS;
14261 
14262  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14263  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14264  {
14265 #if VMA_RECORDING_ENABLED
14266  m_pRecorder = vma_new(this, VmaRecorder)();
14267  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14268  if(res != VK_SUCCESS)
14269  {
14270  return res;
14271  }
14272  m_pRecorder->WriteConfiguration(
14273  m_PhysicalDeviceProperties,
14274  m_MemProps,
14275  m_UseKhrDedicatedAllocation);
14276  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14277 #else
14278  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14279  return VK_ERROR_FEATURE_NOT_PRESENT;
14280 #endif
14281  }
14282 
14283  return res;
14284 }
14285 
14286 VmaAllocator_T::~VmaAllocator_T()
14287 {
14288 #if VMA_RECORDING_ENABLED
14289  if(m_pRecorder != VMA_NULL)
14290  {
14291  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14292  vma_delete(this, m_pRecorder);
14293  }
14294 #endif
14295 
14296  VMA_ASSERT(m_Pools.empty());
14297 
14298  for(size_t i = GetMemoryTypeCount(); i--; )
14299  {
14300  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14301  {
14302  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14303  }
14304 
14305  vma_delete(this, m_pDedicatedAllocations[i]);
14306  vma_delete(this, m_pBlockVectors[i]);
14307  }
14308 }
14309 
14310 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14311 {
14312 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14313  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14314  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14315  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14316  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14317  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14318  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14319  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14320  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14321  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14322  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14323  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14324  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14325  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14326  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14327  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14328  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14329  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14330 #if VMA_DEDICATED_ALLOCATION
14331  if(m_UseKhrDedicatedAllocation)
14332  {
14333  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14334  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14335  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14336  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14337  }
14338 #endif // #if VMA_DEDICATED_ALLOCATION
14339 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14340 
14341 #define VMA_COPY_IF_NOT_NULL(funcName) \
14342  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14343 
14344  if(pVulkanFunctions != VMA_NULL)
14345  {
14346  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14347  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14348  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14349  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14350  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14351  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14352  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14353  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14354  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14355  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14356  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14357  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14358  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14359  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14360  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14361  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14362  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14363 #if VMA_DEDICATED_ALLOCATION
14364  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14365  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14366 #endif
14367  }
14368 
14369 #undef VMA_COPY_IF_NOT_NULL
14370 
14371  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14372  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14373  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14374  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14375  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14376  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14377  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14378  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14379  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14380  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14381  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14382  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14383  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14384  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14385  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14386  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14387  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14388  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14389  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14390 #if VMA_DEDICATED_ALLOCATION
14391  if(m_UseKhrDedicatedAllocation)
14392  {
14393  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14394  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14395  }
14396 #endif
14397 }
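// A usage sketch (editorial): when the library is compiled with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the pointers imported above must be
// supplied through VmaAllocatorCreateInfo::pVulkanFunctions, e.g. fetched via a
// loader or vkGetDeviceProcAddr. Only a few members are shown; all of them must
// be set for the asserts above to pass.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ... and so on for every member of VmaVulkanFunctions.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
allocatorInfo.device = device;                 // assumed to exist
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/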
14398 
14399 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14400 {
14401  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14402  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14403  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14404  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14405 }
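// Worked example, assuming the default macro values (VMA_SMALL_HEAP_MAX_SIZE =
// 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 256 MiB heap counts as
// small, so its preferred block size is 256 MiB / 8 = 32 MiB; an 8 GiB heap is
// large, so it gets the full 256 MiB default block size.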
14406 
14407 VkResult VmaAllocator_T::AllocateMemoryOfType(
14408  VkDeviceSize size,
14409  VkDeviceSize alignment,
14410  bool dedicatedAllocation,
14411  VkBuffer dedicatedBuffer,
14412  VkImage dedicatedImage,
14413  const VmaAllocationCreateInfo& createInfo,
14414  uint32_t memTypeIndex,
14415  VmaSuballocationType suballocType,
14416  size_t allocationCount,
14417  VmaAllocation* pAllocations)
14418 {
14419  VMA_ASSERT(pAllocations != VMA_NULL);
14420  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14421 
14422  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14423 
14424  // If memory type is not HOST_VISIBLE, disable MAPPED.
14425  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14426  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14427  {
14428  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14429  }
14430 
14431  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14432  VMA_ASSERT(blockVector);
14433 
14434  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14435  bool preferDedicatedMemory =
14436  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14437  dedicatedAllocation ||
14438  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
14439  size > preferredBlockSize / 2;
14440 
14441  if(preferDedicatedMemory &&
14442  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14443  finalCreateInfo.pool == VK_NULL_HANDLE)
14444  {
14445  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14446  }
14447 
14448  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14449  {
14450  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14451  {
14452  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14453  }
14454  else
14455  {
14456  return AllocateDedicatedMemory(
14457  size,
14458  suballocType,
14459  memTypeIndex,
14460  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14461  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14462  finalCreateInfo.pUserData,
14463  dedicatedBuffer,
14464  dedicatedImage,
14465  allocationCount,
14466  pAllocations);
14467  }
14468  }
14469  else
14470  {
14471  VkResult res = blockVector->Allocate(
14472  m_CurrentFrameIndex.load(),
14473  size,
14474  alignment,
14475  finalCreateInfo,
14476  suballocType,
14477  allocationCount,
14478  pAllocations);
14479  if(res == VK_SUCCESS)
14480  {
14481  return res;
14482  }
14483 
14484  // Allocation from block vector failed. Try dedicated memory.
14485  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14486  {
14487  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14488  }
14489  else
14490  {
14491  res = AllocateDedicatedMemory(
14492  size,
14493  suballocType,
14494  memTypeIndex,
14495  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14496  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14497  finalCreateInfo.pUserData,
14498  dedicatedBuffer,
14499  dedicatedImage,
14500  allocationCount,
14501  pAllocations);
14502  if(res == VK_SUCCESS)
14503  {
14504  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14505  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14506  return VK_SUCCESS;
14507  }
14508  else
14509  {
14510  // Everything failed: Return error code.
14511  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14512  return res;
14513  }
14514  }
14515  }
14516 }
14517 
14518 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14519  VkDeviceSize size,
14520  VmaSuballocationType suballocType,
14521  uint32_t memTypeIndex,
14522  bool map,
14523  bool isUserDataString,
14524  void* pUserData,
14525  VkBuffer dedicatedBuffer,
14526  VkImage dedicatedImage,
14527  size_t allocationCount,
14528  VmaAllocation* pAllocations)
14529 {
14530  VMA_ASSERT(allocationCount > 0 && pAllocations);
14531 
14532  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14533  allocInfo.memoryTypeIndex = memTypeIndex;
14534  allocInfo.allocationSize = size;
14535 
14536 #if VMA_DEDICATED_ALLOCATION
14537  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14538  if(m_UseKhrDedicatedAllocation)
14539  {
14540  if(dedicatedBuffer != VK_NULL_HANDLE)
14541  {
14542  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14543  dedicatedAllocInfo.buffer = dedicatedBuffer;
14544  allocInfo.pNext = &dedicatedAllocInfo;
14545  }
14546  else if(dedicatedImage != VK_NULL_HANDLE)
14547  {
14548  dedicatedAllocInfo.image = dedicatedImage;
14549  allocInfo.pNext = &dedicatedAllocInfo;
14550  }
14551  }
14552 #endif // #if VMA_DEDICATED_ALLOCATION
14553 
14554  size_t allocIndex;
14555  VkResult res = VK_SUCCESS;
14556  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14557  {
14558  res = AllocateDedicatedMemoryPage(
14559  size,
14560  suballocType,
14561  memTypeIndex,
14562  allocInfo,
14563  map,
14564  isUserDataString,
14565  pUserData,
14566  pAllocations + allocIndex);
14567  if(res != VK_SUCCESS)
14568  {
14569  break;
14570  }
14571  }
14572 
14573  if(res == VK_SUCCESS)
14574  {
14575  // Register them in m_pDedicatedAllocations.
14576  {
14577  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14578  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14579  VMA_ASSERT(pDedicatedAllocations);
14580  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14581  {
14582  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14583  }
14584  }
14585 
14586  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14587  }
14588  else
14589  {
14590  // Free all already created allocations.
14591  while(allocIndex--)
14592  {
14593  VmaAllocation currAlloc = pAllocations[allocIndex];
14594  VkDeviceMemory hMemory = currAlloc->GetMemory();
14595 
14596  /*
14597  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14598  before vkFreeMemory.
14599 
14600  if(currAlloc->GetMappedData() != VMA_NULL)
14601  {
14602  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14603  }
14604  */
14605 
14606  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14607 
14608  currAlloc->SetUserData(this, VMA_NULL);
14609  currAlloc->Dtor();
14610  m_AllocationObjectAllocator.Free(currAlloc);
14611  }
14612 
14613  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14614  }
14615 
14616  return res;
14617 }
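// A usage sketch (editorial): forcing the dedicated path above from user code
// by setting VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, typical for large,
// long-lived resources. The buffer parameters and `allocator` are hypothetical.
/*
VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 256ull * 1024 * 1024; // hypothetical 256 MiB buffer
bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
*/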
14618 
14619 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14620  VkDeviceSize size,
14621  VmaSuballocationType suballocType,
14622  uint32_t memTypeIndex,
14623  const VkMemoryAllocateInfo& allocInfo,
14624  bool map,
14625  bool isUserDataString,
14626  void* pUserData,
14627  VmaAllocation* pAllocation)
14628 {
14629  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14630  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14631  if(res < 0)
14632  {
14633  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14634  return res;
14635  }
14636 
14637  void* pMappedData = VMA_NULL;
14638  if(map)
14639  {
14640  res = (*m_VulkanFunctions.vkMapMemory)(
14641  m_hDevice,
14642  hMemory,
14643  0,
14644  VK_WHOLE_SIZE,
14645  0,
14646  &pMappedData);
14647  if(res < 0)
14648  {
14649  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14650  FreeVulkanMemory(memTypeIndex, size, hMemory);
14651  return res;
14652  }
14653  }
14654 
14655  *pAllocation = m_AllocationObjectAllocator.Allocate();
14656  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14657  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14658  (*pAllocation)->SetUserData(this, pUserData);
14659  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14660  {
14661  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14662  }
14663 
14664  return VK_SUCCESS;
14665 }
14666 
14667 void VmaAllocator_T::GetBufferMemoryRequirements(
14668  VkBuffer hBuffer,
14669  VkMemoryRequirements& memReq,
14670  bool& requiresDedicatedAllocation,
14671  bool& prefersDedicatedAllocation) const
14672 {
14673 #if VMA_DEDICATED_ALLOCATION
14674  if(m_UseKhrDedicatedAllocation)
14675  {
14676  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14677  memReqInfo.buffer = hBuffer;
14678 
14679  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14680 
14681  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14682  memReq2.pNext = &memDedicatedReq;
14683 
14684  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14685 
14686  memReq = memReq2.memoryRequirements;
14687  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14688  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14689  }
14690  else
14691 #endif // #if VMA_DEDICATED_ALLOCATION
14692  {
14693  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14694  requiresDedicatedAllocation = false;
14695  prefersDedicatedAllocation = false;
14696  }
14697 }
14698 
14699 void VmaAllocator_T::GetImageMemoryRequirements(
14700  VkImage hImage,
14701  VkMemoryRequirements& memReq,
14702  bool& requiresDedicatedAllocation,
14703  bool& prefersDedicatedAllocation) const
14704 {
14705 #if VMA_DEDICATED_ALLOCATION
14706  if(m_UseKhrDedicatedAllocation)
14707  {
14708  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
14709  memReqInfo.image = hImage;
14710 
14711  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14712 
14713  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14714  memReq2.pNext = &memDedicatedReq;
14715 
14716  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14717 
14718  memReq = memReq2.memoryRequirements;
14719  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14720  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14721  }
14722  else
14723 #endif // #if VMA_DEDICATED_ALLOCATION
14724  {
14725  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14726  requiresDedicatedAllocation = false;
14727  prefersDedicatedAllocation = false;
14728  }
14729 }
14730 
14731 VkResult VmaAllocator_T::AllocateMemory(
14732  const VkMemoryRequirements& vkMemReq,
14733  bool requiresDedicatedAllocation,
14734  bool prefersDedicatedAllocation,
14735  VkBuffer dedicatedBuffer,
14736  VkImage dedicatedImage,
14737  const VmaAllocationCreateInfo& createInfo,
14738  VmaSuballocationType suballocType,
14739  size_t allocationCount,
14740  VmaAllocation* pAllocations)
14741 {
14742  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14743 
14744  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14745 
14746  if(vkMemReq.size == 0)
14747  {
14748  return VK_ERROR_VALIDATION_FAILED_EXT;
14749  }
14750  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14751  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14752  {
14753  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14754  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14755  }
14756  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14758  {
14759  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14760  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14761  }
14762  if(requiresDedicatedAllocation)
14763  {
14764  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14765  {
14766  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14767  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14768  }
14769  if(createInfo.pool != VK_NULL_HANDLE)
14770  {
14771  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14772  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14773  }
14774  }
14775  if((createInfo.pool != VK_NULL_HANDLE) &&
14776  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14777  {
14778  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14780  }
14781 
14782  if(createInfo.pool != VK_NULL_HANDLE)
14783  {
14784  const VkDeviceSize alignmentForPool = VMA_MAX(
14785  vkMemReq.alignment,
14786  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14787  return createInfo.pool->m_BlockVector.Allocate(
14788  m_CurrentFrameIndex.load(),
14789  vkMemReq.size,
14790  alignmentForPool,
14791  createInfo,
14792  suballocType,
14793  allocationCount,
14794  pAllocations);
14795  }
14796  else
14797  {
14798  // Bit mask of Vulkan memory types acceptable for this allocation.
14799  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14800  uint32_t memTypeIndex = UINT32_MAX;
14801  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14802  if(res == VK_SUCCESS)
14803  {
14804  VkDeviceSize alignmentForMemType = VMA_MAX(
14805  vkMemReq.alignment,
14806  GetMemoryTypeMinAlignment(memTypeIndex));
14807 
14808  res = AllocateMemoryOfType(
14809  vkMemReq.size,
14810  alignmentForMemType,
14811  requiresDedicatedAllocation || prefersDedicatedAllocation,
14812  dedicatedBuffer,
14813  dedicatedImage,
14814  createInfo,
14815  memTypeIndex,
14816  suballocType,
14817  allocationCount,
14818  pAllocations);
14819  // Succeeded on first try.
14820  if(res == VK_SUCCESS)
14821  {
14822  return res;
14823  }
14824  // Allocation from this memory type failed. Try other compatible memory types.
14825  else
14826  {
14827  for(;;)
14828  {
14829  // Remove old memTypeIndex from list of possibilities.
14830  memoryTypeBits &= ~(1u << memTypeIndex);
14831  // Find alternative memTypeIndex.
14832  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14833  if(res == VK_SUCCESS)
14834  {
14835  alignmentForMemType = VMA_MAX(
14836  vkMemReq.alignment,
14837  GetMemoryTypeMinAlignment(memTypeIndex));
14838 
14839  res = AllocateMemoryOfType(
14840  vkMemReq.size,
14841  alignmentForMemType,
14842  requiresDedicatedAllocation || prefersDedicatedAllocation,
14843  dedicatedBuffer,
14844  dedicatedImage,
14845  createInfo,
14846  memTypeIndex,
14847  suballocType,
14848  allocationCount,
14849  pAllocations);
14850  // Allocation from this alternative memory type succeeded.
14851  if(res == VK_SUCCESS)
14852  {
14853  return res;
14854  }
14855  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14856  }
14857  // No other matching memory type index could be found.
14858  else
14859  {
14860  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14861  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14862  }
14863  }
14864  }
14865  }
14866  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14867  else
14868  return res;
14869  }
14870 }
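// A usage sketch (editorial): the fallback loop above is what makes a plain
// vmaAllocateMemory() call try every compatible memory type before giving up.
// `allocator` and the requirements below are hypothetical; memReq would
// normally come from vkGetBufferMemoryRequirements or vkGetImageMemoryRequirements.
/*
VkMemoryRequirements memReq = {};
memReq.size = 65536;
memReq.alignment = 256;
memReq.memoryTypeBits = UINT32_MAX; // any memory type acceptable

VmaAllocationCreateInfo createInfo = {};
createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, VMA_NULL);
*/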
14871 
14872 void VmaAllocator_T::FreeMemory(
14873  size_t allocationCount,
14874  const VmaAllocation* pAllocations)
14875 {
14876  VMA_ASSERT(pAllocations);
14877 
14878  for(size_t allocIndex = allocationCount; allocIndex--; )
14879  {
14880  VmaAllocation allocation = pAllocations[allocIndex];
14881 
14882  if(allocation != VK_NULL_HANDLE)
14883  {
14884  if(TouchAllocation(allocation))
14885  {
14886  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14887  {
14888  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14889  }
14890 
14891  switch(allocation->GetType())
14892  {
14893  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14894  {
14895  VmaBlockVector* pBlockVector = VMA_NULL;
14896  VmaPool hPool = allocation->GetBlock()->GetParentPool();
14897  if(hPool != VK_NULL_HANDLE)
14898  {
14899  pBlockVector = &hPool->m_BlockVector;
14900  }
14901  else
14902  {
14903  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14904  pBlockVector = m_pBlockVectors[memTypeIndex];
14905  }
14906  pBlockVector->Free(allocation);
14907  }
14908  break;
14909  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14910  FreeDedicatedMemory(allocation);
14911  break;
14912  default:
14913  VMA_ASSERT(0);
14914  }
14915  }
14916 
14917  allocation->SetUserData(this, VMA_NULL);
14918  allocation->Dtor();
14919  m_AllocationObjectAllocator.Free(allocation);
14920  }
14921  }
14922 }
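// A usage sketch (editorial): the public counterparts that reach FreeMemory().
// vmaFreeMemory handles a single allocation, vmaFreeMemoryPages a batch; both
// tolerate VK_NULL_HANDLE entries, as the null check above shows.
/*
vmaFreeMemory(allocator, allocation);
vmaFreeMemoryPages(allocator, allocCount, allocations); // batch variant
*/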
14923 
14924 VkResult VmaAllocator_T::ResizeAllocation(
14925  const VmaAllocation alloc,
14926  VkDeviceSize newSize)
14927 {
14928  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14929  {
14930  return VK_ERROR_VALIDATION_FAILED_EXT;
14931  }
14932  if(newSize == alloc->GetSize())
14933  {
14934  return VK_SUCCESS;
14935  }
14936 
14937  switch(alloc->GetType())
14938  {
14939  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14940  return VK_ERROR_FEATURE_NOT_PRESENT;
14941  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14942  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14943  {
14944  alloc->ChangeSize(newSize);
14945  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14946  return VK_SUCCESS;
14947  }
14948  else
14949  {
14950  return VK_ERROR_OUT_OF_POOL_MEMORY;
14951  }
14952  default:
14953  VMA_ASSERT(0);
14954  return VK_ERROR_VALIDATION_FAILED_EXT;
14955  }
14956 }
14957 
14958 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14959 {
14960  // Initialize.
14961  InitStatInfo(pStats->total);
14962  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14963  InitStatInfo(pStats->memoryType[i]);
14964  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14965  InitStatInfo(pStats->memoryHeap[i]);
14966 
14967  // Process default pools.
14968  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14969  {
14970  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14971  VMA_ASSERT(pBlockVector);
14972  pBlockVector->AddStats(pStats);
14973  }
14974 
14975  // Process custom pools.
14976  {
14977  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14978  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14979  {
14980  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14981  }
14982  }
14983 
14984  // Process dedicated allocations.
14985  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14986  {
14987  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14988  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14989  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14990  VMA_ASSERT(pDedicatedAllocVector);
14991  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14992  {
14993  VmaStatInfo allocationStatInfo;
14994  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14995  VmaAddStatInfo(pStats->total, allocationStatInfo);
14996  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14997  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14998  }
14999  }
15000 
15001  // Postprocess.
15002  VmaPostprocessCalcStatInfo(pStats->total);
15003  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15004  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15005  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15006  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15007 }
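
// Editorial sketch (not part of the original listing): gathering the totals
// computed above through the public API. Assumes a valid `allocator`.
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
// stats.total, stats.memoryType[i] and stats.memoryHeap[h] are now filled
// from default pools, custom pools and dedicated allocations alike.
printf("Used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);
*/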
15008 
15009 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15010 
15011 VkResult VmaAllocator_T::DefragmentationBegin(
15012  const VmaDefragmentationInfo2& info,
15013  VmaDefragmentationStats* pStats,
15014  VmaDefragmentationContext* pContext)
15015 {
15016  if(info.pAllocationsChanged != VMA_NULL)
15017  {
15018  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15019  }
15020 
15021  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15022  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15023 
15024  (*pContext)->AddPools(info.poolCount, info.pPools);
15025  (*pContext)->AddAllocations(
15026  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15027 
15028  VkResult res = (*pContext)->Defragment(
15029  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15030  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15031  info.commandBuffer, pStats);
15032 
15033  if(res != VK_NOT_READY)
15034  {
15035  vma_delete(this, *pContext);
15036  *pContext = VMA_NULL;
15037  }
15038 
15039  return res;
15040 }
15041 
15042 VkResult VmaAllocator_T::DefragmentationEnd(
15043  VmaDefragmentationContext context)
15044 {
15045  vma_delete(this, context);
15046  return VK_SUCCESS;
15047 }
15048 
15049 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15050 {
15051  if(hAllocation->CanBecomeLost())
15052  {
15053  /*
15054  Warning: This is a carefully designed algorithm.
15055  Do not modify unless you really know what you're doing :)
15056  */
15057  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15058  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15059  for(;;)
15060  {
15061  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15062  {
15063  pAllocationInfo->memoryType = UINT32_MAX;
15064  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15065  pAllocationInfo->offset = 0;
15066  pAllocationInfo->size = hAllocation->GetSize();
15067  pAllocationInfo->pMappedData = VMA_NULL;
15068  pAllocationInfo->pUserData = hAllocation->GetUserData();
15069  return;
15070  }
15071  else if(localLastUseFrameIndex == localCurrFrameIndex)
15072  {
15073  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15074  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15075  pAllocationInfo->offset = hAllocation->GetOffset();
15076  pAllocationInfo->size = hAllocation->GetSize();
15077  pAllocationInfo->pMappedData = VMA_NULL;
15078  pAllocationInfo->pUserData = hAllocation->GetUserData();
15079  return;
15080  }
15081  else // Last use time earlier than current time.
15082  {
15083  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15084  {
15085  localLastUseFrameIndex = localCurrFrameIndex;
15086  }
15087  }
15088  }
15089  }
15090  else
15091  {
15092 #if VMA_STATS_STRING_ENABLED
15093  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15094  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15095  for(;;)
15096  {
15097  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15098  if(localLastUseFrameIndex == localCurrFrameIndex)
15099  {
15100  break;
15101  }
15102  else // Last use time earlier than current time.
15103  {
15104  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15105  {
15106  localLastUseFrameIndex = localCurrFrameIndex;
15107  }
15108  }
15109  }
15110 #endif
15111 
15112  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15113  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15114  pAllocationInfo->offset = hAllocation->GetOffset();
15115  pAllocationInfo->size = hAllocation->GetSize();
15116  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15117  pAllocationInfo->pUserData = hAllocation->GetUserData();
15118  }
15119 }
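
// Editorial sketch (not part of the original listing): querying an allocation
// through the public wrapper of the function above. For an allocation created
// with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, a lost allocation is
// reported with deviceMemory == VK_NULL_HANDLE. `allocator` and `alloc` are
// assumed to exist.
/*
VmaAllocationInfo info;
vmaGetAllocationInfo(allocator, alloc, &info);
if(info.deviceMemory == VK_NULL_HANDLE)
{
    // Allocation was lost - recreate the resource before using it.
}
*/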
15120 
15121 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15122 {
15123  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15124  if(hAllocation->CanBecomeLost())
15125  {
15126  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15127  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15128  for(;;)
15129  {
15130  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15131  {
15132  return false;
15133  }
15134  else if(localLastUseFrameIndex == localCurrFrameIndex)
15135  {
15136  return true;
15137  }
15138  else // Last use time earlier than current time.
15139  {
15140  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15141  {
15142  localLastUseFrameIndex = localCurrFrameIndex;
15143  }
15144  }
15145  }
15146  }
15147  else
15148  {
15149 #if VMA_STATS_STRING_ENABLED
15150  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15151  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15152  for(;;)
15153  {
15154  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15155  if(localLastUseFrameIndex == localCurrFrameIndex)
15156  {
15157  break;
15158  }
15159  else // Last use time earlier than current time.
15160  {
15161  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15162  {
15163  localLastUseFrameIndex = localCurrFrameIndex;
15164  }
15165  }
15166  }
15167 #endif
15168 
15169  return true;
15170  }
15171 }
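
// Editorial sketch (not part of the original listing): the typical per-frame
// pattern for lost-capable allocations - advance the frame index, then touch
// the allocation to mark it used and learn whether it is still valid.
// `frameIndex` is a hypothetical frame counter.
/*
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // Lost: free it and allocate a fresh one.
}
*/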
15172 
15173 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15174 {
15175  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15176 
15177  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15178 
15179  if(newCreateInfo.maxBlockCount == 0)
15180  {
15181  newCreateInfo.maxBlockCount = SIZE_MAX;
15182  }
15183  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15184  {
15185  return VK_ERROR_INITIALIZATION_FAILED;
15186  }
15187 
15188  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15189 
15190  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15191 
15192  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15193  if(res != VK_SUCCESS)
15194  {
15195  vma_delete(this, *pPool);
15196  *pPool = VMA_NULL;
15197  return res;
15198  }
15199 
15200  // Add to m_Pools.
15201  {
15202  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15203  (*pPool)->SetId(m_NextPoolId++);
15204  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15205  }
15206 
15207  return VK_SUCCESS;
15208 }
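
// Editorial sketch (not part of the original listing): creating a custom pool
// via the public API. The memory type index would normally come from
// vmaFindMemoryTypeIndex; the block sizes here are illustrative.
/*
VmaPoolCreateInfo poolInfo = {};
poolInfo.memoryTypeIndex = memTypeIndex;   // from vmaFindMemoryTypeIndex
poolInfo.blockSize = 64ull * 1024 * 1024;  // 64 MiB per block
poolInfo.minBlockCount = 1;
poolInfo.maxBlockCount = 0;                // 0 = unlimited, becomes SIZE_MAX above
VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
*/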
15209 
15210 void VmaAllocator_T::DestroyPool(VmaPool pool)
15211 {
15212  // Remove from m_Pools.
15213  {
15214  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15215  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15216  VMA_ASSERT(success && "Pool not found in Allocator.");
15217  }
15218 
15219  vma_delete(this, pool);
15220 }
15221 
15222 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15223 {
15224  pool->m_BlockVector.GetPoolStats(pPoolStats);
15225 }
15226 
15227 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15228 {
15229  m_CurrentFrameIndex.store(frameIndex);
15230 }
15231 
15232 void VmaAllocator_T::MakePoolAllocationsLost(
15233  VmaPool hPool,
15234  size_t* pLostAllocationCount)
15235 {
15236  hPool->m_BlockVector.MakePoolAllocationsLost(
15237  m_CurrentFrameIndex.load(),
15238  pLostAllocationCount);
15239 }
15240 
15241 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15242 {
15243  return hPool->m_BlockVector.CheckCorruption();
15244 }
15245 
15246 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15247 {
15248  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15249 
15250  // Process default pools.
15251  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15252  {
15253  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15254  {
15255  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15256  VMA_ASSERT(pBlockVector);
15257  VkResult localRes = pBlockVector->CheckCorruption();
15258  switch(localRes)
15259  {
15260  case VK_ERROR_FEATURE_NOT_PRESENT:
15261  break;
15262  case VK_SUCCESS:
15263  finalRes = VK_SUCCESS;
15264  break;
15265  default:
15266  return localRes;
15267  }
15268  }
15269  }
15270 
15271  // Process custom pools.
15272  {
15273  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15274  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15275  {
15276  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15277  {
15278  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15279  switch(localRes)
15280  {
15281  case VK_ERROR_FEATURE_NOT_PRESENT:
15282  break;
15283  case VK_SUCCESS:
15284  finalRes = VK_SUCCESS;
15285  break;
15286  default:
15287  return localRes;
15288  }
15289  }
15290  }
15291  }
15292 
15293  return finalRes;
15294 }
15295 
15296 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15297 {
15298  *pAllocation = m_AllocationObjectAllocator.Allocate();
15299  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15300  (*pAllocation)->InitLost();
15301 }
15302 
15303 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15304 {
15305  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15306 
15307  VkResult res;
15308  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15309  {
15310  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15311  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
15312  {
15313  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15314  if(res == VK_SUCCESS)
15315  {
15316  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
15317  }
15318  }
15319  else
15320  {
15321  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
15322  }
15323  }
15324  else
15325  {
15326  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15327  }
15328 
15329  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15330  {
15331  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15332  }
15333 
15334  return res;
15335 }
15336 
15337 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15338 {
15339  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15340  {
15341  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15342  }
15343 
15344  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15345 
15346  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15347  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
15348  {
15349  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
15350  m_HeapSizeLimit[heapIndex] += size;
15351  }
15352 }
15353 
15354 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15355 {
15356  if(hAllocation->CanBecomeLost())
15357  {
15358  return VK_ERROR_MEMORY_MAP_FAILED;
15359  }
15360 
15361  switch(hAllocation->GetType())
15362  {
15363  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15364  {
15365  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15366  char *pBytes = VMA_NULL;
15367  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15368  if(res == VK_SUCCESS)
15369  {
15370  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15371  hAllocation->BlockAllocMap();
15372  }
15373  return res;
15374  }
15375  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15376  return hAllocation->DedicatedAllocMap(this, ppData);
15377  default:
15378  VMA_ASSERT(0);
15379  return VK_ERROR_MEMORY_MAP_FAILED;
15380  }
15381 }
15382 
15383 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15384 {
15385  switch(hAllocation->GetType())
15386  {
15387  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15388  {
15389  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15390  hAllocation->BlockAllocUnmap();
15391  pBlock->Unmap(this, 1);
15392  }
15393  break;
15394  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15395  hAllocation->DedicatedAllocUnmap(this);
15396  break;
15397  default:
15398  VMA_ASSERT(0);
15399  }
15400 }
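
// Editorial sketch (not part of the original listing): the public map/unmap
// pair built on the two functions above. Map() returns a pointer already
// offset to this allocation within its block. Assumes host-visible memory;
// `srcData` and `dataSize` are hypothetical.
/*
void* pData = nullptr;
if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, (size_t)dataSize);
    vmaUnmapMemory(allocator, alloc);
}
*/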
15401 
15402 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
15403 {
15404  VkResult res = VK_SUCCESS;
15405  switch(hAllocation->GetType())
15406  {
15407  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15408  res = GetVulkanFunctions().vkBindBufferMemory(
15409  m_hDevice,
15410  hBuffer,
15411  hAllocation->GetMemory(),
15412  0); //memoryOffset
15413  break;
15414  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15415  {
15416  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15417  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15418  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
15419  break;
15420  }
15421  default:
15422  VMA_ASSERT(0);
15423  }
15424  return res;
15425 }
15426 
15427 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
15428 {
15429  VkResult res = VK_SUCCESS;
15430  switch(hAllocation->GetType())
15431  {
15432  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15433  res = GetVulkanFunctions().vkBindImageMemory(
15434  m_hDevice,
15435  hImage,
15436  hAllocation->GetMemory(),
15437  0); //memoryOffset
15438  break;
15439  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15440  {
15441  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15442  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15443  res = pBlock->BindImageMemory(this, hAllocation, hImage);
15444  break;
15445  }
15446  default:
15447  VMA_ASSERT(0);
15448  }
15449  return res;
15450 }
15451 
15452 void VmaAllocator_T::FlushOrInvalidateAllocation(
15453  VmaAllocation hAllocation,
15454  VkDeviceSize offset, VkDeviceSize size,
15455  VMA_CACHE_OPERATION op)
15456 {
15457  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15458  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15459  {
15460  const VkDeviceSize allocationSize = hAllocation->GetSize();
15461  VMA_ASSERT(offset <= allocationSize);
15462 
15463  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15464 
15465  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15466  memRange.memory = hAllocation->GetMemory();
15467 
15468  switch(hAllocation->GetType())
15469  {
15470  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15471  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15472  if(size == VK_WHOLE_SIZE)
15473  {
15474  memRange.size = allocationSize - memRange.offset;
15475  }
15476  else
15477  {
15478  VMA_ASSERT(offset + size <= allocationSize);
15479  memRange.size = VMA_MIN(
15480  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15481  allocationSize - memRange.offset);
15482  }
15483  break;
15484 
15485  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15486  {
15487  // 1. Still within this allocation.
15488  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15489  if(size == VK_WHOLE_SIZE)
15490  {
15491  size = allocationSize - offset;
15492  }
15493  else
15494  {
15495  VMA_ASSERT(offset + size <= allocationSize);
15496  }
15497  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15498 
15499  // 2. Adjust to whole block.
15500  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15501  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15502  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15503  memRange.offset += allocationOffset;
15504  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15505 
15506  break;
15507  }
15508 
15509  default:
15510  VMA_ASSERT(0);
15511  }
15512 
15513  switch(op)
15514  {
15515  case VMA_CACHE_FLUSH:
15516  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15517  break;
15518  case VMA_CACHE_INVALIDATE:
15519  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15520  break;
15521  default:
15522  VMA_ASSERT(0);
15523  }
15524  }
15525  // else: Just ignore this call.
15526 }
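
// Editorial sketch (not part of the original listing): flushing a CPU write
// to non-coherent memory through the public wrappers. Offset and size are
// relative to the allocation; the function above expands them to multiples
// of nonCoherentAtomSize and clamps them to the owning block.
/*
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
// For reading GPU-written, non-coherent memory, invalidate instead:
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/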
15527 
15528 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15529 {
15530  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15531 
15532  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15533  {
15534  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15535  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15536  VMA_ASSERT(pDedicatedAllocations);
15537  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15538  VMA_ASSERT(success);
15539  }
15540 
15541  VkDeviceMemory hMemory = allocation->GetMemory();
15542 
15543  /*
15544  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15545  before vkFreeMemory.
15546 
15547  if(allocation->GetMappedData() != VMA_NULL)
15548  {
15549  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15550  }
15551  */
15552 
15553  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15554 
15555  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15556 }
15557 
15558 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15559 {
15560  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15561  !hAllocation->CanBecomeLost() &&
15562  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15563  {
15564  void* pData = VMA_NULL;
15565  VkResult res = Map(hAllocation, &pData);
15566  if(res == VK_SUCCESS)
15567  {
15568  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15569  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15570  Unmap(hAllocation);
15571  }
15572  else
15573  {
15574  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15575  }
15576  }
15577 }
15578 
15579 #if VMA_STATS_STRING_ENABLED
15580 
15581 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15582 {
15583  bool dedicatedAllocationsStarted = false;
15584  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15585  {
15586  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15587  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15588  VMA_ASSERT(pDedicatedAllocVector);
15589  if(pDedicatedAllocVector->empty() == false)
15590  {
15591  if(dedicatedAllocationsStarted == false)
15592  {
15593  dedicatedAllocationsStarted = true;
15594  json.WriteString("DedicatedAllocations");
15595  json.BeginObject();
15596  }
15597 
15598  json.BeginString("Type ");
15599  json.ContinueString(memTypeIndex);
15600  json.EndString();
15601 
15602  json.BeginArray();
15603 
15604  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15605  {
15606  json.BeginObject(true);
15607  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15608  hAlloc->PrintParameters(json);
15609  json.EndObject();
15610  }
15611 
15612  json.EndArray();
15613  }
15614  }
15615  if(dedicatedAllocationsStarted)
15616  {
15617  json.EndObject();
15618  }
15619 
15620  {
15621  bool allocationsStarted = false;
15622  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15623  {
15624  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15625  {
15626  if(allocationsStarted == false)
15627  {
15628  allocationsStarted = true;
15629  json.WriteString("DefaultPools");
15630  json.BeginObject();
15631  }
15632 
15633  json.BeginString("Type ");
15634  json.ContinueString(memTypeIndex);
15635  json.EndString();
15636 
15637  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15638  }
15639  }
15640  if(allocationsStarted)
15641  {
15642  json.EndObject();
15643  }
15644  }
15645 
15646  // Custom pools
15647  {
15648  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15649  const size_t poolCount = m_Pools.size();
15650  if(poolCount > 0)
15651  {
15652  json.WriteString("Pools");
15653  json.BeginObject();
15654  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15655  {
15656  json.BeginString();
15657  json.ContinueString(m_Pools[poolIndex]->GetId());
15658  json.EndString();
15659 
15660  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15661  }
15662  json.EndObject();
15663  }
15664  }
15665 }
15666 
15667 #endif // #if VMA_STATS_STRING_ENABLED
15668 
15669 ////////////////////////////////////////////////////////////////////////////////
15670 // Public interface
15671 
15672 VkResult vmaCreateAllocator(
15673  const VmaAllocatorCreateInfo* pCreateInfo,
15674  VmaAllocator* pAllocator)
15675 {
15676  VMA_ASSERT(pCreateInfo && pAllocator);
15677  VMA_DEBUG_LOG("vmaCreateAllocator");
15678  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15679  return (*pAllocator)->Init(pCreateInfo);
15680 }
15681 
15682 void vmaDestroyAllocator(
15683  VmaAllocator allocator)
15684 {
15685  if(allocator != VK_NULL_HANDLE)
15686  {
15687  VMA_DEBUG_LOG("vmaDestroyAllocator");
15688  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15689  vma_delete(&allocationCallbacks, allocator);
15690  }
15691 }
15692 
15693 void vmaGetPhysicalDeviceProperties(
15694  VmaAllocator allocator,
15695  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15696 {
15697  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15698  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15699 }
15700 
15701 void vmaGetMemoryProperties(
15702  VmaAllocator allocator,
15703  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15704 {
15705  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15706  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15707 }
15708 
15709 void vmaGetMemoryTypeProperties(
15710  VmaAllocator allocator,
15711  uint32_t memoryTypeIndex,
15712  VkMemoryPropertyFlags* pFlags)
15713 {
15714  VMA_ASSERT(allocator && pFlags);
15715  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15716  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15717 }
15718 
15719 void vmaSetCurrentFrameIndex(
15720  VmaAllocator allocator,
15721  uint32_t frameIndex)
15722 {
15723  VMA_ASSERT(allocator);
15724  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15725 
15726  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15727 
15728  allocator->SetCurrentFrameIndex(frameIndex);
15729 }
15730 
15731 void vmaCalculateStats(
15732  VmaAllocator allocator,
15733  VmaStats* pStats)
15734 {
15735  VMA_ASSERT(allocator && pStats);
15736  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15737  allocator->CalculateStats(pStats);
15738 }
15739 
15740 #if VMA_STATS_STRING_ENABLED
15741 
15742 void vmaBuildStatsString(
15743  VmaAllocator allocator,
15744  char** ppStatsString,
15745  VkBool32 detailedMap)
15746 {
15747  VMA_ASSERT(allocator && ppStatsString);
15748  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15749 
15750  VmaStringBuilder sb(allocator);
15751  {
15752  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15753  json.BeginObject();
15754 
15755  VmaStats stats;
15756  allocator->CalculateStats(&stats);
15757 
15758  json.WriteString("Total");
15759  VmaPrintStatInfo(json, stats.total);
15760 
15761  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15762  {
15763  json.BeginString("Heap ");
15764  json.ContinueString(heapIndex);
15765  json.EndString();
15766  json.BeginObject();
15767 
15768  json.WriteString("Size");
15769  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15770 
15771  json.WriteString("Flags");
15772  json.BeginArray(true);
15773  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15774  {
15775  json.WriteString("DEVICE_LOCAL");
15776  }
15777  json.EndArray();
15778 
15779  if(stats.memoryHeap[heapIndex].blockCount > 0)
15780  {
15781  json.WriteString("Stats");
15782  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15783  }
15784 
15785  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15786  {
15787  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15788  {
15789  json.BeginString("Type ");
15790  json.ContinueString(typeIndex);
15791  json.EndString();
15792 
15793  json.BeginObject();
15794 
15795  json.WriteString("Flags");
15796  json.BeginArray(true);
15797  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15798  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15799  {
15800  json.WriteString("DEVICE_LOCAL");
15801  }
15802  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15803  {
15804  json.WriteString("HOST_VISIBLE");
15805  }
15806  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15807  {
15808  json.WriteString("HOST_COHERENT");
15809  }
15810  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15811  {
15812  json.WriteString("HOST_CACHED");
15813  }
15814  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15815  {
15816  json.WriteString("LAZILY_ALLOCATED");
15817  }
15818  json.EndArray();
15819 
15820  if(stats.memoryType[typeIndex].blockCount > 0)
15821  {
15822  json.WriteString("Stats");
15823  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15824  }
15825 
15826  json.EndObject();
15827  }
15828  }
15829 
15830  json.EndObject();
15831  }
15832  if(detailedMap == VK_TRUE)
15833  {
15834  allocator->PrintDetailedMap(json);
15835  }
15836 
15837  json.EndObject();
15838  }
15839 
15840  const size_t len = sb.GetLength();
15841  char* const pChars = vma_new_array(allocator, char, len + 1);
15842  if(len > 0)
15843  {
15844  memcpy(pChars, sb.GetData(), len);
15845  }
15846  pChars[len] = '\0';
15847  *ppStatsString = pChars;
15848 }
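
// Editorial sketch (not part of the original listing): the string built above
// is JSON and must be released with vmaFreeStatsString.
/*
char* statsJson = nullptr;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE = include detailed map
puts(statsJson);
vmaFreeStatsString(allocator, statsJson);
*/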
15849 
15850 void vmaFreeStatsString(
15851  VmaAllocator allocator,
15852  char* pStatsString)
15853 {
15854  if(pStatsString != VMA_NULL)
15855  {
15856  VMA_ASSERT(allocator);
15857  size_t len = strlen(pStatsString);
15858  vma_delete_array(allocator, pStatsString, len + 1);
15859  }
15860 }
15861 
15862 #endif // #if VMA_STATS_STRING_ENABLED
15863 
15864 /*
15865 This function is not protected by any mutex because it just reads immutable data.
15866 */
15867 VkResult vmaFindMemoryTypeIndex(
15868  VmaAllocator allocator,
15869  uint32_t memoryTypeBits,
15870  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15871  uint32_t* pMemoryTypeIndex)
15872 {
15873  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15874  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15875  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15876 
15877  if(pAllocationCreateInfo->memoryTypeBits != 0)
15878  {
15879  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15880  }
15881 
15882  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15883  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15884 
15885  // Convert usage to requiredFlags and preferredFlags.
15886  switch(pAllocationCreateInfo->usage)
15887  {
15888  case VMA_MEMORY_USAGE_UNKNOWN:
15889  break;
15890  case VMA_MEMORY_USAGE_GPU_ONLY:
15891  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15892  {
15893  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15894  }
15895  break;
15896  case VMA_MEMORY_USAGE_CPU_ONLY:
15897  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15898  break;
15899  case VMA_MEMORY_USAGE_CPU_TO_GPU:
15900  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15901  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15902  {
15903  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15904  }
15905  break;
15906  case VMA_MEMORY_USAGE_GPU_TO_CPU:
15907  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15908  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15909  break;
15910  default:
15911  break;
15912  }
15913 
15914  *pMemoryTypeIndex = UINT32_MAX;
15915  uint32_t minCost = UINT32_MAX;
15916  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15917  memTypeIndex < allocator->GetMemoryTypeCount();
15918  ++memTypeIndex, memTypeBit <<= 1)
15919  {
15920  // This memory type is acceptable according to memoryTypeBits bitmask.
15921  if((memTypeBit & memoryTypeBits) != 0)
15922  {
15923  const VkMemoryPropertyFlags currFlags =
15924  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15925  // This memory type contains requiredFlags.
15926  if((requiredFlags & ~currFlags) == 0)
15927  {
15928  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15929  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15930  // Remember memory type with lowest cost.
15931  if(currCost < minCost)
15932  {
15933  *pMemoryTypeIndex = memTypeIndex;
15934  if(currCost == 0)
15935  {
15936  return VK_SUCCESS;
15937  }
15938  minCost = currCost;
15939  }
15940  }
15941  }
15942  }
15943  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15944 }
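
// Editorial sketch (not part of the original listing): the cost model above
// returns the first type containing all required flags whose count of missing
// preferred flags is lowest. A typical staging-buffer query:
/*
VmaAllocationCreateInfo createInfo = {};
createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT
uint32_t memTypeIndex = 0;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &createInfo, &memTypeIndex);
// VK_ERROR_FEATURE_NOT_PRESENT means no memory type satisfied the required flags.
*/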
15945 
15946 VkResult vmaFindMemoryTypeIndexForBufferInfo(
15947  VmaAllocator allocator,
15948  const VkBufferCreateInfo* pBufferCreateInfo,
15949  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15950  uint32_t* pMemoryTypeIndex)
15951 {
15952  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15953  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15954  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15955  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15956 
15957  const VkDevice hDev = allocator->m_hDevice;
15958  VkBuffer hBuffer = VK_NULL_HANDLE;
15959  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15960  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15961  if(res == VK_SUCCESS)
15962  {
15963  VkMemoryRequirements memReq = {};
15964  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15965  hDev, hBuffer, &memReq);
15966 
15967  res = vmaFindMemoryTypeIndex(
15968  allocator,
15969  memReq.memoryTypeBits,
15970  pAllocationCreateInfo,
15971  pMemoryTypeIndex);
15972 
15973  allocator->GetVulkanFunctions().vkDestroyBuffer(
15974  hDev, hBuffer, allocator->GetAllocationCallbacks());
15975  }
15976  return res;
15977 }
15978 
15979 VkResult vmaFindMemoryTypeIndexForImageInfo(
15980  VmaAllocator allocator,
15981  const VkImageCreateInfo* pImageCreateInfo,
15982  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15983  uint32_t* pMemoryTypeIndex)
15984 {
15985  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15986  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15987  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15988  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15989 
15990  const VkDevice hDev = allocator->m_hDevice;
15991  VkImage hImage = VK_NULL_HANDLE;
15992  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15993  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15994  if(res == VK_SUCCESS)
15995  {
15996  VkMemoryRequirements memReq = {};
15997  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15998  hDev, hImage, &memReq);
15999 
16000  res = vmaFindMemoryTypeIndex(
16001  allocator,
16002  memReq.memoryTypeBits,
16003  pAllocationCreateInfo,
16004  pMemoryTypeIndex);
16005 
16006  allocator->GetVulkanFunctions().vkDestroyImage(
16007  hDev, hImage, allocator->GetAllocationCallbacks());
16008  }
16009  return res;
16010 }
16011 
16012 VkResult vmaCreatePool(
16013  VmaAllocator allocator,
16014  const VmaPoolCreateInfo* pCreateInfo,
16015  VmaPool* pPool)
16016 {
16017  VMA_ASSERT(allocator && pCreateInfo && pPool);
16018 
16019  VMA_DEBUG_LOG("vmaCreatePool");
16020 
16021  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16022 
16023  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16024 
16025 #if VMA_RECORDING_ENABLED
16026  if(allocator->GetRecorder() != VMA_NULL)
16027  {
16028  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16029  }
16030 #endif
16031 
16032  return res;
16033 }
16034 
16035 void vmaDestroyPool(
16036  VmaAllocator allocator,
16037  VmaPool pool)
16038 {
16039  VMA_ASSERT(allocator);
16040 
16041  if(pool == VK_NULL_HANDLE)
16042  {
16043  return;
16044  }
16045 
16046  VMA_DEBUG_LOG("vmaDestroyPool");
16047 
16048  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16049 
16050 #if VMA_RECORDING_ENABLED
16051  if(allocator->GetRecorder() != VMA_NULL)
16052  {
16053  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16054  }
16055 #endif
16056 
16057  allocator->DestroyPool(pool);
16058 }
16059 
16060 void vmaGetPoolStats(
16061  VmaAllocator allocator,
16062  VmaPool pool,
16063  VmaPoolStats* pPoolStats)
16064 {
16065  VMA_ASSERT(allocator && pool && pPoolStats);
16066 
16067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16068 
16069  allocator->GetPoolStats(pool, pPoolStats);
16070 }
16071 
16072 void vmaMakePoolAllocationsLost(
16073  VmaAllocator allocator,
16074  VmaPool pool,
16075  size_t* pLostAllocationCount)
16076 {
16077  VMA_ASSERT(allocator && pool);
16078 
16079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16080 
16081 #if VMA_RECORDING_ENABLED
16082  if(allocator->GetRecorder() != VMA_NULL)
16083  {
16084  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16085  }
16086 #endif
16087 
16088  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16089 }
16090 
16091 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16092 {
16093  VMA_ASSERT(allocator && pool);
16094 
16095  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16096 
16097  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16098 
16099  return allocator->CheckPoolCorruption(pool);
16100 }
16101 
16102 VkResult vmaAllocateMemory(
16103  VmaAllocator allocator,
16104  const VkMemoryRequirements* pVkMemoryRequirements,
16105  const VmaAllocationCreateInfo* pCreateInfo,
16106  VmaAllocation* pAllocation,
16107  VmaAllocationInfo* pAllocationInfo)
16108 {
16109  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16110 
16111  VMA_DEBUG_LOG("vmaAllocateMemory");
16112 
16113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16114 
16115  VkResult result = allocator->AllocateMemory(
16116  *pVkMemoryRequirements,
16117  false, // requiresDedicatedAllocation
16118  false, // prefersDedicatedAllocation
16119  VK_NULL_HANDLE, // dedicatedBuffer
16120  VK_NULL_HANDLE, // dedicatedImage
16121  *pCreateInfo,
16122  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16123  1, // allocationCount
16124  pAllocation);
16125 
16126 #if VMA_RECORDING_ENABLED
16127  if(allocator->GetRecorder() != VMA_NULL)
16128  {
16129  allocator->GetRecorder()->RecordAllocateMemory(
16130  allocator->GetCurrentFrameIndex(),
16131  *pVkMemoryRequirements,
16132  *pCreateInfo,
16133  *pAllocation);
16134  }
16135 #endif
16136 
16137  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16138  {
16139  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16140  }
16141 
16142  return result;
16143 }
16144 
16145 VkResult vmaAllocateMemoryPages(
16146  VmaAllocator allocator,
16147  const VkMemoryRequirements* pVkMemoryRequirements,
16148  const VmaAllocationCreateInfo* pCreateInfo,
16149  size_t allocationCount,
16150  VmaAllocation* pAllocations,
16151  VmaAllocationInfo* pAllocationInfo)
16152 {
16153  if(allocationCount == 0)
16154  {
16155  return VK_SUCCESS;
16156  }
16157 
16158  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16159 
16160  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16161 
16162  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16163 
16164  VkResult result = allocator->AllocateMemory(
16165  *pVkMemoryRequirements,
16166  false, // requiresDedicatedAllocation
16167  false, // prefersDedicatedAllocation
16168  VK_NULL_HANDLE, // dedicatedBuffer
16169  VK_NULL_HANDLE, // dedicatedImage
16170  *pCreateInfo,
16171  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16172  allocationCount,
16173  pAllocations);
16174 
16175 #if VMA_RECORDING_ENABLED
16176  if(allocator->GetRecorder() != VMA_NULL)
16177  {
16178  allocator->GetRecorder()->RecordAllocateMemoryPages(
16179  allocator->GetCurrentFrameIndex(),
16180  *pVkMemoryRequirements,
16181  *pCreateInfo,
16182  (uint64_t)allocationCount,
16183  pAllocations);
16184  }
16185 #endif
16186 
16187  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16188  {
16189  for(size_t i = 0; i < allocationCount; ++i)
16190  {
16191  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16192  }
16193  }
16194 
16195  return result;
16196 }
16197 
16198 VkResult vmaAllocateMemoryForBuffer(
16199  VmaAllocator allocator,
16200  VkBuffer buffer,
16201  const VmaAllocationCreateInfo* pCreateInfo,
16202  VmaAllocation* pAllocation,
16203  VmaAllocationInfo* pAllocationInfo)
16204 {
16205  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16206 
16207  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16208 
16209  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16210 
16211  VkMemoryRequirements vkMemReq = {};
16212  bool requiresDedicatedAllocation = false;
16213  bool prefersDedicatedAllocation = false;
16214  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16215  requiresDedicatedAllocation,
16216  prefersDedicatedAllocation);
16217 
16218  VkResult result = allocator->AllocateMemory(
16219  vkMemReq,
16220  requiresDedicatedAllocation,
16221  prefersDedicatedAllocation,
16222  buffer, // dedicatedBuffer
16223  VK_NULL_HANDLE, // dedicatedImage
16224  *pCreateInfo,
16225  VMA_SUBALLOCATION_TYPE_BUFFER,
16226  1, // allocationCount
16227  pAllocation);
16228 
16229 #if VMA_RECORDING_ENABLED
16230  if(allocator->GetRecorder() != VMA_NULL)
16231  {
16232  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16233  allocator->GetCurrentFrameIndex(),
16234  vkMemReq,
16235  requiresDedicatedAllocation,
16236  prefersDedicatedAllocation,
16237  *pCreateInfo,
16238  *pAllocation);
16239  }
16240 #endif
16241 
16242  if(pAllocationInfo && result == VK_SUCCESS)
16243  {
16244  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16245  }
16246 
16247  return result;
16248 }
16249 
16250 VkResult vmaAllocateMemoryForImage(
16251  VmaAllocator allocator,
16252  VkImage image,
16253  const VmaAllocationCreateInfo* pCreateInfo,
16254  VmaAllocation* pAllocation,
16255  VmaAllocationInfo* pAllocationInfo)
16256 {
16257  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16258 
16259  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16260 
16261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16262 
16263  VkMemoryRequirements vkMemReq = {};
16264  bool requiresDedicatedAllocation = false;
16265  bool prefersDedicatedAllocation = false;
16266  allocator->GetImageMemoryRequirements(image, vkMemReq,
16267  requiresDedicatedAllocation, prefersDedicatedAllocation);
16268 
16269  VkResult result = allocator->AllocateMemory(
16270  vkMemReq,
16271  requiresDedicatedAllocation,
16272  prefersDedicatedAllocation,
16273  VK_NULL_HANDLE, // dedicatedBuffer
16274  image, // dedicatedImage
16275  *pCreateInfo,
16276  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16277  1, // allocationCount
16278  pAllocation);
16279 
16280 #if VMA_RECORDING_ENABLED
16281  if(allocator->GetRecorder() != VMA_NULL)
16282  {
16283  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16284  allocator->GetCurrentFrameIndex(),
16285  vkMemReq,
16286  requiresDedicatedAllocation,
16287  prefersDedicatedAllocation,
16288  *pCreateInfo,
16289  *pAllocation);
16290  }
16291 #endif
16292 
16293  if(pAllocationInfo && result == VK_SUCCESS)
16294  {
16295  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16296  }
16297 
16298  return result;
16299 }
16300 
16301 void vmaFreeMemory(
16302  VmaAllocator allocator,
16303  VmaAllocation allocation)
16304 {
16305  VMA_ASSERT(allocator);
16306 
16307  if(allocation == VK_NULL_HANDLE)
16308  {
16309  return;
16310  }
16311 
16312  VMA_DEBUG_LOG("vmaFreeMemory");
16313 
16314  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16315 
16316 #if VMA_RECORDING_ENABLED
16317  if(allocator->GetRecorder() != VMA_NULL)
16318  {
16319  allocator->GetRecorder()->RecordFreeMemory(
16320  allocator->GetCurrentFrameIndex(),
16321  allocation);
16322  }
16323 #endif
16324 
16325  allocator->FreeMemory(
16326  1, // allocationCount
16327  &allocation);
16328 }
16329 
16330 void vmaFreeMemoryPages(
16331  VmaAllocator allocator,
16332  size_t allocationCount,
16333  VmaAllocation* pAllocations)
16334 {
16335  if(allocationCount == 0)
16336  {
16337  return;
16338  }
16339 
16340  VMA_ASSERT(allocator);
16341 
16342  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16343 
16344  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16345 
16346 #if VMA_RECORDING_ENABLED
16347  if(allocator->GetRecorder() != VMA_NULL)
16348  {
16349  allocator->GetRecorder()->RecordFreeMemoryPages(
16350  allocator->GetCurrentFrameIndex(),
16351  (uint64_t)allocationCount,
16352  pAllocations);
16353  }
16354 #endif
16355 
16356  allocator->FreeMemory(allocationCount, pAllocations);
16357 }
16358 
16359 VkResult vmaResizeAllocation(
16360  VmaAllocator allocator,
16361  VmaAllocation allocation,
16362  VkDeviceSize newSize)
16363 {
16364  VMA_ASSERT(allocator && allocation);
16365 
16366  VMA_DEBUG_LOG("vmaResizeAllocation");
16367 
16368  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16369 
16370 #if VMA_RECORDING_ENABLED
16371  if(allocator->GetRecorder() != VMA_NULL)
16372  {
16373  allocator->GetRecorder()->RecordResizeAllocation(
16374  allocator->GetCurrentFrameIndex(),
16375  allocation,
16376  newSize);
16377  }
16378 #endif
16379 
16380  return allocator->ResizeAllocation(allocation, newSize);
16381 }
16382 
16383 void vmaGetAllocationInfo(
16384  VmaAllocator allocator,
16385  VmaAllocation allocation,
16386  VmaAllocationInfo* pAllocationInfo)
16387 {
16388  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16389 
16390  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16391 
16392 #if VMA_RECORDING_ENABLED
16393  if(allocator->GetRecorder() != VMA_NULL)
16394  {
16395  allocator->GetRecorder()->RecordGetAllocationInfo(
16396  allocator->GetCurrentFrameIndex(),
16397  allocation);
16398  }
16399 #endif
16400 
16401  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16402 }
16403 
16404 VkBool32 vmaTouchAllocation(
16405  VmaAllocator allocator,
16406  VmaAllocation allocation)
16407 {
16408  VMA_ASSERT(allocator && allocation);
16409 
16410  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16411 
16412 #if VMA_RECORDING_ENABLED
16413  if(allocator->GetRecorder() != VMA_NULL)
16414  {
16415  allocator->GetRecorder()->RecordTouchAllocation(
16416  allocator->GetCurrentFrameIndex(),
16417  allocation);
16418  }
16419 #endif
16420 
16421  return allocator->TouchAllocation(allocation);
16422 }
16423 
16424 void vmaSetAllocationUserData(
16425  VmaAllocator allocator,
16426  VmaAllocation allocation,
16427  void* pUserData)
16428 {
16429  VMA_ASSERT(allocator && allocation);
16430 
16431  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16432 
16433  allocation->SetUserData(allocator, pUserData);
16434 
16435 #if VMA_RECORDING_ENABLED
16436  if(allocator->GetRecorder() != VMA_NULL)
16437  {
16438  allocator->GetRecorder()->RecordSetAllocationUserData(
16439  allocator->GetCurrentFrameIndex(),
16440  allocation,
16441  pUserData);
16442  }
16443 #endif
16444 }
16445 
16446 void vmaCreateLostAllocation(
16447  VmaAllocator allocator,
16448  VmaAllocation* pAllocation)
16449 {
16450  VMA_ASSERT(allocator && pAllocation);
16451 
16452  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16453 
16454  allocator->CreateLostAllocation(pAllocation);
16455 
16456 #if VMA_RECORDING_ENABLED
16457  if(allocator->GetRecorder() != VMA_NULL)
16458  {
16459  allocator->GetRecorder()->RecordCreateLostAllocation(
16460  allocator->GetCurrentFrameIndex(),
16461  *pAllocation);
16462  }
16463 #endif
16464 }
16465 
16466 VkResult vmaMapMemory(
16467  VmaAllocator allocator,
16468  VmaAllocation allocation,
16469  void** ppData)
16470 {
16471  VMA_ASSERT(allocator && allocation && ppData);
16472 
16473  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16474 
16475  VkResult res = allocator->Map(allocation, ppData);
16476 
16477 #if VMA_RECORDING_ENABLED
16478  if(allocator->GetRecorder() != VMA_NULL)
16479  {
16480  allocator->GetRecorder()->RecordMapMemory(
16481  allocator->GetCurrentFrameIndex(),
16482  allocation);
16483  }
16484 #endif
16485 
16486  return res;
16487 }
16488 
16489 void vmaUnmapMemory(
16490  VmaAllocator allocator,
16491  VmaAllocation allocation)
16492 {
16493  VMA_ASSERT(allocator && allocation);
16494 
16495  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16496 
16497 #if VMA_RECORDING_ENABLED
16498  if(allocator->GetRecorder() != VMA_NULL)
16499  {
16500  allocator->GetRecorder()->RecordUnmapMemory(
16501  allocator->GetCurrentFrameIndex(),
16502  allocation);
16503  }
16504 #endif
16505 
16506  allocator->Unmap(allocation);
16507 }
16508 
16509 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16510 {
16511  VMA_ASSERT(allocator && allocation);
16512 
16513  VMA_DEBUG_LOG("vmaFlushAllocation");
16514 
16515  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16516 
16517  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
16518 
16519 #if VMA_RECORDING_ENABLED
16520  if(allocator->GetRecorder() != VMA_NULL)
16521  {
16522  allocator->GetRecorder()->RecordFlushAllocation(
16523  allocator->GetCurrentFrameIndex(),
16524  allocation, offset, size);
16525  }
16526 #endif
16527 }
16528 
16529 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16530 {
16531  VMA_ASSERT(allocator && allocation);
16532 
16533  VMA_DEBUG_LOG("vmaInvalidateAllocation");
16534 
16535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16536 
16537  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
16538 
16539 #if VMA_RECORDING_ENABLED
16540  if(allocator->GetRecorder() != VMA_NULL)
16541  {
16542  allocator->GetRecorder()->RecordInvalidateAllocation(
16543  allocator->GetCurrentFrameIndex(),
16544  allocation, offset, size);
16545  }
16546 #endif
16547 }
16548 
16549 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16550 {
16551  VMA_ASSERT(allocator);
16552 
16553  VMA_DEBUG_LOG("vmaCheckCorruption");
16554 
16555  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16556 
16557  return allocator->CheckCorruption(memoryTypeBits);
16558 }
16559 
16560 VkResult vmaDefragment(
16561  VmaAllocator allocator,
16562  VmaAllocation* pAllocations,
16563  size_t allocationCount,
16564  VkBool32* pAllocationsChanged,
16565  const VmaDefragmentationInfo *pDefragmentationInfo,
16566  VmaDefragmentationStats* pDefragmentationStats)
16567 {
16568  // Deprecated interface, reimplemented using the new one.
16569 
16570  VmaDefragmentationInfo2 info2 = {};
16571  info2.allocationCount = (uint32_t)allocationCount;
16572  info2.pAllocations = pAllocations;
16573  info2.pAllocationsChanged = pAllocationsChanged;
16574  if(pDefragmentationInfo != VMA_NULL)
16575  {
16576  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16577  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16578  }
16579  else
16580  {
16581  info2.maxCpuAllocationsToMove = UINT32_MAX;
16582  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16583  }
16584  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, and commandBuffer are deliberately left zero.
16585 
16585 
16586  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
16587  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16588  if(res == VK_NOT_READY)
16589  {
16590  res = vmaDefragmentationEnd(allocator, ctx);
16591  }
16592  return res;
16593 }
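
// Editorial sketch (not part of the original listing): the non-deprecated
// equivalent of the wrapper above, calling begin/end directly for a CPU-only
// defragmentation. The `allocs`, `changed`, and `allocCount` names are
// hypothetical caller-side arrays.
/*
VmaDefragmentationInfo2 info = {};
info.allocationCount = (uint32_t)allocCount;
info.pAllocations = allocs;
info.pAllocationsChanged = changed;
info.maxCpuBytesToMove = VK_WHOLE_SIZE;
info.maxCpuAllocationsToMove = UINT32_MAX;
VmaDefragmentationContext ctx = VK_NULL_HANDLE;
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
if(res == VK_NOT_READY)
    res = vmaDefragmentationEnd(allocator, ctx); // completes the CPU-side pass
*/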
16594 
16595 VkResult vmaDefragmentationBegin(
16596  VmaAllocator allocator,
16597  const VmaDefragmentationInfo2* pInfo,
16598  VmaDefragmentationStats* pStats,
16599  VmaDefragmentationContext *pContext)
16600 {
16601  VMA_ASSERT(allocator && pInfo && pContext);
16602 
16603  // Degenerate case: Nothing to defragment.
16604  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16605  {
16606  return VK_SUCCESS;
16607  }
16608 
16609  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16610  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16611  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16612  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16613 
16614  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16615 
16616  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16617 
16618  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16619 
16620 #if VMA_RECORDING_ENABLED
16621  if(allocator->GetRecorder() != VMA_NULL)
16622  {
16623  allocator->GetRecorder()->RecordDefragmentationBegin(
16624  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16625  }
16626 #endif
16627 
16628  return res;
16629 }
16630 
16631 VkResult vmaDefragmentationEnd(
16632  VmaAllocator allocator,
16633  VmaDefragmentationContext context)
16634 {
16635  VMA_ASSERT(allocator);
16636 
16637  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16638 
16639  if(context != VK_NULL_HANDLE)
16640  {
16641  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16642 
16643 #if VMA_RECORDING_ENABLED
16644  if(allocator->GetRecorder() != VMA_NULL)
16645  {
16646  allocator->GetRecorder()->RecordDefragmentationEnd(
16647  allocator->GetCurrentFrameIndex(), context);
16648  }
16649 #endif
16650 
16651  return allocator->DefragmentationEnd(context);
16652  }
16653  else
16654  {
16655  return VK_SUCCESS;
16656  }
16657 }
16658 
16659 VkResult vmaBindBufferMemory(
16660  VmaAllocator allocator,
16661  VmaAllocation allocation,
16662  VkBuffer buffer)
16663 {
16664  VMA_ASSERT(allocator && allocation && buffer);
16665 
16666  VMA_DEBUG_LOG("vmaBindBufferMemory");
16667 
16668  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16669 
16670  return allocator->BindBufferMemory(allocation, buffer);
16671 }
16672 
16673 VkResult vmaBindImageMemory(
16674  VmaAllocator allocator,
16675  VmaAllocation allocation,
16676  VkImage image)
16677 {
16678  VMA_ASSERT(allocator && allocation && image);
16679 
16680  VMA_DEBUG_LOG("vmaBindImageMemory");
16681 
16682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16683 
16684  return allocator->BindImageMemory(allocation, image);
16685 }
16686 
16687 VkResult vmaCreateBuffer(
16688  VmaAllocator allocator,
16689  const VkBufferCreateInfo* pBufferCreateInfo,
16690  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16691  VkBuffer* pBuffer,
16692  VmaAllocation* pAllocation,
16693  VmaAllocationInfo* pAllocationInfo)
16694 {
16695  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16696 
16697  if(pBufferCreateInfo->size == 0)
16698  {
16699  return VK_ERROR_VALIDATION_FAILED_EXT;
16700  }
16701 
16702  VMA_DEBUG_LOG("vmaCreateBuffer");
16703 
16704  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16705 
16706  *pBuffer = VK_NULL_HANDLE;
16707  *pAllocation = VK_NULL_HANDLE;
16708 
16709  // 1. Create VkBuffer.
16710  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16711  allocator->m_hDevice,
16712  pBufferCreateInfo,
16713  allocator->GetAllocationCallbacks(),
16714  pBuffer);
16715  if(res >= 0)
16716  {
16717  // 2. vkGetBufferMemoryRequirements.
16718  VkMemoryRequirements vkMemReq = {};
16719  bool requiresDedicatedAllocation = false;
16720  bool prefersDedicatedAllocation = false;
16721  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16722  requiresDedicatedAllocation, prefersDedicatedAllocation);
16723 
16724  // Make sure the alignment required for specific buffer usages, as reported in
16725  // Physical Device Properties, is covered by the alignment reported by the memory requirements.
16726  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16727  {
16728  VMA_ASSERT(vkMemReq.alignment %
16729  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16730  }
16731  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16732  {
16733  VMA_ASSERT(vkMemReq.alignment %
16734  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16735  }
16736  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16737  {
16738  VMA_ASSERT(vkMemReq.alignment %
16739  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16740  }
16741 
16742  // 3. Allocate memory using allocator.
16743  res = allocator->AllocateMemory(
16744  vkMemReq,
16745  requiresDedicatedAllocation,
16746  prefersDedicatedAllocation,
16747  *pBuffer, // dedicatedBuffer
16748  VK_NULL_HANDLE, // dedicatedImage
16749  *pAllocationCreateInfo,
16750  VMA_SUBALLOCATION_TYPE_BUFFER,
16751  1, // allocationCount
16752  pAllocation);
16753 
16754 #if VMA_RECORDING_ENABLED
16755  if(allocator->GetRecorder() != VMA_NULL)
16756  {
16757  allocator->GetRecorder()->RecordCreateBuffer(
16758  allocator->GetCurrentFrameIndex(),
16759  *pBufferCreateInfo,
16760  *pAllocationCreateInfo,
16761  *pAllocation);
16762  }
16763 #endif
16764 
16765  if(res >= 0)
16766  {
16767  // 4. Bind buffer with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}

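/*
Usage sketch for vmaCreateBuffer() above. The buffer size, usage flags, and
VMA_MEMORY_USAGE_GPU_ONLY choice are illustrative placeholders, not values
the function requires:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    // pAllocationInfo may be null, as the VMA_NULL check in the function shows.
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);

On failure the function has already destroyed the buffer and freed the
allocation, leaving both output handles VK_NULL_HANDLE, so the caller needs
no partial cleanup.
*/
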
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}

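/*
As the early-out above shows, vmaDestroyBuffer() is a no-op when both handles
are VK_NULL_HANDLE, so it can be called unconditionally in cleanup paths - a
sketch:

    vmaDestroyBuffer(allocator, buf, alloc); // safe even if creation failed
    buf = VK_NULL_HANDLE;
    alloc = VK_NULL_HANDLE;

Passing only one non-null handle is also handled: the buffer is destroyed and
the allocation freed independently of each other.
*/
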
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            1, // allocationCount
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
            {
                res = allocator->BindImageMemory(*pAllocation, *pImage);
            }
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            allocator->FreeMemory(
                1, // allocationCount
                pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}

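/*
Usage sketch for vmaCreateImage() above, with illustrative parameters (the
format, extent, and usage flags are placeholders). Note the validation at the
top of the function: extent, mipLevels, and arrayLayers must all be non-zero.

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = 1024;
    imgCreateInfo.extent.height = 1024;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects the IMAGE_OPTIMAL suballocation type
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, VMA_NULL);
*/
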
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(
            1, // allocationCount
            &allocation);
    }
}

#endif // #ifdef VMA_IMPLEMENTATION