Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1760 /*
1761 Define this macro to 0/1 to disable/enable support for recording functionality,
1762 available through VmaAllocatorCreateInfo::pRecordSettings.
1763 */
1764 #ifndef VMA_RECORDING_ENABLED
1765  #define VMA_RECORDING_ENABLED 0
1766 #endif
1767 
1768 #ifndef NOMINMAX
1769  #define NOMINMAX // For windows.h
1770 #endif
1771 
1772 #ifndef VULKAN_H_
1773  #include <vulkan/vulkan.h>
1774 #endif
1775 
1776 #if VMA_RECORDING_ENABLED
1777  #include <windows.h>
1778 #endif
1779 
1780 #if !defined(VMA_DEDICATED_ALLOCATION)
1781  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1782  #define VMA_DEDICATED_ALLOCATION 1
1783  #else
1784  #define VMA_DEDICATED_ALLOCATION 0
1785  #endif
1786 #endif
1787 
1788 #if !defined(VMA_BIND_MEMORY2)
1789  #if VK_KHR_bind_memory2
1790  #define VMA_BIND_MEMORY2 1
1791  #else
1792  #define VMA_BIND_MEMORY2 0
1793  #endif
1794 #endif
1795 
1796 #if !defined(VMA_MEMORY_BUDGET)
1797  #if VK_EXT_memory_budget && VK_KHR_get_physical_device_properties2
1798  #define VMA_MEMORY_BUDGET 1
1799  #else
1800  #define VMA_MEMORY_BUDGET 0
1801  #endif
1802 #endif
1803 
// Define these macros to decorate all public functions with additional code,
// before and after the return type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
1809 #ifndef VMA_CALL_PRE
1810  #define VMA_CALL_PRE
1811 #endif
1812 #ifndef VMA_CALL_POST
1813  #define VMA_CALL_POST
1814 #endif
1815 
1825 VK_DEFINE_HANDLE(VmaAllocator)
1826 
1827 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1829  VmaAllocator allocator,
1830  uint32_t memoryType,
1831  VkDeviceMemory memory,
1832  VkDeviceSize size);
1834 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1835  VmaAllocator allocator,
1836  uint32_t memoryType,
1837  VkDeviceMemory memory,
1838  VkDeviceSize size);
1839 
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

typedef VkFlags VmaAllocatorCreateFlags;
1911 
1916 typedef struct VmaVulkanFunctions {
1917  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1918  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1919  PFN_vkAllocateMemory vkAllocateMemory;
1920  PFN_vkFreeMemory vkFreeMemory;
1921  PFN_vkMapMemory vkMapMemory;
1922  PFN_vkUnmapMemory vkUnmapMemory;
1923  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1924  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1925  PFN_vkBindBufferMemory vkBindBufferMemory;
1926  PFN_vkBindImageMemory vkBindImageMemory;
1927  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1928  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1929  PFN_vkCreateBuffer vkCreateBuffer;
1930  PFN_vkDestroyBuffer vkDestroyBuffer;
1931  PFN_vkCreateImage vkCreateImage;
1932  PFN_vkDestroyImage vkDestroyImage;
1933  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1934 #if VMA_DEDICATED_ALLOCATION
1935  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1936  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1937 #endif
1938 #if VMA_BIND_MEMORY2
1939  PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
1940  PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
1941 #endif
1942 #if VMA_MEMORY_BUDGET
1943  PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
1944 #endif
} VmaVulkanFunctions;

/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* pRecordSettings;
    /// Handle to the Vulkan instance object.
    VkInstance instance;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator allocator);

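/*
Example usage (a minimal sketch, assuming `physicalDevice`, `device`, and
`instance` were created by the application beforehand):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create resources, render ...
    vmaDestroyAllocator(allocator);
*/
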
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns the property flags of this memory type.
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame.
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of #VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from the current state of the Allocator.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

/// Statistics of current memory usage and available budget, in bytes, for a specific memory heap.
typedef struct VmaBudget
{
    /// Sum size of all VkDeviceMemory blocks allocated from a particular heap, in bytes.
    VkDeviceSize blockBytes;

    /// Sum size of all allocations created in a particular heap, in bytes.
    VkDeviceSize allocationBytes;

    /// Estimated current memory usage of the program, in bytes.
    VkDeviceSize usage;

    /// Estimated amount of memory available to the program, in bytes.
    VkDeviceSize budget;
} VmaBudget;

/// Retrieves information about the current memory budget for all memory heaps.
VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator allocator,
    VmaBudget* pBudget);

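/*
Example usage (a sketch, assuming `allocator` already exists and
`memoryProperties` was obtained earlier, e.g. via vmaGetMemoryProperties()):

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    for(uint32_t heapIndex = 0; heapIndex < memoryProperties->memoryHeapCount; ++heapIndex)
    {
        printf("Heap %u: %llu / %llu bytes used\n", heapIndex,
            (unsigned long long)budgets[heapIndex].usage,
            (unsigned long long)budgets[heapIndex].budget);
    }
*/
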
2199 #ifndef VMA_STATS_STRING_ENABLED
2200 #define VMA_STATS_STRING_ENABLED 1
2201 #endif
2202 
2203 #if VMA_STATS_STRING_ENABLED
2204 
/// Builds and returns statistics as a string in JSON format.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

2217 #endif // #if VMA_STATS_STRING_ENABLED
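
/*
Example usage of the statistics string (a sketch, assuming `allocator` already
exists and VMA_STATS_STRING_ENABLED is 1):

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // Write statsString (JSON) to a log or file...
    vmaFreeStatsString(allocator, statsString);
*/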
2218 
2227 VK_DEFINE_HANDLE(VmaPool)
2228 
typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only; not guaranteed to be mappable on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host; guaranteed HOST_VISIBLE and HOST_COHERENT.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and cached; good for resources written by GPU, read by CPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    /// Memory on CPU side, preferably not visible to GPU; good for staging copies.
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    /// Lazily allocated GPU memory, for transient attachment images.
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,

    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every acceptable memory type index.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional.
    VmaPool pool;
    /// Custom general-purpose pointer stored in the #VmaAllocation. Optional.
    void* pUserData;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

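/*
Example usage (a sketch; finds a memory type for a uniform buffer that must be
mappable on host; assumes `allocator` already exists):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/
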
/// Flags to be passed as VmaPoolCreateInfo::flags.
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    /// Enables alternative, linear allocation algorithm in this pool.
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    /// Enables alternative, buddy allocation algorithm in this pool.
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a #VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of #VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing #VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any #VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any #VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region available for a new allocation.
    VkDeviceSize unusedRangeSizeMax;
    /// Number of VkDeviceMemory blocks allocated for this pool.
    size_t blockCount;
} VmaPoolStats;

/// Allocates Vulkan device memory and creates a #VmaPool object.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

/// Destroys a #VmaPool object and frees Vulkan device memory.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/// Retrieves statistics of an existing #VmaPool object.
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in the given pool as lost if they are not used in the current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

/// Checks magic number in margins around all allocations in the given memory pool in search for corruptions.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

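/*
Example usage of a custom pool (a sketch; assumes `allocator` already exists and
`memTypeIndex` was found, e.g. with vmaFindMemoryTypeIndexForBufferInfo()):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB blocks.
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations can now be made in this pool by setting
    // VmaAllocationCreateInfo::pool = pool.
    // ...
    vmaDestroyPool(allocator, pool);
*/
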
2706 VK_DEFINE_HANDLE(VmaAllocation)
2707 
2708 
/// Parameters of a #VmaAllocation object, which can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to the Vulkan memory object.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if not mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/// General purpose memory allocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// General purpose memory allocation for multiple allocation objects at once.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Frees memory and destroys multiple allocation objects at once.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

/// Tries to resize an allocation in place, if there is enough free memory after it.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

/// Returns current information about the specified allocation and atomically marks it as used in the current frame.
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Returns VK_TRUE if the allocation is not lost and atomically marks it as used in the current frame.
VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Sets pUserData in the given allocation to a new value.
VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/// Creates a new allocation that is in lost state from the beginning.
VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/// Maps memory represented by the given allocation and returns a pointer to it.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

/// Unmaps memory represented by the given allocation, mapped previously using vmaMapMemory().
VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

2984 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2985 
3002 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
3003 
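/*
Example usage of map/flush/unmap (a sketch; assumes `allocation` was created in
a host-visible memory type, e.g. with VMA_MEMORY_USAGE_CPU_TO_GPU, and that
`constantBufferData` is an application-side struct to copy):

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
    // Needed only if the memory type is not HOST_COHERENT:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
*/
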
3020 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
3021 
3028 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3029 
/// Flags to be used in vmaDefragmentationBegin().
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation. To be used with vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    /// Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    /// Number of allocations in the pAllocations array.
    uint32_t allocationCount;
    /// Pointer to an array of allocations that can be defragmented.
    VmaAllocation* pAllocations;
    /// Optional, output. Pointer to an array that will be filled with information whether the allocation at the same index has been changed during defragmentation.
    VkBool32* pAllocationsChanged;
    /// Number of pools in the pPools array.
    uint32_t poolCount;
    /// Either null or a pointer to an array of pools to be defragmented.
    VmaPool* pPools;
    /// Maximum total number of bytes that can be copied while moving allocations using transfers on the CPU side.
    VkDeviceSize maxCpuBytesToMove;
    /// Maximum number of allocations that can be moved using transfers on the CPU side.
    uint32_t maxCpuAllocationsToMove;
    /// Maximum total number of bytes that can be copied while moving allocations using transfers on the GPU side, posted to commandBuffer.
    VkDeviceSize maxGpuBytesToMove;
    /// Maximum number of allocations that can be moved using transfers on the GPU side.
    uint32_t maxGpuAllocationsToMove;
    /// Optional. Command buffer where GPU copy commands will be posted.
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

/// Deprecated. Optional configuration parameters to be passed to vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by the defragmentation functions.
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

/// Begins the defragmentation process.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator allocator,
    const VmaDefragmentationInfo2* pInfo,
    VmaDefragmentationStats* pStats,
    VmaDefragmentationContext *pContext);

/// Ends the defragmentation process.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator allocator,
    VmaDefragmentationContext context);

/// Deprecated. Compacts memory by moving allocations. Use vmaDefragmentationBegin() instead.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

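/*
Example usage of the newer defragmentation API (a sketch; `allocations` is an
application-owned array of `allocCount` allocations that are safe to move, none
of them currently in use by the GPU):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to the moved allocations must then be destroyed,
    // recreated, and bound again, e.g. using vmaBindBufferMemory().
*/
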
/// Binds a buffer to an allocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

/// Binds a buffer to an allocation with additional parameters.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer buffer,
    const void* pNext);

/// Binds an image to an allocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

/// Binds an image to an allocation with additional parameters.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage image,
    const void* pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys a Vulkan buffer and frees the allocated memory.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

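/*
Example usage (a sketch; creates a GPU-only vertex buffer, assuming `allocator`
already exists):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
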
/// Function similar to vmaCreateBuffer().
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys a Vulkan image and frees the allocated memory.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

3376 #ifdef __cplusplus
3377 }
3378 #endif
3379 
3380 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3381 
3382 // For Visual Studio IntelliSense.
3383 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3384 #define VMA_IMPLEMENTATION
3385 #endif
3386 
3387 #ifdef VMA_IMPLEMENTATION
3388 #undef VMA_IMPLEMENTATION
3389 
3390 #include <cstdint>
3391 #include <cstdlib>
3392 #include <cstring>
3393 
/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/
3400 
/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
3410 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3411 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3412 #endif
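
/*
Example of providing the pointers manually (a sketch; assumes the application
fetched the function pointers, e.g. via vkGetInstanceProcAddr and
vkGetDeviceProcAddr, into variables of the matching PFN_* types):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = myVkGetPhysicalDeviceProperties;
    vulkanFunctions.vkAllocateMemory = myVkAllocateMemory;
    // ... fill all remaining members the same way ...
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/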
3413 
3414 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3415 //#define VMA_USE_STL_CONTAINERS 1
3416 
/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
3423 #if VMA_USE_STL_CONTAINERS
3424  #define VMA_USE_STL_VECTOR 1
3425  #define VMA_USE_STL_UNORDERED_MAP 1
3426  #define VMA_USE_STL_LIST 1
3427 #endif
3428 
#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional
    // parameter /Zc:__cplusplus; otherwise it's always 199711L, even though
    // std::shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif
3442 
/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
The library has its own container implementation.
*/
3447 #if VMA_USE_STL_VECTOR
3448  #include <vector>
3449 #endif
3450 
3451 #if VMA_USE_STL_UNORDERED_MAP
3452  #include <unordered_map>
3453 #endif
3454 
3455 #if VMA_USE_STL_LIST
3456  #include <list>
3457 #endif
3458 
/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
3463 #include <cassert> // for assert
3464 #include <algorithm> // for min, max
3465 #include <mutex>
3466 
3467 #ifndef VMA_NULL
3468  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3469  #define VMA_NULL nullptr
3470 #endif
3471 
3472 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
3473 #include <cstdlib>
3474 void *aligned_alloc(size_t alignment, size_t size)
3475 {
3476  // alignment must be >= sizeof(void*)
3477  if(alignment < sizeof(void*))
3478  {
3479  alignment = sizeof(void*);
3480  }
3481 
3482  return memalign(alignment, size);
3483 }
3484 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
3485 #include <cstdlib>
3486 void *aligned_alloc(size_t alignment, size_t size)
3487 {
3488  // alignment must be >= sizeof(void*)
3489  if(alignment < sizeof(void*))
3490  {
3491  alignment = sizeof(void*);
3492  }
3493 
3494  void *pointer;
3495  if(posix_memalign(&pointer, alignment, size) == 0)
3496  return pointer;
3497  return VMA_NULL;
3498 }
3499 #endif
3500 
// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>
3505 
3506 // Normal assert to check for programmer's errors, especially in Debug configuration.
3507 #ifndef VMA_ASSERT
3508  #ifdef _DEBUG
3509  #define VMA_ASSERT(expr) assert(expr)
3510  #else
3511  #define VMA_ASSERT(expr)
3512  #endif
3513 #endif
3514 
// Assert that will be called very often, like inside data structures, e.g. operator[].
// Making it non-empty can make the program slow.
3517 #ifndef VMA_HEAVY_ASSERT
3518  #ifdef _DEBUG
3519  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3520  #else
3521  #define VMA_HEAVY_ASSERT(expr)
3522  #endif
3523 #endif
3524 
3525 #ifndef VMA_ALIGN_OF
3526  #define VMA_ALIGN_OF(type) (__alignof(type))
3527 #endif
3528 
3529 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3530  #if defined(_WIN32)
3531  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3532  #else
3533  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3534  #endif
3535 #endif
3536 
3537 #ifndef VMA_SYSTEM_FREE
3538  #if defined(_WIN32)
3539  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3540  #else
3541  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3542  #endif
3543 #endif
3544 
3545 #ifndef VMA_MIN
3546  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3547 #endif
3548 
3549 #ifndef VMA_MAX
3550  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3551 #endif
3552 
3553 #ifndef VMA_SWAP
3554  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3555 #endif
3556 
3557 #ifndef VMA_SORT
3558  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3559 #endif
3560 
3561 #ifndef VMA_DEBUG_LOG
3562  #define VMA_DEBUG_LOG(format, ...)
3563  /*
3564  #define VMA_DEBUG_LOG(format, ...) do { \
3565  printf(format, __VA_ARGS__); \
3566  printf("\n"); \
3567  } while(false)
3568  */
3569 #endif
3570 
3571 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3572 #if VMA_STATS_STRING_ENABLED
3573  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3574  {
3575  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3576  }
3577  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3578  {
3579  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3580  }
3581  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3582  {
3583  snprintf(outStr, strLen, "%p", ptr);
3584  }
3585 #endif
3586 
3587 #ifndef VMA_MUTEX
3588  class VmaMutex
3589  {
3590  public:
3591  void Lock() { m_Mutex.lock(); }
3592  void Unlock() { m_Mutex.unlock(); }
3593  private:
3594  std::mutex m_Mutex;
3595  };
3596  #define VMA_MUTEX VmaMutex
3597 #endif
3598 
3599 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3600 #ifndef VMA_RW_MUTEX
3601  #if VMA_USE_STL_SHARED_MUTEX
3602  // Use std::shared_mutex from C++17.
3603  #include <shared_mutex>
3604  class VmaRWMutex
3605  {
3606  public:
3607  void LockRead() { m_Mutex.lock_shared(); }
3608  void UnlockRead() { m_Mutex.unlock_shared(); }
3609  void LockWrite() { m_Mutex.lock(); }
3610  void UnlockWrite() { m_Mutex.unlock(); }
3611  private:
3612  std::shared_mutex m_Mutex;
3613  };
3614  #define VMA_RW_MUTEX VmaRWMutex
3615  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
3616  // Use SRWLOCK from WinAPI.
3617  // Minimum supported client = Windows Vista, server = Windows Server 2008.
3618  class VmaRWMutex
3619  {
3620  public:
3621  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3622  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3623  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3624  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3625  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3626  private:
3627  SRWLOCK m_Lock;
3628  };
3629  #define VMA_RW_MUTEX VmaRWMutex
3630  #else
3631  // Less efficient fallback: Use normal mutex.
3632  class VmaRWMutex
3633  {
3634  public:
3635  void LockRead() { m_Mutex.Lock(); }
3636  void UnlockRead() { m_Mutex.Unlock(); }
3637  void LockWrite() { m_Mutex.Lock(); }
3638  void UnlockWrite() { m_Mutex.Unlock(); }
3639  private:
3640  VMA_MUTEX m_Mutex;
3641  };
3642  #define VMA_RW_MUTEX VmaRWMutex
3643  #endif // #if VMA_USE_STL_SHARED_MUTEX
3644 #endif // #ifndef VMA_RW_MUTEX
3645 
3646 /*
3647 If providing your own implementation, you need to implement a subset of std::atomic.
3648 */
3649 #ifndef VMA_ATOMIC_UINT32
3650  #include <atomic>
3651  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3652 #endif
3653 
3654 #ifndef VMA_ATOMIC_UINT64
3655  #include <atomic>
3656  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
3657 #endif
3658 
3659 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3660 
3664  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3665 #endif
3666 
3667 #ifndef VMA_DEBUG_ALIGNMENT
3668 
3672  #define VMA_DEBUG_ALIGNMENT (1)
3673 #endif
3674 
3675 #ifndef VMA_DEBUG_MARGIN
3676 
3680  #define VMA_DEBUG_MARGIN (0)
3681 #endif
3682 
3683 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3684 
3688  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3689 #endif
3690 
3691 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3692 
3697  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3698 #endif
3699 
3700 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3701 
3705  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3706 #endif
3707 
3708 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3709 
3713  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3714 #endif
3715 
3716 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3717  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3719 #endif
3720 
3721 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3722  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3724 #endif
3725 
3726 #ifndef VMA_CLASS_NO_COPY
3727  #define VMA_CLASS_NO_COPY(className) \
3728  private: \
3729  className(const className&) = delete; \
3730  className& operator=(const className&) = delete;
3731 #endif
3732 
3733 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3734 
3735 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3736 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3737 
3738 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3739 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3740 
3741 /*******************************************************************************
3742 END OF CONFIGURATION
3743 */
3744 
3745 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3746 
3747 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3748  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3749 
3750 // Returns number of bits set to 1 in (v).
3751 static inline uint32_t VmaCountBitsSet(uint32_t v)
3752 {
3753  uint32_t c = v - ((v >> 1) & 0x55555555);
3754  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3755  c = ((c >> 4) + c) & 0x0F0F0F0F;
3756  c = ((c >> 8) + c) & 0x00FF00FF;
3757  c = ((c >> 16) + c) & 0x0000FFFF;
3758  return c;
3759 }
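// For example, VmaCountBitsSet(0x2A) == 3, because 0x2A is binary 101010.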
3760 
// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
3775 
3776 // Division with mathematical rounding to nearest number.
3777 template <typename T>
3778 static inline T VmaRoundDiv(T x, T y)
3779 {
3780  return (x + (y / (T)2)) / y;
3781 }
3782 
/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
Returns true for 0.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}
3793 
3794 // Returns smallest power of 2 greater or equal to v.
3795 static inline uint32_t VmaNextPow2(uint32_t v)
3796 {
3797  v--;
3798  v |= v >> 1;
3799  v |= v >> 2;
3800  v |= v >> 4;
3801  v |= v >> 8;
3802  v |= v >> 16;
3803  v++;
3804  return v;
3805 }
3806 static inline uint64_t VmaNextPow2(uint64_t v)
3807 {
3808  v--;
3809  v |= v >> 1;
3810  v |= v >> 2;
3811  v |= v >> 4;
3812  v |= v >> 8;
3813  v |= v >> 16;
3814  v |= v >> 32;
3815  v++;
3816  return v;
3817 }
3818 
3819 // Returns largest power of 2 less or equal to v.
3820 static inline uint32_t VmaPrevPow2(uint32_t v)
3821 {
3822  v |= v >> 1;
3823  v |= v >> 2;
3824  v |= v >> 4;
3825  v |= v >> 8;
3826  v |= v >> 16;
3827  v = v ^ (v >> 1);
3828  return v;
3829 }
3830 static inline uint64_t VmaPrevPow2(uint64_t v)
3831 {
3832  v |= v >> 1;
3833  v |= v >> 2;
3834  v |= v >> 4;
3835  v |= v >> 8;
3836  v |= v >> 16;
3837  v |= v >> 32;
3838  v = v ^ (v >> 1);
3839  return v;
3840 }
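
// For example, VmaNextPow2(17u) == 32 and VmaPrevPow2(17u) == 16.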
3841 
3842 static inline bool VmaStrIsEmpty(const char* pStr)
3843 {
3844  return pStr == VMA_NULL || *pStr == '\0';
3845 }
3846 
3847 #if VMA_STATS_STRING_ENABLED
3848 
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}
3864 
3865 #endif // #if VMA_STATS_STRING_ENABLED
3866 
3867 #ifndef VMA_SORT
3868 
3869 template<typename Iterator, typename Compare>
3870 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3871 {
3872  Iterator centerValue = end; --centerValue;
3873  Iterator insertIndex = beg;
3874  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3875  {
3876  if(cmp(*memTypeIndex, *centerValue))
3877  {
3878  if(insertIndex != memTypeIndex)
3879  {
3880  VMA_SWAP(*memTypeIndex, *insertIndex);
3881  }
3882  ++insertIndex;
3883  }
3884  }
3885  if(insertIndex != centerValue)
3886  {
3887  VMA_SWAP(*insertIndex, *centerValue);
3888  }
3889  return insertIndex;
3890 }
3891 
3892 template<typename Iterator, typename Compare>
3893 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3894 {
3895  if(beg < end)
3896  {
3897  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3898  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3899  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3900  }
3901 }
3902 
3903 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3904 
3905 #endif // #ifndef VMA_SORT
3906 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)",
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
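
// For example, with pageSize = 4096: a resource at offset 1024 with size 512
// ends on page 0, and a resource starting at offset 3072 also begins on page 0,
// so this function returns true for them; if the second resource started at
// offset 4096, it would return false.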
3927 
3928 enum VmaSuballocationType
3929 {
3930  VMA_SUBALLOCATION_TYPE_FREE = 0,
3931  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3932  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3933  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3934  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3935  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3936  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3937 };
3938 
/*
Returns true if the given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other is an optimal image. If a type is unknown, behaves
conservatively.
*/
3945 static inline bool VmaIsBufferImageGranularityConflict(
3946  VmaSuballocationType suballocType1,
3947  VmaSuballocationType suballocType2)
3948 {
3949  if(suballocType1 > suballocType2)
3950  {
3951  VMA_SWAP(suballocType1, suballocType2);
3952  }
3953 
3954  switch(suballocType1)
3955  {
3956  case VMA_SUBALLOCATION_TYPE_FREE:
3957  return false;
3958  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3959  return true;
3960  case VMA_SUBALLOCATION_TYPE_BUFFER:
3961  return
3962  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3963  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3964  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3965  return
3966  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3967  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3968  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3969  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3970  return
3971  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3972  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3973  return false;
3974  default:
3975  VMA_ASSERT(0);
3976  return true;
3977  }
3978 }
3979 
3980 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3981 {
3982 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3983  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3984  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3985  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3986  {
3987  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3988  }
3989 #else
3990  // no-op
3991 #endif
3992 }
3993 
3994 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3995 {
3996 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3997  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3998  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3999  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4000  {
4001  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4002  {
4003  return false;
4004  }
4005  }
4006 #endif
4007  return true;
4008 }
4009 
4010 /*
4011 Fills structure with parameters of an example buffer to be used for transfers
4012 during GPU memory defragmentation.
4013 */
4014 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4015 {
4016  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4017  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4018  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4019  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4020 }
4021 
4022 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4023 struct VmaMutexLock
4024 {
4025  VMA_CLASS_NO_COPY(VmaMutexLock)
4026 public:
4027  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4028  m_pMutex(useMutex ? &mutex : VMA_NULL)
4029  { if(m_pMutex) { m_pMutex->Lock(); } }
4030  ~VmaMutexLock()
4031  { if(m_pMutex) { m_pMutex->Unlock(); } }
4032 private:
4033  VMA_MUTEX* m_pMutex;
4034 };
4035 
4036 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4037 struct VmaMutexLockRead
4038 {
4039  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4040 public:
4041  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4042  m_pMutex(useMutex ? &mutex : VMA_NULL)
4043  { if(m_pMutex) { m_pMutex->LockRead(); } }
4044  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4045 private:
4046  VMA_RW_MUTEX* m_pMutex;
4047 };
4048 
4049 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4050 struct VmaMutexLockWrite
4051 {
4052  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4053 public:
4054  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4055  m_pMutex(useMutex ? &mutex : VMA_NULL)
4056  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4057  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4058 private:
4059  VMA_RW_MUTEX* m_pMutex;
4060 };
4061 
4062 #if VMA_DEBUG_GLOBAL_MUTEX
4063  static VMA_MUTEX gDebugGlobalMutex;
4064  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4065 #else
4066  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4067 #endif
4068 
4069 // Minimum size of a free suballocation to register it in the free suballocation collection.
4070 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4071 
/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value points at the found element, if present in the collection,
or at the place where a new element with value (key) should be inserted.
*/
4081 template <typename CmpLess, typename IterT, typename KeyT>
4082 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
4083 {
4084  size_t down = 0, up = (end - beg);
4085  while(down < up)
4086  {
4087  const size_t mid = (down + up) / 2;
4088  if(cmp(*(beg+mid), key))
4089  {
4090  down = mid + 1;
4091  }
4092  else
4093  {
4094  up = mid;
4095  }
4096  }
4097  return beg + down;
4098 }
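
// Example (a sketch): with a sorted array {8, 16, 32} and key 20, the returned
// iterator points at 32, which is where 20 would have to be inserted.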
4099 
4100 template<typename CmpLess, typename IterT, typename KeyT>
4101 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4102 {
4103  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4104  beg, end, value, cmp);
4105  if(it == end ||
4106  (!cmp(*it, value) && !cmp(value, *it)))
4107  {
4108  return it;
4109  }
4110  return end;
4111 }
4112 
/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
4118 template<typename T>
4119 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4120 {
4121  for(uint32_t i = 0; i < count; ++i)
4122  {
4123  const T iPtr = arr[i];
4124  if(iPtr == VMA_NULL)
4125  {
4126  return false;
4127  }
4128  for(uint32_t j = i + 1; j < count; ++j)
4129  {
4130  if(iPtr == arr[j])
4131  {
4132  return false;
4133  }
4134  }
4135  }
4136  return true;
4137 }
4138 
////////////////////////////////////////////////////////////////////////////////
// Memory allocation
4141 
4142 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4143 {
4144  if((pAllocationCallbacks != VMA_NULL) &&
4145  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4146  {
4147  return (*pAllocationCallbacks->pfnAllocation)(
4148  pAllocationCallbacks->pUserData,
4149  size,
4150  alignment,
4151  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4152  }
4153  else
4154  {
4155  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4156  }
4157 }
4158 
4159 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4160 {
4161  if((pAllocationCallbacks != VMA_NULL) &&
4162  (pAllocationCallbacks->pfnFree != VMA_NULL))
4163  {
4164  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4165  }
4166  else
4167  {
4168  VMA_SYSTEM_FREE(ptr);
4169  }
4170 }
4171 
4172 template<typename T>
4173 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4174 {
4175  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4176 }
4177 
4178 template<typename T>
4179 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4180 {
4181  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4182 }
4183 
4184 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4185 
4186 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4187 
4188 template<typename T>
4189 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4190 {
4191  ptr->~T();
4192  VmaFree(pAllocationCallbacks, ptr);
4193 }
4194 
4195 template<typename T>
4196 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4197 {
4198  if(ptr != VMA_NULL)
4199  {
4200  for(size_t i = count; i--; )
4201  {
4202  ptr[i].~T();
4203  }
4204  VmaFree(pAllocationCallbacks, ptr);
4205  }
4206 }
4207 
4208 // STL-compatible allocator.
4209 template<typename T>
4210 class VmaStlAllocator
4211 {
4212 public:
4213  const VkAllocationCallbacks* const m_pCallbacks;
4214  typedef T value_type;
4215 
4216  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4217  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4218 
4219  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4220  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4221 
4222  template<typename U>
4223  bool operator==(const VmaStlAllocator<U>& rhs) const
4224  {
4225  return m_pCallbacks == rhs.m_pCallbacks;
4226  }
4227  template<typename U>
4228  bool operator!=(const VmaStlAllocator<U>& rhs) const
4229  {
4230  return m_pCallbacks != rhs.m_pCallbacks;
4231  }
4232 
4233  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4234 };
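
// Example usage (a sketch): VmaStlAllocator can be plugged into the containers
// below, e.g.:
//
//     VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(
//         VmaStlAllocator<uint32_t>(pAllocationCallbacks));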
4235 
4236 #if VMA_USE_STL_VECTOR
4237 
4238 #define VmaVector std::vector
4239 
4240 template<typename T, typename allocatorT>
4241 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4242 {
4243  vec.insert(vec.begin() + index, item);
4244 }
4245 
4246 template<typename T, typename allocatorT>
4247 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4248 {
4249  vec.erase(vec.begin() + index);
4250 }
4251 
4252 #else // #if VMA_USE_STL_VECTOR
4253 
/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
4257 template<typename T, typename AllocatorT>
4258 class VmaVector
4259 {
4260 public:
4261  typedef T value_type;
4262 
4263  VmaVector(const AllocatorT& allocator) :
4264  m_Allocator(allocator),
4265  m_pArray(VMA_NULL),
4266  m_Count(0),
4267  m_Capacity(0)
4268  {
4269  }
4270 
4271  VmaVector(size_t count, const AllocatorT& allocator) :
4272  m_Allocator(allocator),
4273  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4274  m_Count(count),
4275  m_Capacity(count)
4276  {
4277  }
4278 
4279  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4280  // value is unused.
4281  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4282  : VmaVector(count, allocator) {}
4283 
4284  VmaVector(const VmaVector<T, AllocatorT>& src) :
4285  m_Allocator(src.m_Allocator),
4286  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4287  m_Count(src.m_Count),
4288  m_Capacity(src.m_Count)
4289  {
4290  if(m_Count != 0)
4291  {
4292  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4293  }
4294  }
4295 
4296  ~VmaVector()
4297  {
4298  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4299  }
4300 
4301  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4302  {
4303  if(&rhs != this)
4304  {
4305  resize(rhs.m_Count);
4306  if(m_Count != 0)
4307  {
4308  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4309  }
4310  }
4311  return *this;
4312  }
4313 
4314  bool empty() const { return m_Count == 0; }
4315  size_t size() const { return m_Count; }
4316  T* data() { return m_pArray; }
4317  const T* data() const { return m_pArray; }
4318 
4319  T& operator[](size_t index)
4320  {
4321  VMA_HEAVY_ASSERT(index < m_Count);
4322  return m_pArray[index];
4323  }
4324  const T& operator[](size_t index) const
4325  {
4326  VMA_HEAVY_ASSERT(index < m_Count);
4327  return m_pArray[index];
4328  }
4329 
4330  T& front()
4331  {
4332  VMA_HEAVY_ASSERT(m_Count > 0);
4333  return m_pArray[0];
4334  }
4335  const T& front() const
4336  {
4337  VMA_HEAVY_ASSERT(m_Count > 0);
4338  return m_pArray[0];
4339  }
4340  T& back()
4341  {
4342  VMA_HEAVY_ASSERT(m_Count > 0);
4343  return m_pArray[m_Count - 1];
4344  }
4345  const T& back() const
4346  {
4347  VMA_HEAVY_ASSERT(m_Count > 0);
4348  return m_pArray[m_Count - 1];
4349  }
4350 
4351  void reserve(size_t newCapacity, bool freeMemory = false)
4352  {
4353  newCapacity = VMA_MAX(newCapacity, m_Count);
4354 
4355  if((newCapacity < m_Capacity) && !freeMemory)
4356  {
4357  newCapacity = m_Capacity;
4358  }
4359 
4360  if(newCapacity != m_Capacity)
4361  {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4363  if(m_Count != 0)
4364  {
4365  memcpy(newArray, m_pArray, m_Count * sizeof(T));
4366  }
4367  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4368  m_Capacity = newCapacity;
4369  m_pArray = newArray;
4370  }
4371  }
4372 
4373  void resize(size_t newCount, bool freeMemory = false)
4374  {
4375  size_t newCapacity = m_Capacity;
4376  if(newCount > m_Capacity)
4377  {
4378  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
4379  }
4380  else if(freeMemory)
4381  {
4382  newCapacity = newCount;
4383  }
4384 
4385  if(newCapacity != m_Capacity)
4386  {
4387  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
4388  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
4389  if(elementsToCopy != 0)
4390  {
4391  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
4392  }
4393  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4394  m_Capacity = newCapacity;
4395  m_pArray = newArray;
4396  }
4397 
4398  m_Count = newCount;
4399  }
4400 
4401  void clear(bool freeMemory = false)
4402  {
4403  resize(0, freeMemory);
4404  }
4405 
4406  void insert(size_t index, const T& src)
4407  {
4408  VMA_HEAVY_ASSERT(index <= m_Count);
4409  const size_t oldCount = size();
4410  resize(oldCount + 1);
4411  if(index < oldCount)
4412  {
4413  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4414  }
4415  m_pArray[index] = src;
4416  }
4417 
4418  void remove(size_t index)
4419  {
4420  VMA_HEAVY_ASSERT(index < m_Count);
4421  const size_t oldCount = size();
4422  if(index < oldCount - 1)
4423  {
4424  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4425  }
4426  resize(oldCount - 1);
4427  }
4428 
4429  void push_back(const T& src)
4430  {
4431  const size_t newIndex = size();
4432  resize(newIndex + 1);
4433  m_pArray[newIndex] = src;
4434  }
4435 
4436  void pop_back()
4437  {
4438  VMA_HEAVY_ASSERT(m_Count > 0);
4439  resize(size() - 1);
4440  }
4441 
4442  void push_front(const T& src)
4443  {
4444  insert(0, src);
4445  }
4446 
4447  void pop_front()
4448  {
4449  VMA_HEAVY_ASSERT(m_Count > 0);
4450  remove(0);
4451  }
4452 
4453  typedef T* iterator;
4454 
4455  iterator begin() { return m_pArray; }
4456  iterator end() { return m_pArray + m_Count; }
4457 
4458 private:
4459  AllocatorT m_Allocator;
4460  T* m_pArray;
4461  size_t m_Count;
4462  size_t m_Capacity;
4463 };
4464 
4465 template<typename T, typename allocatorT>
4466 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
4467 {
4468  vec.insert(index, item);
4469 }
4470 
4471 template<typename T, typename allocatorT>
4472 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
4473 {
4474  vec.remove(index);
4475 }
4476 
4477 #endif // #if VMA_USE_STL_VECTOR
4478 
4479 template<typename CmpLess, typename VectorT>
4480 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4481 {
4482  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4483  vector.data(),
4484  vector.data() + vector.size(),
4485  value,
4486  CmpLess()) - vector.data();
4487  VmaVectorInsert(vector, indexToInsert, value);
4488  return indexToInsert;
4489 }
4490 
4491 template<typename CmpLess, typename VectorT>
4492 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4493 {
4494  CmpLess comparator;
4495  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4496  vector.begin(),
4497  vector.end(),
4498  value,
4499  comparator);
4500  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4501  {
4502  size_t indexToRemove = it - vector.begin();
4503  VmaVectorRemove(vector, indexToRemove);
4504  return true;
4505  }
4506  return false;
4507 }
4508 
////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator
4511 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
4517 template<typename T>
4518 class VmaPoolAllocator
4519 {
4520  VMA_CLASS_NO_COPY(VmaPoolAllocator)
4521 public:
4522  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
4523  ~VmaPoolAllocator();
4524  T* Alloc();
4525  void Free(T* ptr);
4526 
4527 private:
4528  union Item
4529  {
4530  uint32_t NextFreeIndex;
4531  alignas(T) char Value[sizeof(T)];
4532  };
4533 
4534  struct ItemBlock
4535  {
4536  Item* pItems;
4537  uint32_t Capacity;
4538  uint32_t FirstFreeIndex;
4539  };
4540 
4541  const VkAllocationCallbacks* m_pAllocationCallbacks;
4542  const uint32_t m_FirstBlockCapacity;
4543  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
4544 
4545  ItemBlock& CreateNewBlock();
4546 };
4547 
4548 template<typename T>
4549 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
4550  m_pAllocationCallbacks(pAllocationCallbacks),
4551  m_FirstBlockCapacity(firstBlockCapacity),
4552  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4553 {
4554  VMA_ASSERT(m_FirstBlockCapacity > 1);
4555 }
4556 
4557 template<typename T>
4558 VmaPoolAllocator<T>::~VmaPoolAllocator()
4559 {
4560  for(size_t i = m_ItemBlocks.size(); i--; )
4561  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4562  m_ItemBlocks.clear();
4563 }
4564 
4565 template<typename T>
4566 T* VmaPoolAllocator<T>::Alloc()
4567 {
4568  for(size_t i = m_ItemBlocks.size(); i--; )
4569  {
4570  ItemBlock& block = m_ItemBlocks[i];
4571  // This block has some free items: Use first one.
4572  if(block.FirstFreeIndex != UINT32_MAX)
4573  {
4574  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4575  block.FirstFreeIndex = pItem->NextFreeIndex;
4576  T* result = (T*)&pItem->Value;
4577  new(result)T(); // Explicit constructor call.
4578  return result;
4579  }
4580  }
4581 
4582  // No block has free item: Create new one and use it.
4583  ItemBlock& newBlock = CreateNewBlock();
4584  Item* const pItem = &newBlock.pItems[0];
4585  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4586  T* result = (T*)&pItem->Value;
4587  new(result)T(); // Explicit constructor call.
4588  return result;
4589 }
4590 
4591 template<typename T>
4592 void VmaPoolAllocator<T>::Free(T* ptr)
4593 {
4594  // Search all memory blocks to find ptr.
4595  for(size_t i = m_ItemBlocks.size(); i--; )
4596  {
4597  ItemBlock& block = m_ItemBlocks[i];
4598 
4599  // Casting to union.
4600  Item* pItemPtr;
4601  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4602 
4603  // Check if pItemPtr is in address range of this block.
4604  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4605  {
4606  ptr->~T(); // Explicit destructor call.
4607  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4608  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4609  block.FirstFreeIndex = index;
4610  return;
4611  }
4612  }
4613  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4614 }
4615 
4616 template<typename T>
4617 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4618 {
4619  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
4620  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
4621 
4622  const ItemBlock newBlock = {
4623  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
4624  newBlockCapacity,
4625  0 };
4626 
4627  m_ItemBlocks.push_back(newBlock);
4628 
    // Set up a singly-linked list of all free items in this block.
4630  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
4631  newBlock.pItems[i].NextFreeIndex = i + 1;
4632  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
4633  return m_ItemBlocks.back();
4634 }
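
// Example usage (a sketch): a pool allocator for list items could be created as
//
//     VmaPoolAllocator<VmaListItem<int>> itemAllocator(pAllocationCallbacks, 128);
//     VmaListItem<int>* item = itemAllocator.Alloc();
//     itemAllocator.Free(item);
//
// which is exactly how VmaRawList below uses it.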
4635 
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList
4638 
4639 #if VMA_USE_STL_LIST
4640 
4641 #define VmaList std::list
4642 
4643 #else // #if VMA_USE_STL_LIST
4644 
4645 template<typename T>
4646 struct VmaListItem
4647 {
4648  VmaListItem* pPrev;
4649  VmaListItem* pNext;
4650  T Value;
4651 };
4652 
4653 // Doubly linked list.
4654 template<typename T>
4655 class VmaRawList
4656 {
4657  VMA_CLASS_NO_COPY(VmaRawList)
4658 public:
4659  typedef VmaListItem<T> ItemType;
4660 
4661  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
4662  ~VmaRawList();
4663  void Clear();
4664 
4665  size_t GetCount() const { return m_Count; }
4666  bool IsEmpty() const { return m_Count == 0; }
4667 
4668  ItemType* Front() { return m_pFront; }
4669  const ItemType* Front() const { return m_pFront; }
4670  ItemType* Back() { return m_pBack; }
4671  const ItemType* Back() const { return m_pBack; }
4672 
4673  ItemType* PushBack();
4674  ItemType* PushFront();
4675  ItemType* PushBack(const T& value);
4676  ItemType* PushFront(const T& value);
4677  void PopBack();
4678  void PopFront();
4679 
4680  // Item can be null - it means PushBack.
4681  ItemType* InsertBefore(ItemType* pItem);
4682  // Item can be null - it means PushFront.
4683  ItemType* InsertAfter(ItemType* pItem);
4684 
4685  ItemType* InsertBefore(ItemType* pItem, const T& value);
4686  ItemType* InsertAfter(ItemType* pItem, const T& value);
4687 
4688  void Remove(ItemType* pItem);
4689 
4690 private:
4691  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4692  VmaPoolAllocator<ItemType> m_ItemAllocator;
4693  ItemType* m_pFront;
4694  ItemType* m_pBack;
4695  size_t m_Count;
4696 };
4697 
4698 template<typename T>
4699 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4700  m_pAllocationCallbacks(pAllocationCallbacks),
4701  m_ItemAllocator(pAllocationCallbacks, 128),
4702  m_pFront(VMA_NULL),
4703  m_pBack(VMA_NULL),
4704  m_Count(0)
4705 {
4706 }
4707 
4708 template<typename T>
4709 VmaRawList<T>::~VmaRawList()
4710 {
    // Intentionally not calling Clear, because that would waste computation
    // returning all items one by one to m_ItemAllocator as free.
4713 }
4714 
4715 template<typename T>
4716 void VmaRawList<T>::Clear()
4717 {
4718  if(IsEmpty() == false)
4719  {
4720  ItemType* pItem = m_pBack;
4721  while(pItem != VMA_NULL)
4722  {
4723  ItemType* const pPrevItem = pItem->pPrev;
4724  m_ItemAllocator.Free(pItem);
4725  pItem = pPrevItem;
4726  }
4727  m_pFront = VMA_NULL;
4728  m_pBack = VMA_NULL;
4729  m_Count = 0;
4730  }
4731 }
4732 
4733 template<typename T>
4734 VmaListItem<T>* VmaRawList<T>::PushBack()
4735 {
4736  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4737  pNewItem->pNext = VMA_NULL;
4738  if(IsEmpty())
4739  {
4740  pNewItem->pPrev = VMA_NULL;
4741  m_pFront = pNewItem;
4742  m_pBack = pNewItem;
4743  m_Count = 1;
4744  }
4745  else
4746  {
4747  pNewItem->pPrev = m_pBack;
4748  m_pBack->pNext = pNewItem;
4749  m_pBack = pNewItem;
4750  ++m_Count;
4751  }
4752  return pNewItem;
4753 }
4754 
4755 template<typename T>
4756 VmaListItem<T>* VmaRawList<T>::PushFront()
4757 {
4758  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4759  pNewItem->pPrev = VMA_NULL;
4760  if(IsEmpty())
4761  {
4762  pNewItem->pNext = VMA_NULL;
4763  m_pFront = pNewItem;
4764  m_pBack = pNewItem;
4765  m_Count = 1;
4766  }
4767  else
4768  {
4769  pNewItem->pNext = m_pFront;
4770  m_pFront->pPrev = pNewItem;
4771  m_pFront = pNewItem;
4772  ++m_Count;
4773  }
4774  return pNewItem;
4775 }
4776 
4777 template<typename T>
4778 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4779 {
4780  ItemType* const pNewItem = PushBack();
4781  pNewItem->Value = value;
4782  return pNewItem;
4783 }
4784 
4785 template<typename T>
4786 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4787 {
4788  ItemType* const pNewItem = PushFront();
4789  pNewItem->Value = value;
4790  return pNewItem;
4791 }
4792 
4793 template<typename T>
4794 void VmaRawList<T>::PopBack()
4795 {
4796  VMA_HEAVY_ASSERT(m_Count > 0);
4797  ItemType* const pBackItem = m_pBack;
4798  ItemType* const pPrevItem = pBackItem->pPrev;
4799  if(pPrevItem != VMA_NULL)
4800  {
4801  pPrevItem->pNext = VMA_NULL;
4802  }
4803  m_pBack = pPrevItem;
4804  m_ItemAllocator.Free(pBackItem);
4805  --m_Count;
4806 }
4807 
4808 template<typename T>
4809 void VmaRawList<T>::PopFront()
4810 {
4811  VMA_HEAVY_ASSERT(m_Count > 0);
4812  ItemType* const pFrontItem = m_pFront;
4813  ItemType* const pNextItem = pFrontItem->pNext;
4814  if(pNextItem != VMA_NULL)
4815  {
4816  pNextItem->pPrev = VMA_NULL;
4817  }
4818  m_pFront = pNextItem;
4819  m_ItemAllocator.Free(pFrontItem);
4820  --m_Count;
4821 }
4822 
4823 template<typename T>
4824 void VmaRawList<T>::Remove(ItemType* pItem)
4825 {
4826  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4827  VMA_HEAVY_ASSERT(m_Count > 0);
4828 
4829  if(pItem->pPrev != VMA_NULL)
4830  {
4831  pItem->pPrev->pNext = pItem->pNext;
4832  }
4833  else
4834  {
4835  VMA_HEAVY_ASSERT(m_pFront == pItem);
4836  m_pFront = pItem->pNext;
4837  }
4838 
4839  if(pItem->pNext != VMA_NULL)
4840  {
4841  pItem->pNext->pPrev = pItem->pPrev;
4842  }
4843  else
4844  {
4845  VMA_HEAVY_ASSERT(m_pBack == pItem);
4846  m_pBack = pItem->pPrev;
4847  }
4848 
4849  m_ItemAllocator.Free(pItem);
4850  --m_Count;
4851 }
4852 
4853 template<typename T>
4854 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4855 {
4856  if(pItem != VMA_NULL)
4857  {
4858  ItemType* const prevItem = pItem->pPrev;
4859  ItemType* const newItem = m_ItemAllocator.Alloc();
4860  newItem->pPrev = prevItem;
4861  newItem->pNext = pItem;
4862  pItem->pPrev = newItem;
4863  if(prevItem != VMA_NULL)
4864  {
4865  prevItem->pNext = newItem;
4866  }
4867  else
4868  {
4869  VMA_HEAVY_ASSERT(m_pFront == pItem);
4870  m_pFront = newItem;
4871  }
4872  ++m_Count;
4873  return newItem;
4874  }
4875  else
4876  return PushBack();
4877 }
4878 
4879 template<typename T>
4880 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4881 {
4882  if(pItem != VMA_NULL)
4883  {
4884  ItemType* const nextItem = pItem->pNext;
4885  ItemType* const newItem = m_ItemAllocator.Alloc();
4886  newItem->pNext = nextItem;
4887  newItem->pPrev = pItem;
4888  pItem->pNext = newItem;
4889  if(nextItem != VMA_NULL)
4890  {
4891  nextItem->pPrev = newItem;
4892  }
4893  else
4894  {
4895  VMA_HEAVY_ASSERT(m_pBack == pItem);
4896  m_pBack = newItem;
4897  }
4898  ++m_Count;
4899  return newItem;
4900  }
4901  else
4902  return PushFront();
4903 }
4904 
4905 template<typename T>
4906 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4907 {
4908  ItemType* const newItem = InsertBefore(pItem);
4909  newItem->Value = value;
4910  return newItem;
4911 }
4912 
4913 template<typename T>
4914 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4915 {
4916  ItemType* const newItem = InsertAfter(pItem);
4917  newItem->Value = value;
4918  return newItem;
4919 }
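// Illustrative usage sketch (not part of the original header), assuming a
// VkAllocationCallbacks* named pAllocationCallbacks is available (it may be
// null to fall back to the default CPU allocator):
//
//   VmaRawList<int> list(pAllocationCallbacks);
//   VmaListItem<int>* const pFirst = list.PushBack(1); // list: [1]
//   list.PushFront(0);                                 // list: [0, 1]
//   list.InsertAfter(pFirst, 2);                       // list: [0, 1, 2]
//   list.Remove(pFirst);                               // list: [0, 2]
//   list.PopBack();                                    // list: [0]
//
// Items live in an internal VmaPoolAllocator, so inserting or removing one
// item never invalidates pointers to the others.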
4920 
4921 template<typename T, typename AllocatorT>
4922 class VmaList
4923 {
4924  VMA_CLASS_NO_COPY(VmaList)
4925 public:
4926  class iterator
4927  {
4928  public:
4929  iterator() :
4930  m_pList(VMA_NULL),
4931  m_pItem(VMA_NULL)
4932  {
4933  }
4934 
4935  T& operator*() const
4936  {
4937  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4938  return m_pItem->Value;
4939  }
4940  T* operator->() const
4941  {
4942  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4943  return &m_pItem->Value;
4944  }
4945 
4946  iterator& operator++()
4947  {
4948  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4949  m_pItem = m_pItem->pNext;
4950  return *this;
4951  }
4952  iterator& operator--()
4953  {
4954  if(m_pItem != VMA_NULL)
4955  {
4956  m_pItem = m_pItem->pPrev;
4957  }
4958  else
4959  {
4960  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4961  m_pItem = m_pList->Back();
4962  }
4963  return *this;
4964  }
4965 
4966  iterator operator++(int)
4967  {
4968  iterator result = *this;
4969  ++*this;
4970  return result;
4971  }
4972  iterator operator--(int)
4973  {
4974  iterator result = *this;
4975  --*this;
4976  return result;
4977  }
4978 
4979  bool operator==(const iterator& rhs) const
4980  {
4981  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4982  return m_pItem == rhs.m_pItem;
4983  }
4984  bool operator!=(const iterator& rhs) const
4985  {
4986  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4987  return m_pItem != rhs.m_pItem;
4988  }
4989 
4990  private:
4991  VmaRawList<T>* m_pList;
4992  VmaListItem<T>* m_pItem;
4993 
4994  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4995  m_pList(pList),
4996  m_pItem(pItem)
4997  {
4998  }
4999 
5000  friend class VmaList<T, AllocatorT>;
5001  };
5002 
5003  class const_iterator
5004  {
5005  public:
5006  const_iterator() :
5007  m_pList(VMA_NULL),
5008  m_pItem(VMA_NULL)
5009  {
5010  }
5011 
5012  const_iterator(const iterator& src) :
5013  m_pList(src.m_pList),
5014  m_pItem(src.m_pItem)
5015  {
5016  }
5017 
5018  const T& operator*() const
5019  {
5020  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5021  return m_pItem->Value;
5022  }
5023  const T* operator->() const
5024  {
5025  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5026  return &m_pItem->Value;
5027  }
5028 
5029  const_iterator& operator++()
5030  {
5031  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5032  m_pItem = m_pItem->pNext;
5033  return *this;
5034  }
5035  const_iterator& operator--()
5036  {
5037  if(m_pItem != VMA_NULL)
5038  {
5039  m_pItem = m_pItem->pPrev;
5040  }
5041  else
5042  {
5043  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5044  m_pItem = m_pList->Back();
5045  }
5046  return *this;
5047  }
5048 
5049  const_iterator operator++(int)
5050  {
5051  const_iterator result = *this;
5052  ++*this;
5053  return result;
5054  }
5055  const_iterator operator--(int)
5056  {
5057  const_iterator result = *this;
5058  --*this;
5059  return result;
5060  }
5061 
5062  bool operator==(const const_iterator& rhs) const
5063  {
5064  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5065  return m_pItem == rhs.m_pItem;
5066  }
5067  bool operator!=(const const_iterator& rhs) const
5068  {
5069  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5070  return m_pItem != rhs.m_pItem;
5071  }
5072 
5073  private:
5074  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5075  m_pList(pList),
5076  m_pItem(pItem)
5077  {
5078  }
5079 
5080  const VmaRawList<T>* m_pList;
5081  const VmaListItem<T>* m_pItem;
5082 
5083  friend class VmaList<T, AllocatorT>;
5084  };
5085 
5086  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5087 
5088  bool empty() const { return m_RawList.IsEmpty(); }
5089  size_t size() const { return m_RawList.GetCount(); }
5090 
5091  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5092  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5093 
5094  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5095  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5096 
5097  void clear() { m_RawList.Clear(); }
5098  void push_back(const T& value) { m_RawList.PushBack(value); }
5099  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5100  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5101 
5102 private:
5103  VmaRawList<T> m_RawList;
5104 };
5105 
5106 #endif // #if VMA_USE_STL_LIST
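// Illustrative usage sketch (not part of the original header): VmaList mirrors
// a subset of the std::list interface. Its constructor takes an allocator
// object exposing m_pCallbacks, such as VmaStlAllocator:
//
//   VmaStlAllocator<int> alloc(pAllocationCallbacks); // assumed callbacks
//   VmaList<int, VmaStlAllocator<int> > list(alloc);
//   list.push_back(10);
//   list.push_back(20);
//   for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin();
//       it != list.end(); ++it)
//   {
//       int& value = *it; // Visits 10, then 20.
//   }
//
// end() is represented by a null m_pItem, which is why operator-- on end()
// recovers the last element via m_pList->Back().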
5107 
5108 ////////////////////////////////////////////////////////////////////////////////
5109 // class VmaMap
5110 
5111 // Unused in this version.
5112 #if 0
5113 
5114 #if VMA_USE_STL_UNORDERED_MAP
5115 
5116 #define VmaPair std::pair
5117 
5118 #define VMA_MAP_TYPE(KeyT, ValueT) \
5119  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5120 
5121 #else // #if VMA_USE_STL_UNORDERED_MAP
5122 
5123 template<typename T1, typename T2>
5124 struct VmaPair
5125 {
5126  T1 first;
5127  T2 second;
5128 
5129  VmaPair() : first(), second() { }
5130  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5131 };
5132 
5133 /* Class compatible with subset of interface of std::unordered_map.
5134 KeyT, ValueT must be POD because they will be stored in VmaVector.
5135 */
5136 template<typename KeyT, typename ValueT>
5137 class VmaMap
5138 {
5139 public:
5140  typedef VmaPair<KeyT, ValueT> PairType;
5141  typedef PairType* iterator;
5142 
5143  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5144 
5145  iterator begin() { return m_Vector.begin(); }
5146  iterator end() { return m_Vector.end(); }
5147 
5148  void insert(const PairType& pair);
5149  iterator find(const KeyT& key);
5150  void erase(iterator it);
5151 
5152 private:
5153  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5154 };
5155 
5156 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5157 
5158 template<typename FirstT, typename SecondT>
5159 struct VmaPairFirstLess
5160 {
5161  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
5162  {
5163  return lhs.first < rhs.first;
5164  }
5165  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
5166  {
5167  return lhs.first < rhsFirst;
5168  }
5169 };
5170 
5171 template<typename KeyT, typename ValueT>
5172 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
5173 {
5174  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5175  m_Vector.data(),
5176  m_Vector.data() + m_Vector.size(),
5177  pair,
5178  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
5179  VmaVectorInsert(m_Vector, indexToInsert, pair);
5180 }
5181 
5182 template<typename KeyT, typename ValueT>
5183 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
5184 {
5185  PairType* it = VmaBinaryFindFirstNotLess(
5186  m_Vector.data(),
5187  m_Vector.data() + m_Vector.size(),
5188  key,
5189  VmaPairFirstLess<KeyT, ValueT>());
5190  if((it != m_Vector.end()) && (it->first == key))
5191  {
5192  return it;
5193  }
5194  else
5195  {
5196  return m_Vector.end();
5197  }
5198 }
5199 
5200 template<typename KeyT, typename ValueT>
5201 void VmaMap<KeyT, ValueT>::erase(iterator it)
5202 {
5203  VmaVectorRemove(m_Vector, it - m_Vector.begin());
5204 }
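// Illustrative note (not in the original source): VmaMap keeps its pairs
// sorted by key inside a VmaVector, so find() costs O(log n) via binary
// search (VmaBinaryFindFirstNotLess behaves like std::lower_bound), while
// insert() and erase() cost O(n) because trailing elements must be shifted.
// For example, with keys {2, 5, 9}, find(5) binary-searches to the middle
// element, while inserting key 7 locates index 2 and shifts {9, ...} right.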
5205 
5206 #endif // #if VMA_USE_STL_UNORDERED_MAP
5207 
5208 #endif // #if 0
5209 
5210 ////////////////////////////////////////////////////////////////////////////////
5211 
5212 class VmaDeviceMemoryBlock;
5213 
5214 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
5215 
5216 struct VmaAllocation_T
5217 {
5218 private:
5219  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
5220 
5221  enum FLAGS
5222  {
5223  FLAG_USER_DATA_STRING = 0x01,
5224  };
5225 
5226 public:
5227  enum ALLOCATION_TYPE
5228  {
5229  ALLOCATION_TYPE_NONE,
5230  ALLOCATION_TYPE_BLOCK,
5231  ALLOCATION_TYPE_DEDICATED,
5232  };
5233 
5234  /*
5235  This struct is allocated using VmaPoolAllocator.
5236  */
5237 
5238  void Ctor(uint32_t currentFrameIndex, bool userDataString)
5239  {
5240  m_Alignment = 1;
5241  m_Size = 0;
5242  m_pUserData = VMA_NULL;
5243  m_LastUseFrameIndex = currentFrameIndex;
5244  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
5245  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
5246  m_MapCount = 0;
5247  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
5248 
5249 #if VMA_STATS_STRING_ENABLED
5250  m_CreationFrameIndex = currentFrameIndex;
5251  m_BufferImageUsage = 0;
5252 #endif
5253  }
5254 
5255  void Dtor()
5256  {
5257  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
5258 
5259  // Check if owned string was freed.
5260  VMA_ASSERT(m_pUserData == VMA_NULL);
5261  }
5262 
5263  void InitBlockAllocation(
5264  VmaDeviceMemoryBlock* block,
5265  VkDeviceSize offset,
5266  VkDeviceSize alignment,
5267  VkDeviceSize size,
5268  VmaSuballocationType suballocationType,
5269  bool mapped,
5270  bool canBecomeLost)
5271  {
5272  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5273  VMA_ASSERT(block != VMA_NULL);
5274  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5275  m_Alignment = alignment;
5276  m_Size = size;
5277  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5278  m_SuballocationType = (uint8_t)suballocationType;
5279  m_BlockAllocation.m_Block = block;
5280  m_BlockAllocation.m_Offset = offset;
5281  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
5282  }
5283 
5284  void InitLost()
5285  {
5286  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5287  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
5288  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
5289  m_BlockAllocation.m_Block = VMA_NULL;
5290  m_BlockAllocation.m_Offset = 0;
5291  m_BlockAllocation.m_CanBecomeLost = true;
5292  }
5293 
5294  void ChangeBlockAllocation(
5295  VmaAllocator hAllocator,
5296  VmaDeviceMemoryBlock* block,
5297  VkDeviceSize offset);
5298 
5299  void ChangeOffset(VkDeviceSize newOffset);
5300 
5301  // pMappedData not null means allocation is created with MAPPED flag.
5302  void InitDedicatedAllocation(
5303  uint32_t memoryTypeIndex,
5304  VkDeviceMemory hMemory,
5305  VmaSuballocationType suballocationType,
5306  void* pMappedData,
5307  VkDeviceSize size)
5308  {
5309  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
5310  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
5311  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
5312  m_Alignment = 0;
5313  m_Size = size;
5314  m_SuballocationType = (uint8_t)suballocationType;
5315  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
5316  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
5317  m_DedicatedAllocation.m_hMemory = hMemory;
5318  m_DedicatedAllocation.m_pMappedData = pMappedData;
5319  }
5320 
5321  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
5322  VkDeviceSize GetAlignment() const { return m_Alignment; }
5323  VkDeviceSize GetSize() const { return m_Size; }
5324  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
5325  void* GetUserData() const { return m_pUserData; }
5326  void SetUserData(VmaAllocator hAllocator, void* pUserData);
5327  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
5328 
5329  VmaDeviceMemoryBlock* GetBlock() const
5330  {
5331  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5332  return m_BlockAllocation.m_Block;
5333  }
5334  VkDeviceSize GetOffset() const;
5335  VkDeviceMemory GetMemory() const;
5336  uint32_t GetMemoryTypeIndex() const;
5337  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
5338  void* GetMappedData() const;
5339  bool CanBecomeLost() const;
5340 
5341  uint32_t GetLastUseFrameIndex() const
5342  {
5343  return m_LastUseFrameIndex.load();
5344  }
5345  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
5346  {
5347  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
5348  }
5349  /*
5350  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
5351  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
5352  - Else, returns false.
5353 
5354  If hAllocation is already lost, assert - you should not call it then.
5355  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
5356  */
5357  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5358 
5359  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
5360  {
5361  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
5362  outInfo.blockCount = 1;
5363  outInfo.allocationCount = 1;
5364  outInfo.unusedRangeCount = 0;
5365  outInfo.usedBytes = m_Size;
5366  outInfo.unusedBytes = 0;
5367  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
5368  outInfo.unusedRangeSizeMin = UINT64_MAX;
5369  outInfo.unusedRangeSizeMax = 0;
5370  }
5371 
5372  void BlockAllocMap();
5373  void BlockAllocUnmap();
5374  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
5375  void DedicatedAllocUnmap(VmaAllocator hAllocator);
5376 
5377 #if VMA_STATS_STRING_ENABLED
5378  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
5379  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
5380 
5381  void InitBufferImageUsage(uint32_t bufferImageUsage)
5382  {
5383  VMA_ASSERT(m_BufferImageUsage == 0);
5384  m_BufferImageUsage = bufferImageUsage;
5385  }
5386 
5387  void PrintParameters(class VmaJsonWriter& json) const;
5388 #endif
5389 
5390 private:
5391  VkDeviceSize m_Alignment;
5392  VkDeviceSize m_Size;
5393  void* m_pUserData;
5394  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
5395  uint8_t m_Type; // ALLOCATION_TYPE
5396  uint8_t m_SuballocationType; // VmaSuballocationType
5397  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
5398  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
5399  uint8_t m_MapCount;
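    // Illustrative example (not in the original source): m_MapCount == 0x82
    // means the allocation is persistently mapped (bit 0x80) and additionally
    // has two outstanding vmaMapMemory() calls (0x82 & 0x7F == 2).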
5400  uint8_t m_Flags; // enum FLAGS
5401 
5402  // Allocation out of VmaDeviceMemoryBlock.
5403  struct BlockAllocation
5404  {
5405  VmaDeviceMemoryBlock* m_Block;
5406  VkDeviceSize m_Offset;
5407  bool m_CanBecomeLost;
5408  };
5409 
5410  // Allocation for an object that has its own private VkDeviceMemory.
5411  struct DedicatedAllocation
5412  {
5413  uint32_t m_MemoryTypeIndex;
5414  VkDeviceMemory m_hMemory;
5415  void* m_pMappedData; // Not null means memory is mapped.
5416  };
5417 
5418  union
5419  {
5420  // Allocation out of VmaDeviceMemoryBlock.
5421  BlockAllocation m_BlockAllocation;
5422  // Allocation for an object that has its own private VkDeviceMemory.
5423  DedicatedAllocation m_DedicatedAllocation;
5424  };
5425 
5426 #if VMA_STATS_STRING_ENABLED
5427  uint32_t m_CreationFrameIndex;
5428  uint32_t m_BufferImageUsage; // 0 if unknown.
5429 #endif
5430 
5431  void FreeUserDataString(VmaAllocator hAllocator);
5432 };
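// Illustrative sketch (not in the original source): m_Type discriminates the
// union above, so accessors must be matched to it:
//
//   if(alloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
//   {
//       VmaDeviceMemoryBlock* const pBlock = alloc->GetBlock();
//       const VkDeviceSize offset = alloc->GetOffset();
//   }
//   // For ALLOCATION_TYPE_DEDICATED, GetMemory() returns the allocation's
//   // own VkDeviceMemory and its offset within it is 0.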
5433 
5434 /*
5435 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned
5436 as an allocated memory block, or is free.
5437 */
5438 struct VmaSuballocation
5439 {
5440  VkDeviceSize offset;
5441  VkDeviceSize size;
5442  VmaAllocation hAllocation;
5443  VmaSuballocationType type;
5444 };
5445 
5446 // Comparator for offsets.
5447 struct VmaSuballocationOffsetLess
5448 {
5449  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5450  {
5451  return lhs.offset < rhs.offset;
5452  }
5453 };
5454 struct VmaSuballocationOffsetGreater
5455 {
5456  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
5457  {
5458  return lhs.offset > rhs.offset;
5459  }
5460 };
5461 
5462 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
5463 
5464 // Cost of one additional allocation lost, as equivalent in bytes.
5465 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5466 
5467 enum class VmaAllocationRequestType
5468 {
5469  Normal,
5470  // Used by "Linear" algorithm.
5471  UpperAddress,
5472  EndOf1st,
5473  EndOf2nd,
5474 };
5475 
5476 /*
5477 Parameters of a planned allocation inside a VmaDeviceMemoryBlock.
5478 
5479 If canMakeOtherLost was false:
5480 - item points to a FREE suballocation.
5481 - itemsToMakeLostCount is 0.
5482 
5483 If canMakeOtherLost was true:
5484 - item points to the first of a sequence of suballocations, which are either FREE
5485  or point to VmaAllocations that can become lost.
5486 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
5487  the requested allocation to succeed.
5488 */
5489 struct VmaAllocationRequest
5490 {
5491  VkDeviceSize offset;
5492  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
5493  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
5494  VmaSuballocationList::iterator item;
5495  size_t itemsToMakeLostCount;
5496  void* customData;
5497  VmaAllocationRequestType type;
5498 
5499  VkDeviceSize CalcCost() const
5500  {
5501  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
5502  }
5503 };
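// Worked example (illustrative): comparing two candidate requests,
//   A: sumItemSize = 0,     itemsToMakeLostCount = 0 -> CalcCost() = 0
//   B: sumItemSize = 65536, itemsToMakeLostCount = 2 -> CalcCost() = 65536 + 2 * 1048576
// the allocator prefers A: each allocation made lost is priced at
// VMA_LOST_ALLOCATION_COST (1 MiB) on top of the bytes actually sacrificed.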
5504 
5505 /*
5506 Data structure used for bookkeeping of allocations and unused ranges of memory
5507 in a single VkDeviceMemory block.
5508 */
5509 class VmaBlockMetadata
5510 {
5511 public:
5512  VmaBlockMetadata(VmaAllocator hAllocator);
5513  virtual ~VmaBlockMetadata() { }
5514  virtual void Init(VkDeviceSize size) { m_Size = size; }
5515 
5516  // Validates all data structures inside this object. If not valid, returns false.
5517  virtual bool Validate() const = 0;
5518  VkDeviceSize GetSize() const { return m_Size; }
5519  virtual size_t GetAllocationCount() const = 0;
5520  virtual VkDeviceSize GetSumFreeSize() const = 0;
5521  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
5522  // Returns true if this block is empty - contains only a single free suballocation.
5523  virtual bool IsEmpty() const = 0;
5524 
5525  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
5526  // Shouldn't modify blockCount.
5527  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
5528 
5529 #if VMA_STATS_STRING_ENABLED
5530  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
5531 #endif
5532 
5533  // Tries to find a place for suballocation with given parameters inside this block.
5534  // If succeeded, fills pAllocationRequest and returns true.
5535  // If failed, returns false.
5536  virtual bool CreateAllocationRequest(
5537  uint32_t currentFrameIndex,
5538  uint32_t frameInUseCount,
5539  VkDeviceSize bufferImageGranularity,
5540  VkDeviceSize allocSize,
5541  VkDeviceSize allocAlignment,
5542  bool upperAddress,
5543  VmaSuballocationType allocType,
5544  bool canMakeOtherLost,
5545  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
5546  uint32_t strategy,
5547  VmaAllocationRequest* pAllocationRequest) = 0;
5548 
5549  virtual bool MakeRequestedAllocationsLost(
5550  uint32_t currentFrameIndex,
5551  uint32_t frameInUseCount,
5552  VmaAllocationRequest* pAllocationRequest) = 0;
5553 
5554  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
5555 
5556  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
5557 
5558  // Makes actual allocation based on request. Request must already be checked and valid.
5559  virtual void Alloc(
5560  const VmaAllocationRequest& request,
5561  VmaSuballocationType type,
5562  VkDeviceSize allocSize,
5563  VmaAllocation hAllocation) = 0;
5564 
5565  // Frees suballocation assigned to given memory region.
5566  virtual void Free(const VmaAllocation allocation) = 0;
5567  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
5568 
5569 protected:
5570  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
5571 
5572 #if VMA_STATS_STRING_ENABLED
5573  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
5574  VkDeviceSize unusedBytes,
5575  size_t allocationCount,
5576  size_t unusedRangeCount) const;
5577  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
5578  VkDeviceSize offset,
5579  VmaAllocation hAllocation) const;
5580  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
5581  VkDeviceSize offset,
5582  VkDeviceSize size) const;
5583  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
5584 #endif
5585 
5586 private:
5587  VkDeviceSize m_Size;
5588  const VkAllocationCallbacks* m_pAllocationCallbacks;
5589 };
5590 
5591 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
5592  VMA_ASSERT(0 && "Validation failed: " #cond); \
5593  return false; \
5594  } } while(false)
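// Illustrative usage sketch (not in the original source): a Validate()
// implementation built on VMA_VALIDATE bails out (asserting in debug builds)
// as soon as any invariant fails. The invariants below are hypothetical:
//
//   bool Validate() const
//   {
//       VMA_VALIDATE(m_SumFreeSize <= GetSize());
//       VMA_VALIDATE(!m_Suballocations.empty());
//       return true; // All invariants held.
//   }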
5595 
5596 class VmaBlockMetadata_Generic : public VmaBlockMetadata
5597 {
5598  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
5599 public:
5600  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
5601  virtual ~VmaBlockMetadata_Generic();
5602  virtual void Init(VkDeviceSize size);
5603 
5604  virtual bool Validate() const;
5605  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
5606  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5607  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5608  virtual bool IsEmpty() const;
5609 
5610  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5611  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5612 
5613 #if VMA_STATS_STRING_ENABLED
5614  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5615 #endif
5616 
5617  virtual bool CreateAllocationRequest(
5618  uint32_t currentFrameIndex,
5619  uint32_t frameInUseCount,
5620  VkDeviceSize bufferImageGranularity,
5621  VkDeviceSize allocSize,
5622  VkDeviceSize allocAlignment,
5623  bool upperAddress,
5624  VmaSuballocationType allocType,
5625  bool canMakeOtherLost,
5626  uint32_t strategy,
5627  VmaAllocationRequest* pAllocationRequest);
5628 
5629  virtual bool MakeRequestedAllocationsLost(
5630  uint32_t currentFrameIndex,
5631  uint32_t frameInUseCount,
5632  VmaAllocationRequest* pAllocationRequest);
5633 
5634  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5635 
5636  virtual VkResult CheckCorruption(const void* pBlockData);
5637 
5638  virtual void Alloc(
5639  const VmaAllocationRequest& request,
5640  VmaSuballocationType type,
5641  VkDeviceSize allocSize,
5642  VmaAllocation hAllocation);
5643 
5644  virtual void Free(const VmaAllocation allocation);
5645  virtual void FreeAtOffset(VkDeviceSize offset);
5646 
5647  ////////////////////////////////////////////////////////////////////////////////
5648  // For defragmentation
5649 
5650  bool IsBufferImageGranularityConflictPossible(
5651  VkDeviceSize bufferImageGranularity,
5652  VmaSuballocationType& inOutPrevSuballocType) const;
5653 
5654 private:
5655  friend class VmaDefragmentationAlgorithm_Generic;
5656  friend class VmaDefragmentationAlgorithm_Fast;
5657 
5658  uint32_t m_FreeCount;
5659  VkDeviceSize m_SumFreeSize;
5660  VmaSuballocationList m_Suballocations;
5661  // Suballocations that are free and have size greater than certain threshold.
5662  // Sorted by size, ascending.
5663  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
5664 
5665  bool ValidateFreeSuballocationList() const;
5666 
5667  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
5668  // If yes, fills pOffset and returns true. If no, returns false.
5669  bool CheckAllocation(
5670  uint32_t currentFrameIndex,
5671  uint32_t frameInUseCount,
5672  VkDeviceSize bufferImageGranularity,
5673  VkDeviceSize allocSize,
5674  VkDeviceSize allocAlignment,
5675  VmaSuballocationType allocType,
5676  VmaSuballocationList::const_iterator suballocItem,
5677  bool canMakeOtherLost,
5678  VkDeviceSize* pOffset,
5679  size_t* itemsToMakeLostCount,
5680  VkDeviceSize* pSumFreeSize,
5681  VkDeviceSize* pSumItemSize) const;
5682  // Given a free suballocation, merges it with the following one, which must also be free.
5683  void MergeFreeWithNext(VmaSuballocationList::iterator item);
5684  // Releases given suballocation, making it free.
5685  // Merges it with adjacent free suballocations if applicable.
5686  // Returns iterator to new free suballocation at this place.
5687  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
5688  // Given a free suballocation, inserts it into the sorted list
5689  // m_FreeSuballocationsBySize if it is suitable.
5690  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
5691  // Given a free suballocation, removes it from the sorted list
5692  // m_FreeSuballocationsBySize if it is suitable.
5693  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
5694 };
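// Illustrative note (not in the original source): because
// m_FreeSuballocationsBySize is sorted by size ascending, a best-fit search
// can binary-search for the first free suballocation with size >= allocSize,
// along these lines (the real code also checks alignment and
// bufferImageGranularity via CheckAllocation()):
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       allocSize,
//       VmaSuballocationItemSizeLess()); // comparator assumed from this file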
5695 
5696 /*
5697 Allocations and their references in the internal data structure look like this:
5698 
5699 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5700 
5701  0 +-------+
5702  | |
5703  | |
5704  | |
5705  +-------+
5706  | Alloc | 1st[m_1stNullItemsBeginCount]
5707  +-------+
5708  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5709  +-------+
5710  | ... |
5711  +-------+
5712  | Alloc | 1st[1st.size() - 1]
5713  +-------+
5714  | |
5715  | |
5716  | |
5717 GetSize() +-------+
5718 
5719 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5720 
5721  0 +-------+
5722  | Alloc | 2nd[0]
5723  +-------+
5724  | Alloc | 2nd[1]
5725  +-------+
5726  | ... |
5727  +-------+
5728  | Alloc | 2nd[2nd.size() - 1]
5729  +-------+
5730  | |
5731  | |
5732  | |
5733  +-------+
5734  | Alloc | 1st[m_1stNullItemsBeginCount]
5735  +-------+
5736  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5737  +-------+
5738  | ... |
5739  +-------+
5740  | Alloc | 1st[1st.size() - 1]
5741  +-------+
5742  | |
5743 GetSize() +-------+
5744 
5745 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5746 
5747  0 +-------+
5748  | |
5749  | |
5750  | |
5751  +-------+
5752  | Alloc | 1st[m_1stNullItemsBeginCount]
5753  +-------+
5754  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5755  +-------+
5756  | ... |
5757  +-------+
5758  | Alloc | 1st[1st.size() - 1]
5759  +-------+
5760  | |
5761  | |
5762  | |
5763  +-------+
5764  | Alloc | 2nd[2nd.size() - 1]
5765  +-------+
5766  | ... |
5767  +-------+
5768  | Alloc | 2nd[1]
5769  +-------+
5770  | Alloc | 2nd[0]
5771 GetSize() +-------+
5772 
5773 */
5774 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5775 {
5776  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5777 public:
5778  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5779  virtual ~VmaBlockMetadata_Linear();
5780  virtual void Init(VkDeviceSize size);
5781 
5782  virtual bool Validate() const;
5783  virtual size_t GetAllocationCount() const;
5784  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5785  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5786  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5787 
5788  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5789  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5790 
5791 #if VMA_STATS_STRING_ENABLED
5792  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5793 #endif
5794 
5795  virtual bool CreateAllocationRequest(
5796  uint32_t currentFrameIndex,
5797  uint32_t frameInUseCount,
5798  VkDeviceSize bufferImageGranularity,
5799  VkDeviceSize allocSize,
5800  VkDeviceSize allocAlignment,
5801  bool upperAddress,
5802  VmaSuballocationType allocType,
5803  bool canMakeOtherLost,
5804  uint32_t strategy,
5805  VmaAllocationRequest* pAllocationRequest);
5806 
5807  virtual bool MakeRequestedAllocationsLost(
5808  uint32_t currentFrameIndex,
5809  uint32_t frameInUseCount,
5810  VmaAllocationRequest* pAllocationRequest);
5811 
5812  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5813 
5814  virtual VkResult CheckCorruption(const void* pBlockData);
5815 
5816  virtual void Alloc(
5817  const VmaAllocationRequest& request,
5818  VmaSuballocationType type,
5819  VkDeviceSize allocSize,
5820  VmaAllocation hAllocation);
5821 
5822  virtual void Free(const VmaAllocation allocation);
5823  virtual void FreeAtOffset(VkDeviceSize offset);
5824 
5825 private:
5826  /*
5827  There are two suballocation vectors, used in ping-pong way.
5828  The one with index m_1stVectorIndex is called 1st.
5829  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5830  2nd can be non-empty only when 1st is not empty.
5831  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5832  */
5833  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5834 
5835  enum SECOND_VECTOR_MODE
5836  {
5837  SECOND_VECTOR_EMPTY,
5838  /*
5839  Suballocations in 2nd vector are created later than the ones in 1st, but they
5840  all have smaller offsets.
5841  */
5842  SECOND_VECTOR_RING_BUFFER,
5843  /*
5844  Suballocations in 2nd vector are upper side of double stack.
5845  They all have offsets higher than those in 1st vector.
5846  Top of this stack means smaller offsets, but higher indices in this vector.
5847  */
5848  SECOND_VECTOR_DOUBLE_STACK,
5849  };
5850 
5851  VkDeviceSize m_SumFreeSize;
5852  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5853  uint32_t m_1stVectorIndex;
5854  SECOND_VECTOR_MODE m_2ndVectorMode;
5855 
5856  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5857  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5858  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5859  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5860 
5861  // Number of items in 1st vector with hAllocation = null at the beginning.
5862  size_t m_1stNullItemsBeginCount;
5863  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5864  size_t m_1stNullItemsMiddleCount;
5865  // Number of items in 2nd vector with hAllocation = null.
5866  size_t m_2ndNullItemsCount;
5867 
5868  bool ShouldCompact1st() const;
5869  void CleanupAfterFree();
5870 
5871  bool CreateAllocationRequest_LowerAddress(
5872  uint32_t currentFrameIndex,
5873  uint32_t frameInUseCount,
5874  VkDeviceSize bufferImageGranularity,
5875  VkDeviceSize allocSize,
5876  VkDeviceSize allocAlignment,
5877  VmaSuballocationType allocType,
5878  bool canMakeOtherLost,
5879  uint32_t strategy,
5880  VmaAllocationRequest* pAllocationRequest);
5881  bool CreateAllocationRequest_UpperAddress(
5882  uint32_t currentFrameIndex,
5883  uint32_t frameInUseCount,
5884  VkDeviceSize bufferImageGranularity,
5885  VkDeviceSize allocSize,
5886  VkDeviceSize allocAlignment,
5887  VmaSuballocationType allocType,
5888  bool canMakeOtherLost,
5889  uint32_t strategy,
5890  VmaAllocationRequest* pAllocationRequest);
5891 };
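// Illustrative note (not in the original source, behavior sketched from the
// diagram above): used as a ring buffer for per-frame data, allocations are
// appended at the end of 1st; once frees open space at the start of the
// block, new allocations go to 2nd (SECOND_VECTOR_RING_BUFFER) and wrap
// around to low offsets. When every item of 1st has been freed, the two
// vectors swap roles (m_1stVectorIndex ^= 1) and the cycle repeats.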
5892 
5893 /*
5894 - GetSize() is the original size of the allocated memory block.
5895 - m_UsableSize is this size aligned down to a power of two.
5896  All allocations and calculations happen relative to m_UsableSize.
5897 - GetUnusableSize() is the difference between them.
5898  It is reported as a separate, unused range, not available for allocations.
5899 
5900 Node at level 0 has size = m_UsableSize.
5901 Each subsequent level contains nodes half the size of the previous level.
5902 m_LevelCount is the maximum number of levels to use in the current object.
5903 */
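/*
Worked example (illustrative, not in the original source): for a block of
GetSize() = 1000 bytes, m_UsableSize = 512 and GetUnusableSize() = 488.
Level node sizes are then 512, 256, 128, 64, 32 (= MIN_NODE_SIZE), so an
allocation of 200 bytes is rounded up to one level-1 node of 256 bytes,
wasting 56 bytes to internal fragmentation.
*/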
5904 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5905 {
5906  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5907 public:
5908  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5909  virtual ~VmaBlockMetadata_Buddy();
5910  virtual void Init(VkDeviceSize size);
5911 
5912  virtual bool Validate() const;
5913  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5914  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5915  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5916  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5917 
5918  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5919  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5920 
5921 #if VMA_STATS_STRING_ENABLED
5922  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5923 #endif
5924 
5925  virtual bool CreateAllocationRequest(
5926  uint32_t currentFrameIndex,
5927  uint32_t frameInUseCount,
5928  VkDeviceSize bufferImageGranularity,
5929  VkDeviceSize allocSize,
5930  VkDeviceSize allocAlignment,
5931  bool upperAddress,
5932  VmaSuballocationType allocType,
5933  bool canMakeOtherLost,
5934  uint32_t strategy,
5935  VmaAllocationRequest* pAllocationRequest);
5936 
5937  virtual bool MakeRequestedAllocationsLost(
5938  uint32_t currentFrameIndex,
5939  uint32_t frameInUseCount,
5940  VmaAllocationRequest* pAllocationRequest);
5941 
5942  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5943 
5944  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5945 
5946  virtual void Alloc(
5947  const VmaAllocationRequest& request,
5948  VmaSuballocationType type,
5949  VkDeviceSize allocSize,
5950  VmaAllocation hAllocation);
5951 
5952  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5953  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5954 
5955 private:
5956  static const VkDeviceSize MIN_NODE_SIZE = 32;
5957  static const size_t MAX_LEVELS = 30;
5958 
5959  struct ValidationContext
5960  {
5961  size_t calculatedAllocationCount;
5962  size_t calculatedFreeCount;
5963  VkDeviceSize calculatedSumFreeSize;
5964 
5965  ValidationContext() :
5966  calculatedAllocationCount(0),
5967  calculatedFreeCount(0),
5968  calculatedSumFreeSize(0) { }
5969  };
5970 
5971  struct Node
5972  {
5973  VkDeviceSize offset;
5974  enum TYPE
5975  {
5976  TYPE_FREE,
5977  TYPE_ALLOCATION,
5978  TYPE_SPLIT,
5979  TYPE_COUNT
5980  } type;
5981  Node* parent;
5982  Node* buddy;
5983 
5984  union
5985  {
5986  struct
5987  {
5988  Node* prev;
5989  Node* next;
5990  } free;
5991  struct
5992  {
5993  VmaAllocation alloc;
5994  } allocation;
5995  struct
5996  {
5997  Node* leftChild;
5998  } split;
5999  };
6000  };
6001 
6002  // Size of the memory block aligned down to a power of two.
6003  VkDeviceSize m_UsableSize;
6004  uint32_t m_LevelCount;
6005 
6006  Node* m_Root;
6007  struct {
6008  Node* front;
6009  Node* back;
6010  } m_FreeList[MAX_LEVELS];
6011  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6012  size_t m_AllocationCount;
6013  // Number of nodes in the tree with type == TYPE_FREE.
6014  size_t m_FreeCount;
6015  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6016  VkDeviceSize m_SumFreeSize;
6017 
6018  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6019  void DeleteNode(Node* node);
6020  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6021  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6022  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6023  // Alloc passed just for validation. Can be null.
6024  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6025  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6026  // Adds node to the front of FreeList at given level.
6027  // node->type must be FREE.
6028  // node->free.prev, next can be undefined.
6029  void AddToFreeListFront(uint32_t level, Node* node);
6030  // Removes node from FreeList at given level.
6031  // node->type must be FREE.
6032  // node->free.prev, next stay untouched.
6033  void RemoveFromFreeList(uint32_t level, Node* node);
6034 
6035 #if VMA_STATS_STRING_ENABLED
6036  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6037 #endif
6038 };
6039 
6040 /*
6041 Represents a single block of device memory (`VkDeviceMemory`) with all the
6042 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6043 
6044 Thread-safety: This class must be externally synchronized.
6045 */
6046 class VmaDeviceMemoryBlock
6047 {
6048  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6049 public:
6050  VmaBlockMetadata* m_pMetadata;
6051 
6052  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6053 
6054  ~VmaDeviceMemoryBlock()
6055  {
6056  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6057  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6058  }
6059 
6060  // Always call after construction.
6061  void Init(
6062  VmaAllocator hAllocator,
6063  VmaPool hParentPool,
6064  uint32_t newMemoryTypeIndex,
6065  VkDeviceMemory newMemory,
6066  VkDeviceSize newSize,
6067  uint32_t id,
6068  uint32_t algorithm);
6069  // Always call before destruction.
6070  void Destroy(VmaAllocator allocator);
6071 
6072  VmaPool GetParentPool() const { return m_hParentPool; }
6073  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6074  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6075  uint32_t GetId() const { return m_Id; }
6076  void* GetMappedData() const { return m_pMappedData; }
6077 
6078  // Validates all data structures inside this object. If not valid, returns false.
6079  bool Validate() const;
6080 
6081  VkResult CheckCorruption(VmaAllocator hAllocator);
6082 
6083  // ppData can be null.
6084  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6085  void Unmap(VmaAllocator hAllocator, uint32_t count);
6086 
6087  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6088  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6089 
6090  VkResult BindBufferMemory(
6091  const VmaAllocator hAllocator,
6092  const VmaAllocation hAllocation,
6093  VkDeviceSize allocationLocalOffset,
6094  VkBuffer hBuffer,
6095  const void* pNext);
6096  VkResult BindImageMemory(
6097  const VmaAllocator hAllocator,
6098  const VmaAllocation hAllocation,
6099  VkDeviceSize allocationLocalOffset,
6100  VkImage hImage,
6101  const void* pNext);
6102 
6103 private:
6104  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
6105  uint32_t m_MemoryTypeIndex;
6106  uint32_t m_Id;
6107  VkDeviceMemory m_hMemory;
6108 
6109  /*
6110  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6111  Also protects m_MapCount, m_pMappedData.
6112  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6113  */
6114  VMA_MUTEX m_Mutex;
6115  uint32_t m_MapCount;
6116  void* m_pMappedData;
6117 };
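// Illustrative usage sketch (not in the original source): Map()/Unmap() are
// reference-counted per block, so only the first Map() calls vkMapMemory and
// only the last Unmap() calls vkUnmapMemory:
//
//   void* pData = VMA_NULL;
//   if(pBlock->Map(hAllocator, 1, &pData) == VK_SUCCESS) // m_MapCount = 1
//   {
//       pBlock->Map(hAllocator, 1, VMA_NULL); // m_MapCount = 2, no Vulkan call
//       // ... read/write through pData ...
//       pBlock->Unmap(hAllocator, 2);         // m_MapCount = 0, vkUnmapMemory
//   }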
6118 
6119 struct VmaPointerLess
6120 {
6121  bool operator()(const void* lhs, const void* rhs) const
6122  {
6123  return lhs < rhs;
6124  }
6125 };
6126 
6127 struct VmaDefragmentationMove
6128 {
6129  size_t srcBlockIndex;
6130  size_t dstBlockIndex;
6131  VkDeviceSize srcOffset;
6132  VkDeviceSize dstOffset;
6133  VkDeviceSize size;
6134 };
6135 
6136 class VmaDefragmentationAlgorithm;
6137 
6138 /*
6139 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6140 Vulkan memory type.
6141 
6142 Synchronized internally with a mutex.
6143 */
6144 struct VmaBlockVector
6145 {
6146  VMA_CLASS_NO_COPY(VmaBlockVector)
6147 public:
6148  VmaBlockVector(
6149  VmaAllocator hAllocator,
6150  VmaPool hParentPool,
6151  uint32_t memoryTypeIndex,
6152  VkDeviceSize preferredBlockSize,
6153  size_t minBlockCount,
6154  size_t maxBlockCount,
6155  VkDeviceSize bufferImageGranularity,
6156  uint32_t frameInUseCount,
6157  bool isCustomPool,
6158  bool explicitBlockSize,
6159  uint32_t algorithm);
6160  ~VmaBlockVector();
6161 
6162  VkResult CreateMinBlocks();
6163 
6164  VmaPool GetParentPool() const { return m_hParentPool; }
6165  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6166  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
6167  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
6168  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
6169  uint32_t GetAlgorithm() const { return m_Algorithm; }
6170 
6171  void GetPoolStats(VmaPoolStats* pStats);
6172 
6173  bool IsEmpty() const { return m_Blocks.empty(); }
6174  bool IsCorruptionDetectionEnabled() const;
6175 
6176  VkResult Allocate(
6177  uint32_t currentFrameIndex,
6178  VkDeviceSize size,
6179  VkDeviceSize alignment,
6180  const VmaAllocationCreateInfo& createInfo,
6181  VmaSuballocationType suballocType,
6182  size_t allocationCount,
6183  VmaAllocation* pAllocations);
6184 
6185  void Free(const VmaAllocation hAllocation);
6186 
6187  // Adds statistics of this BlockVector to pStats.
6188  void AddStats(VmaStats* pStats);
6189 
6190 #if VMA_STATS_STRING_ENABLED
6191  void PrintDetailedMap(class VmaJsonWriter& json);
6192 #endif
6193 
6194  void MakePoolAllocationsLost(
6195  uint32_t currentFrameIndex,
6196  size_t* pLostAllocationCount);
6197  VkResult CheckCorruption();
6198 
6199  // Saves results in pCtx->res.
6200  void Defragment(
6201  class VmaBlockVectorDefragmentationContext* pCtx,
6202  VmaDefragmentationStats* pStats,
6203  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
6204  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
6205  VkCommandBuffer commandBuffer);
6206  void DefragmentationEnd(
6207  class VmaBlockVectorDefragmentationContext* pCtx,
6208  VmaDefragmentationStats* pStats);
6209 
6210  ////////////////////////////////////////////////////////////////////////////////
6211  // To be used only while the m_Mutex is locked. Used during defragmentation.
6212 
6213  size_t GetBlockCount() const { return m_Blocks.size(); }
6214  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
6215  size_t CalcAllocationCount() const;
6216  bool IsBufferImageGranularityConflictPossible() const;
6217 
6218 private:
6219  friend class VmaDefragmentationAlgorithm_Generic;
6220 
6221  const VmaAllocator m_hAllocator;
6222  const VmaPool m_hParentPool;
6223  const uint32_t m_MemoryTypeIndex;
6224  const VkDeviceSize m_PreferredBlockSize;
6225  const size_t m_MinBlockCount;
6226  const size_t m_MaxBlockCount;
6227  const VkDeviceSize m_BufferImageGranularity;
6228  const uint32_t m_FrameInUseCount;
6229  const bool m_IsCustomPool;
6230  const bool m_ExplicitBlockSize;
6231  const uint32_t m_Algorithm;
6232  /* There can be at most one block that is completely empty - a
6233  hysteresis to avoid the pessimistic case of alternately creating and
6234  destroying a VkDeviceMemory. */
6235  bool m_HasEmptyBlock;
6236  VMA_RW_MUTEX m_Mutex;
6237  // Incrementally sorted by sumFreeSize, ascending.
6238  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
6239  uint32_t m_NextBlockId;
6240 
6241  VkDeviceSize CalcMaxBlockSize() const;
6242 
6243  // Finds and removes given block from vector.
6244  void Remove(VmaDeviceMemoryBlock* pBlock);
6245 
6246  // Performs single step in sorting m_Blocks. They may not be fully sorted
6247  // after this call.
6248  void IncrementallySortBlocks();
6249 
6250  VkResult AllocatePage(
6251  uint32_t currentFrameIndex,
6252  VkDeviceSize size,
6253  VkDeviceSize alignment,
6254  const VmaAllocationCreateInfo& createInfo,
6255  VmaSuballocationType suballocType,
6256  VmaAllocation* pAllocation);
6257 
6258  // To be used only without CAN_MAKE_OTHER_LOST flag.
6259  VkResult AllocateFromBlock(
6260  VmaDeviceMemoryBlock* pBlock,
6261  uint32_t currentFrameIndex,
6262  VkDeviceSize size,
6263  VkDeviceSize alignment,
6264  VmaAllocationCreateFlags allocFlags,
6265  void* pUserData,
6266  VmaSuballocationType suballocType,
6267  uint32_t strategy,
6268  VmaAllocation* pAllocation);
6269 
6270  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
6271 
6272  // Saves result to pCtx->res.
6273  void ApplyDefragmentationMovesCpu(
6274  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6275  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
6276  // Saves result to pCtx->res.
6277  void ApplyDefragmentationMovesGpu(
6278  class VmaBlockVectorDefragmentationContext* pDefragCtx,
6279  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6280  VkCommandBuffer commandBuffer);
6281 
6282  /*
6283  Used during defragmentation. pDefragmentationStats is optional. It's in/out
6284  - updated with new data.
6285  */
6286  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
6287 };
6288 
6289 struct VmaPool_T
6290 {
6291  VMA_CLASS_NO_COPY(VmaPool_T)
6292 public:
6293  VmaBlockVector m_BlockVector;
6294 
6295  VmaPool_T(
6296  VmaAllocator hAllocator,
6297  const VmaPoolCreateInfo& createInfo,
6298  VkDeviceSize preferredBlockSize);
6299  ~VmaPool_T();
6300 
6301  uint32_t GetId() const { return m_Id; }
6302  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
6303 
6304 #if VMA_STATS_STRING_ENABLED
6305  //void PrintDetailedMap(class VmaStringBuilder& sb);
6306 #endif
6307 
6308 private:
6309  uint32_t m_Id;
6310 };
6311 
6312 /*
6313 Performs defragmentation:
6314 
6315 - Updates `pBlockVector->m_pMetadata`.
6316 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
6317 - Does not move actual data, only returns requested moves as `moves`.
6318 */
6319 class VmaDefragmentationAlgorithm
6320 {
6321  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
6322 public:
6323  VmaDefragmentationAlgorithm(
6324  VmaAllocator hAllocator,
6325  VmaBlockVector* pBlockVector,
6326  uint32_t currentFrameIndex) :
6327  m_hAllocator(hAllocator),
6328  m_pBlockVector(pBlockVector),
6329  m_CurrentFrameIndex(currentFrameIndex)
6330  {
6331  }
6332  virtual ~VmaDefragmentationAlgorithm()
6333  {
6334  }
6335 
6336  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
6337  virtual void AddAll() = 0;
6338 
6339  virtual VkResult Defragment(
6340  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6341  VkDeviceSize maxBytesToMove,
6342  uint32_t maxAllocationsToMove) = 0;
6343 
6344  virtual VkDeviceSize GetBytesMoved() const = 0;
6345  virtual uint32_t GetAllocationsMoved() const = 0;
6346 
6347 protected:
6348  VmaAllocator const m_hAllocator;
6349  VmaBlockVector* const m_pBlockVector;
6350  const uint32_t m_CurrentFrameIndex;
6351 
6352  struct AllocationInfo
6353  {
6354  VmaAllocation m_hAllocation;
6355  VkBool32* m_pChanged;
6356 
6357  AllocationInfo() :
6358  m_hAllocation(VK_NULL_HANDLE),
6359  m_pChanged(VMA_NULL)
6360  {
6361  }
6362  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
6363  m_hAllocation(hAlloc),
6364  m_pChanged(pChanged)
6365  {
6366  }
6367  };
6368 };
6369 
6370 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
6371 {
6372  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
6373 public:
6374  VmaDefragmentationAlgorithm_Generic(
6375  VmaAllocator hAllocator,
6376  VmaBlockVector* pBlockVector,
6377  uint32_t currentFrameIndex,
6378  bool overlappingMoveSupported);
6379  virtual ~VmaDefragmentationAlgorithm_Generic();
6380 
6381  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6382  virtual void AddAll() { m_AllAllocations = true; }
6383 
6384  virtual VkResult Defragment(
6385  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6386  VkDeviceSize maxBytesToMove,
6387  uint32_t maxAllocationsToMove);
6388 
6389  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6390  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6391 
6392 private:
6393  uint32_t m_AllocationCount;
6394  bool m_AllAllocations;
6395 
6396  VkDeviceSize m_BytesMoved;
6397  uint32_t m_AllocationsMoved;
6398 
6399  struct AllocationInfoSizeGreater
6400  {
6401  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6402  {
6403  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
6404  }
6405  };
6406 
6407  struct AllocationInfoOffsetGreater
6408  {
6409  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
6410  {
6411  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
6412  }
6413  };
6414 
6415  struct BlockInfo
6416  {
6417  size_t m_OriginalBlockIndex;
6418  VmaDeviceMemoryBlock* m_pBlock;
6419  bool m_HasNonMovableAllocations;
6420  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
6421 
6422  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
6423  m_OriginalBlockIndex(SIZE_MAX),
6424  m_pBlock(VMA_NULL),
6425  m_HasNonMovableAllocations(true),
6426  m_Allocations(pAllocationCallbacks)
6427  {
6428  }
6429 
6430  void CalcHasNonMovableAllocations()
6431  {
6432  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
6433  const size_t defragmentAllocCount = m_Allocations.size();
6434  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
6435  }
6436 
6437  void SortAllocationsBySizeDescending()
6438  {
6439  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
6440  }
6441 
6442  void SortAllocationsByOffsetDescending()
6443  {
6444  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
6445  }
6446  };
6447 
6448  struct BlockPointerLess
6449  {
6450  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
6451  {
6452  return pLhsBlockInfo->m_pBlock < pRhsBlock;
6453  }
6454  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6455  {
6456  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
6457  }
6458  };
6459 
6460  // 1. Blocks with some non-movable allocations go first.
6461  // 2. Blocks with smaller sumFreeSize go first.
6462  struct BlockInfoCompareMoveDestination
6463  {
6464  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
6465  {
6466  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
6467  {
6468  return true;
6469  }
6470  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
6471  {
6472  return false;
6473  }
6474  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
6475  {
6476  return true;
6477  }
6478  return false;
6479  }
6480  };
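    // Illustrative example (not in the original source): given destination
    // candidates
    //   X: has non-movable allocations, sumFreeSize = 4 MiB
    //   Y: fully movable,               sumFreeSize = 1 MiB
    // X sorts before Y: data is packed first into blocks that cannot be
    // emptied anyway, and then into the fullest blocks, so that emptier
    // blocks can be released when defragmentation ends.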
6481 
6482  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
6483  BlockInfoVector m_Blocks;
6484 
6485  VkResult DefragmentRound(
6486  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6487  VkDeviceSize maxBytesToMove,
6488  uint32_t maxAllocationsToMove);
6489 
6490  size_t CalcBlocksWithNonMovableCount() const;
6491 
6492  static bool MoveMakesSense(
6493  size_t dstBlockIndex, VkDeviceSize dstOffset,
6494  size_t srcBlockIndex, VkDeviceSize srcOffset);
6495 };
6496 
6497 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
6498 {
6499  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
6500 public:
6501  VmaDefragmentationAlgorithm_Fast(
6502  VmaAllocator hAllocator,
6503  VmaBlockVector* pBlockVector,
6504  uint32_t currentFrameIndex,
6505  bool overlappingMoveSupported);
6506  virtual ~VmaDefragmentationAlgorithm_Fast();
6507 
6508  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
6509  virtual void AddAll() { m_AllAllocations = true; }
6510 
6511  virtual VkResult Defragment(
6512  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
6513  VkDeviceSize maxBytesToMove,
6514  uint32_t maxAllocationsToMove);
6515 
6516  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
6517  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
6518 
6519 private:
6520  struct BlockInfo
6521  {
6522  size_t origBlockIndex;
6523  };
6524 
6525  class FreeSpaceDatabase
6526  {
6527  public:
6528  FreeSpaceDatabase()
6529  {
6530  FreeSpace s = {};
6531  s.blockInfoIndex = SIZE_MAX;
6532  for(size_t i = 0; i < MAX_COUNT; ++i)
6533  {
6534  m_FreeSpaces[i] = s;
6535  }
6536  }
6537 
6538  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
6539  {
6540  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6541  {
6542  return;
6543  }
6544 
6545  // Find the first invalid entry or, failing that, the smallest one.
6546  size_t bestIndex = SIZE_MAX;
6547  for(size_t i = 0; i < MAX_COUNT; ++i)
6548  {
6549  // Empty structure.
6550  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
6551  {
6552  bestIndex = i;
6553  break;
6554  }
6555  if(m_FreeSpaces[i].size < size &&
6556  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
6557  {
6558  bestIndex = i;
6559  }
6560  }
6561 
6562  if(bestIndex != SIZE_MAX)
6563  {
6564  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
6565  m_FreeSpaces[bestIndex].offset = offset;
6566  m_FreeSpaces[bestIndex].size = size;
6567  }
6568  }
6569 
6570  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
6571  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
6572  {
6573  size_t bestIndex = SIZE_MAX;
6574  VkDeviceSize bestFreeSpaceAfter = 0;
6575  for(size_t i = 0; i < MAX_COUNT; ++i)
6576  {
6577  // Structure is valid.
6578  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
6579  {
6580  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
6581  // Allocation fits into this structure.
6582  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
6583  {
6584  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
6585  (dstOffset + size);
6586  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
6587  {
6588  bestIndex = i;
6589  bestFreeSpaceAfter = freeSpaceAfter;
6590  }
6591  }
6592  }
6593  }
6594 
6595  if(bestIndex != SIZE_MAX)
6596  {
6597  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
6598  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
6599 
6600  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6601  {
6602  // Leave this structure for remaining empty space.
6603  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
6604  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
6605  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
6606  }
6607  else
6608  {
6609  // This structure becomes invalid.
6610  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
6611  }
6612 
6613  return true;
6614  }
6615 
6616  return false;
6617  }
6618 
6619  private:
6620  static const size_t MAX_COUNT = 4;
6621 
6622  struct FreeSpace
6623  {
6624  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
6625  VkDeviceSize offset;
6626  VkDeviceSize size;
6627  } m_FreeSpaces[MAX_COUNT];
6628  };
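    // Worked example (illustrative, not in the original source):
    // Register(0, 256, 1024) stores a 1024-byte hole at offset 256 of block 0.
    // Fetch(alignment = 512, size = 300, ...) aligns 256 up to 512, verifies
    // 512 + 300 <= 256 + 1024, and returns blockInfoIndex = 0, dstOffset = 512.
    // The entry then shrinks by (512 - 256) + 300 = 556 bytes to
    // offset = 812, size = 468. Holes smaller than
    // VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are never stored.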
6629 
6630  const bool m_OverlappingMoveSupported;
6631 
6632  uint32_t m_AllocationCount;
6633  bool m_AllAllocations;
6634 
6635  VkDeviceSize m_BytesMoved;
6636  uint32_t m_AllocationsMoved;
6637 
6638  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
6639 
6640  void PreprocessMetadata();
6641  void PostprocessMetadata();
6642  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
6643 };
6644 
6645 struct VmaBlockDefragmentationContext
6646 {
6647  enum BLOCK_FLAG
6648  {
6649  BLOCK_FLAG_USED = 0x00000001,
6650  };
6651  uint32_t flags;
6652  VkBuffer hBuffer;
6653 };
6654 
6655 class VmaBlockVectorDefragmentationContext
6656 {
6657  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
6658 public:
6659  VkResult res;
6660  bool mutexLocked;
6661  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
6662 
6663  VmaBlockVectorDefragmentationContext(
6664  VmaAllocator hAllocator,
6665  VmaPool hCustomPool, // Optional.
6666  VmaBlockVector* pBlockVector,
6667  uint32_t currFrameIndex);
6668  ~VmaBlockVectorDefragmentationContext();
6669 
6670  VmaPool GetCustomPool() const { return m_hCustomPool; }
6671  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
6672  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
6673 
6674  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
6675  void AddAll() { m_AllAllocations = true; }
6676 
6677  void Begin(bool overlappingMoveSupported);
6678 
6679 private:
6680  const VmaAllocator m_hAllocator;
6681  // Null if not from custom pool.
6682  const VmaPool m_hCustomPool;
6683  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
6684  VmaBlockVector* const m_pBlockVector;
6685  const uint32_t m_CurrFrameIndex;
6686  // Owner of this object.
6687  VmaDefragmentationAlgorithm* m_pAlgorithm;
6688 
6689  struct AllocInfo
6690  {
6691  VmaAllocation hAlloc;
6692  VkBool32* pChanged;
6693  };
6694  // Used between constructor and Begin.
6695  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
6696  bool m_AllAllocations;
6697 };
6698 
6699 struct VmaDefragmentationContext_T
6700 {
6701 private:
6702  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
6703 public:
6704  VmaDefragmentationContext_T(
6705  VmaAllocator hAllocator,
6706  uint32_t currFrameIndex,
6707  uint32_t flags,
6708  VmaDefragmentationStats* pStats);
6709  ~VmaDefragmentationContext_T();
6710 
6711  void AddPools(uint32_t poolCount, VmaPool* pPools);
6712  void AddAllocations(
6713  uint32_t allocationCount,
6714  VmaAllocation* pAllocations,
6715  VkBool32* pAllocationsChanged);
6716 
6717  /*
6718  Returns:
6719  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
6720  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
6721  - Negative value if error occurred and object can be destroyed immediately. (See the usage sketch after this struct.)
6722  */
6723  VkResult Defragment(
6724  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
6725  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
6726  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
6727 
6728 private:
6729  const VmaAllocator m_hAllocator;
6730  const uint32_t m_CurrFrameIndex;
6731  const uint32_t m_Flags;
6732  VmaDefragmentationStats* const m_pStats;
6733  // Owner of these objects.
6734  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
6735  // Owner of these objects.
6736  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
6737 };
6738 
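// Editor's usage sketch of the return contract documented in Defragment()
// above, as it surfaces through the public API. Assumes a valid `allocator`
// and an array `allocs` of `allocCount` allocations; error handling trimmed.
static void VmaDefragmentationUsageSketch(
    VmaAllocator allocator, VmaAllocation* allocs, uint32_t allocCount)
{
    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    const VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &ctx);
    if(res == VK_SUCCESS || res == VK_NOT_READY)
    {
        // VK_SUCCESS: finished immediately. VK_NOT_READY: the context had to
        // stay alive until this call. Negative values would mean an error.
        vmaDefragmentationEnd(allocator, ctx);
    }
}
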
6739 #if VMA_RECORDING_ENABLED
6740 
6741 class VmaRecorder
6742 {
6743 public:
6744  VmaRecorder();
6745  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6746  void WriteConfiguration(
6747  const VkPhysicalDeviceProperties& devProps,
6748  const VkPhysicalDeviceMemoryProperties& memProps,
6749  bool dedicatedAllocationExtensionEnabled,
6750  bool bindMemory2ExtensionEnabled);
6751  ~VmaRecorder();
6752 
6753  void RecordCreateAllocator(uint32_t frameIndex);
6754  void RecordDestroyAllocator(uint32_t frameIndex);
6755  void RecordCreatePool(uint32_t frameIndex,
6756  const VmaPoolCreateInfo& createInfo,
6757  VmaPool pool);
6758  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6759  void RecordAllocateMemory(uint32_t frameIndex,
6760  const VkMemoryRequirements& vkMemReq,
6761  const VmaAllocationCreateInfo& createInfo,
6762  VmaAllocation allocation);
6763  void RecordAllocateMemoryPages(uint32_t frameIndex,
6764  const VkMemoryRequirements& vkMemReq,
6765  const VmaAllocationCreateInfo& createInfo,
6766  uint64_t allocationCount,
6767  const VmaAllocation* pAllocations);
6768  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6769  const VkMemoryRequirements& vkMemReq,
6770  bool requiresDedicatedAllocation,
6771  bool prefersDedicatedAllocation,
6772  const VmaAllocationCreateInfo& createInfo,
6773  VmaAllocation allocation);
6774  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6775  const VkMemoryRequirements& vkMemReq,
6776  bool requiresDedicatedAllocation,
6777  bool prefersDedicatedAllocation,
6778  const VmaAllocationCreateInfo& createInfo,
6779  VmaAllocation allocation);
6780  void RecordFreeMemory(uint32_t frameIndex,
6781  VmaAllocation allocation);
6782  void RecordFreeMemoryPages(uint32_t frameIndex,
6783  uint64_t allocationCount,
6784  const VmaAllocation* pAllocations);
6785  void RecordSetAllocationUserData(uint32_t frameIndex,
6786  VmaAllocation allocation,
6787  const void* pUserData);
6788  void RecordCreateLostAllocation(uint32_t frameIndex,
6789  VmaAllocation allocation);
6790  void RecordMapMemory(uint32_t frameIndex,
6791  VmaAllocation allocation);
6792  void RecordUnmapMemory(uint32_t frameIndex,
6793  VmaAllocation allocation);
6794  void RecordFlushAllocation(uint32_t frameIndex,
6795  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6796  void RecordInvalidateAllocation(uint32_t frameIndex,
6797  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6798  void RecordCreateBuffer(uint32_t frameIndex,
6799  const VkBufferCreateInfo& bufCreateInfo,
6800  const VmaAllocationCreateInfo& allocCreateInfo,
6801  VmaAllocation allocation);
6802  void RecordCreateImage(uint32_t frameIndex,
6803  const VkImageCreateInfo& imageCreateInfo,
6804  const VmaAllocationCreateInfo& allocCreateInfo,
6805  VmaAllocation allocation);
6806  void RecordDestroyBuffer(uint32_t frameIndex,
6807  VmaAllocation allocation);
6808  void RecordDestroyImage(uint32_t frameIndex,
6809  VmaAllocation allocation);
6810  void RecordTouchAllocation(uint32_t frameIndex,
6811  VmaAllocation allocation);
6812  void RecordGetAllocationInfo(uint32_t frameIndex,
6813  VmaAllocation allocation);
6814  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6815  VmaPool pool);
6816  void RecordDefragmentationBegin(uint32_t frameIndex,
6817  const VmaDefragmentationInfo2& info,
6818  VmaDefragmentationContext ctx);
6819  void RecordDefragmentationEnd(uint32_t frameIndex,
6820  VmaDefragmentationContext ctx);
6821 
6822 private:
6823  struct CallParams
6824  {
6825  uint32_t threadId;
6826  double time;
6827  };
6828 
6829  class UserDataString
6830  {
6831  public:
6832  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6833  const char* GetString() const { return m_Str; }
6834 
6835  private:
6836  char m_PtrStr[17];
6837  const char* m_Str;
6838  };
6839 
6840  bool m_UseMutex;
6841  VmaRecordFlags m_Flags;
6842  FILE* m_File;
6843  VMA_MUTEX m_FileMutex;
6844  int64_t m_Freq;
6845  int64_t m_StartCounter;
6846 
6847  void GetBasicParams(CallParams& outParams);
6848 
6849  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6850  template<typename T>
6851  void PrintPointerList(uint64_t count, const T* pItems)
6852  {
6853  if(count)
6854  {
6855  fprintf(m_File, "%p", pItems[0]);
6856  for(uint64_t i = 1; i < count; ++i)
6857  {
6858  fprintf(m_File, " %p", pItems[i]);
6859  }
6860  }
6861  }
6862 
6863  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
6864  void Flush();
6865 };
6866 
6867 #endif // #if VMA_RECORDING_ENABLED
6868 
6869 /*
6870 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
6871 */
6872 class VmaAllocationObjectAllocator
6873 {
6874  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
6875 public:
6876  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
6877 
6878  VmaAllocation Allocate();
6879  void Free(VmaAllocation hAlloc);
6880 
6881 private:
6882  VMA_MUTEX m_Mutex;
6883  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
6884 };
6885 
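// Editor's note: the thread safety promised above reduces to taking m_Mutex
// around every access to the single-threaded VmaPoolAllocator. A plausible
// shape of both methods (hedged sketch; the actual definitions appear later
// in the implementation part of this file):
//
//   VmaAllocation VmaAllocationObjectAllocator::Allocate()
//   {
//       VmaMutexLock mutexLock(m_Mutex); // RAII lock, released on return
//       return m_Allocator.Alloc();
//   }
//   void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
//   {
//       VmaMutexLock mutexLock(m_Mutex);
//       m_Allocator.Free(hAlloc);
//   }
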
6886 struct VmaCurrentBudgetData
6887 {
6888  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
6889  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
6890 
6891 #if VMA_MEMORY_BUDGET
6892  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
6893  VMA_RW_MUTEX m_BudgetMutex;
6894  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
6895  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
6896  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
6897 #endif // #if VMA_MEMORY_BUDGET
6898 
6899  VmaCurrentBudgetData()
6900  {
6901  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
6902  {
6903  m_BlockBytes[heapIndex] = 0;
6904  m_AllocationBytes[heapIndex] = 0;
6905 #if VMA_MEMORY_BUDGET
6906  m_VulkanUsage[heapIndex] = 0;
6907  m_VulkanBudget[heapIndex] = 0;
6908  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
6909 #endif
6910  }
6911 
6912 #if VMA_MEMORY_BUDGET
6913  m_OperationsSinceBudgetFetch = 0;
6914 #endif
6915  }
6916 };
6917 
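// Editor's sketch (illustrative, not the library's code): how the per-heap
// atomic counters above are meant to be driven - bump on every successful
// block allocation, decrement on free, so budget queries never need a lock
// on the allocation hot path.
static void VmaBudgetAccountingSketch(VmaCurrentBudgetData& budget,
    uint32_t heapIndex, VkDeviceSize blockSize)
{
    budget.m_BlockBytes[heapIndex] += blockSize; // after vkAllocateMemory succeeds
    budget.m_BlockBytes[heapIndex] -= blockSize; // when the block is freed
}
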
6918 // Main allocator object.
6919 struct VmaAllocator_T
6920 {
6921  VMA_CLASS_NO_COPY(VmaAllocator_T)
6922 public:
6923  bool m_UseMutex;
6924  bool m_UseKhrDedicatedAllocation;
6925  bool m_UseKhrBindMemory2;
6926  bool m_UseExtMemoryBudget;
6927  VkDevice m_hDevice;
6928  VkInstance m_hInstance;
6929  bool m_AllocationCallbacksSpecified;
6930  VkAllocationCallbacks m_AllocationCallbacks;
6931  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
6932  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
6933 
6934  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
6935  uint32_t m_HeapSizeLimitMask;
6936 
6937  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
6938  VkPhysicalDeviceMemoryProperties m_MemProps;
6939 
6940  // Default pools.
6941  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
6942 
6943  // Each vector is sorted by memory (handle value).
6944  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
6945  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
6946  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
6947 
6948  VmaCurrentBudgetData m_Budget;
6949 
6950  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
6951  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
6952  ~VmaAllocator_T();
6953 
6954  const VkAllocationCallbacks* GetAllocationCallbacks() const
6955  {
6956  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
6957  }
6958  const VmaVulkanFunctions& GetVulkanFunctions() const
6959  {
6960  return m_VulkanFunctions;
6961  }
6962 
6963  VkDeviceSize GetBufferImageGranularity() const
6964  {
6965  return VMA_MAX(
6966  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
6967  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
6968  }
6969 
6970  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
6971  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
6972 
6973  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
6974  {
6975  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
6976  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
6977  }
6978  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
6979  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
6980  {
6981  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
6982  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
6983  }
6984  // Minimum alignment for all allocations in a specific memory type (see the flush-range sketch after this struct).
6985  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
6986  {
6987  return IsMemoryTypeNonCoherent(memTypeIndex) ?
6988  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
6989  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
6990  }
6991 
6992  bool IsIntegratedGpu() const
6993  {
6994  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
6995  }
6996 
6997 #if VMA_RECORDING_ENABLED
6998  VmaRecorder* GetRecorder() const { return m_pRecorder; }
6999 #endif
7000 
7001  void GetBufferMemoryRequirements(
7002  VkBuffer hBuffer,
7003  VkMemoryRequirements& memReq,
7004  bool& requiresDedicatedAllocation,
7005  bool& prefersDedicatedAllocation) const;
7006  void GetImageMemoryRequirements(
7007  VkImage hImage,
7008  VkMemoryRequirements& memReq,
7009  bool& requiresDedicatedAllocation,
7010  bool& prefersDedicatedAllocation) const;
7011 
7012  // Main allocation function.
7013  VkResult AllocateMemory(
7014  const VkMemoryRequirements& vkMemReq,
7015  bool requiresDedicatedAllocation,
7016  bool prefersDedicatedAllocation,
7017  VkBuffer dedicatedBuffer,
7018  VkImage dedicatedImage,
7019  const VmaAllocationCreateInfo& createInfo,
7020  VmaSuballocationType suballocType,
7021  size_t allocationCount,
7022  VmaAllocation* pAllocations);
7023 
7024  // Main deallocation function.
7025  void FreeMemory(
7026  size_t allocationCount,
7027  const VmaAllocation* pAllocations);
7028 
7029  VkResult ResizeAllocation(
7030  const VmaAllocation alloc,
7031  VkDeviceSize newSize);
7032 
7033  void CalculateStats(VmaStats* pStats);
7034 
7035  void GetBudget(
7036  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7037 
7038 #if VMA_STATS_STRING_ENABLED
7039  void PrintDetailedMap(class VmaJsonWriter& json);
7040 #endif
7041 
7042  VkResult DefragmentationBegin(
7043  const VmaDefragmentationInfo2& info,
7044  VmaDefragmentationStats* pStats,
7045  VmaDefragmentationContext* pContext);
7046  VkResult DefragmentationEnd(
7047  VmaDefragmentationContext context);
7048 
7049  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7050  bool TouchAllocation(VmaAllocation hAllocation);
7051 
7052  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7053  void DestroyPool(VmaPool pool);
7054  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7055 
7056  void SetCurrentFrameIndex(uint32_t frameIndex);
7057  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7058 
7059  void MakePoolAllocationsLost(
7060  VmaPool hPool,
7061  size_t* pLostAllocationCount);
7062  VkResult CheckPoolCorruption(VmaPool hPool);
7063  VkResult CheckCorruption(uint32_t memoryTypeBits);
7064 
7065  void CreateLostAllocation(VmaAllocation* pAllocation);
7066 
7067  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7068  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7069  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7070  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7071  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7072  VkResult BindVulkanBuffer(
7073  VkDeviceMemory memory,
7074  VkDeviceSize memoryOffset,
7075  VkBuffer buffer,
7076  const void* pNext);
7077  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7078  VkResult BindVulkanImage(
7079  VkDeviceMemory memory,
7080  VkDeviceSize memoryOffset,
7081  VkImage image,
7082  const void* pNext);
7083 
7084  VkResult Map(VmaAllocation hAllocation, void** ppData);
7085  void Unmap(VmaAllocation hAllocation);
7086 
7087  VkResult BindBufferMemory(
7088  VmaAllocation hAllocation,
7089  VkDeviceSize allocationLocalOffset,
7090  VkBuffer hBuffer,
7091  const void* pNext);
7092  VkResult BindImageMemory(
7093  VmaAllocation hAllocation,
7094  VkDeviceSize allocationLocalOffset,
7095  VkImage hImage,
7096  const void* pNext);
7097 
7098  void FlushOrInvalidateAllocation(
7099  VmaAllocation hAllocation,
7100  VkDeviceSize offset, VkDeviceSize size,
7101  VMA_CACHE_OPERATION op);
7102 
7103  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
7104 
7105  /*
7106  Returns a bit mask of memory types that can support defragmentation on GPU,
7107  i.e. those that allow creation of the buffer required for copy operations.
7108  */
7109  uint32_t GetGpuDefragmentationMemoryTypeBits();
7110 
7111 private:
7112  VkDeviceSize m_PreferredLargeHeapBlockSize;
7113 
7114  VkPhysicalDevice m_PhysicalDevice;
7115  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
7116  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
7117 
7118  VMA_RW_MUTEX m_PoolsMutex;
7119  // Protected by m_PoolsMutex. Sorted by pointer value.
7120  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
7121  uint32_t m_NextPoolId;
7122 
7123  VmaVulkanFunctions m_VulkanFunctions;
7124 
7125 #if VMA_RECORDING_ENABLED
7126  VmaRecorder* m_pRecorder;
7127 #endif
7128 
7129  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
7130 
7131  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
7132 
7133  VkResult AllocateMemoryOfType(
7134  VkDeviceSize size,
7135  VkDeviceSize alignment,
7136  bool dedicatedAllocation,
7137  VkBuffer dedicatedBuffer,
7138  VkImage dedicatedImage,
7139  const VmaAllocationCreateInfo& createInfo,
7140  uint32_t memTypeIndex,
7141  VmaSuballocationType suballocType,
7142  size_t allocationCount,
7143  VmaAllocation* pAllocations);
7144 
7145  // Helper function only to be used inside AllocateDedicatedMemory.
7146  VkResult AllocateDedicatedMemoryPage(
7147  VkDeviceSize size,
7148  VmaSuballocationType suballocType,
7149  uint32_t memTypeIndex,
7150  const VkMemoryAllocateInfo& allocInfo,
7151  bool map,
7152  bool isUserDataString,
7153  void* pUserData,
7154  VmaAllocation* pAllocation);
7155 
7156  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
7157  VkResult AllocateDedicatedMemory(
7158  VkDeviceSize size,
7159  VmaSuballocationType suballocType,
7160  uint32_t memTypeIndex,
7161  bool withinBudget,
7162  bool map,
7163  bool isUserDataString,
7164  void* pUserData,
7165  VkBuffer dedicatedBuffer,
7166  VkImage dedicatedImage,
7167  size_t allocationCount,
7168  VmaAllocation* pAllocations);
7169 
7170  void FreeDedicatedMemory(const VmaAllocation allocation);
7171 
7172  /*
7173  Calculates and returns a bit mask of memory types that can support defragmentation
7174  on GPU, i.e. those that allow creation of the buffer required for copy operations.
7175  */
7176  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
7177 
7178 #if VMA_MEMORY_BUDGET
7179  void UpdateVulkanBudget();
7180 #endif // #if VMA_MEMORY_BUDGET
7181 };
7182 
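// Editor's sketch tied to GetMemoryTypeMinAlignment() above: for HOST_VISIBLE
// but non-HOST_COHERENT memory, ranges passed to vkFlushMappedMemoryRanges()
// must be aligned to nonCoherentAtomSize, which is why that limit becomes the
// minimum allocation alignment. All parameters are assumed to exist.
static void VmaNonCoherentFlushSketch(VkDevice device, VkDeviceMemory memory,
    VkDeviceSize offset, VkDeviceSize size, const VkPhysicalDeviceProperties& props)
{
    const VkDeviceSize atom = props.limits.nonCoherentAtomSize;
    VkMappedMemoryRange range = {};
    range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = offset / atom * atom;                                  // round start down
    range.size = (offset + size + atom - 1) / atom * atom - range.offset; // round end up
    vkFlushMappedMemoryRanges(device, 1, &range);
}
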
7183 ////////////////////////////////////////////////////////////////////////////////
7184 // Memory allocation #2 after VmaAllocator_T definition
7185 
7186 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
7187 {
7188  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
7189 }
7190 
7191 static void VmaFree(VmaAllocator hAllocator, void* ptr)
7192 {
7193  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
7194 }
7195 
7196 template<typename T>
7197 static T* VmaAllocate(VmaAllocator hAllocator)
7198 {
7199  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
7200 }
7201 
7202 template<typename T>
7203 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
7204 {
7205  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
7206 }
7207 
7208 template<typename T>
7209 static void vma_delete(VmaAllocator hAllocator, T* ptr)
7210 {
7211  if(ptr != VMA_NULL)
7212  {
7213  ptr->~T();
7214  VmaFree(hAllocator, ptr);
7215  }
7216 }
7217 
7218 template<typename T>
7219 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
7220 {
7221  if(ptr != VMA_NULL)
7222  {
7223  for(size_t i = count; i--; )
7224  ptr[i].~T();
7225  VmaFree(hAllocator, ptr);
7226  }
7227 }
7228 
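// Editor's sketch of how the helpers above combine: raw storage comes from the
// allocator's callbacks, the object is constructed with placement new, and
// vma_delete runs the destructor before returning the memory. `SketchPayload`
// is an illustrative type, not part of the library.
struct SketchPayload { uint32_t value; };

static void VmaPlacementNewSketch(VmaAllocator hAllocator)
{
    SketchPayload* const p = new(VmaAllocate<SketchPayload>(hAllocator)) SketchPayload();
    p->value = 42;
    vma_delete(hAllocator, p); // calls ~SketchPayload(), then VmaFree
}
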
7229 ////////////////////////////////////////////////////////////////////////////////
7230 // VmaStringBuilder
7231 
7232 #if VMA_STATS_STRING_ENABLED
7233 
7234 class VmaStringBuilder
7235 {
7236 public:
7237  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
7238  size_t GetLength() const { return m_Data.size(); }
7239  const char* GetData() const { return m_Data.data(); }
7240 
7241  void Add(char ch) { m_Data.push_back(ch); }
7242  void Add(const char* pStr);
7243  void AddNewLine() { Add('\n'); }
7244  void AddNumber(uint32_t num);
7245  void AddNumber(uint64_t num);
7246  void AddPointer(const void* ptr);
7247 
7248 private:
7249  VmaVector< char, VmaStlAllocator<char> > m_Data;
7250 };
7251 
7252 void VmaStringBuilder::Add(const char* pStr)
7253 {
7254  const size_t strLen = strlen(pStr);
7255  if(strLen > 0)
7256  {
7257  const size_t oldCount = m_Data.size();
7258  m_Data.resize(oldCount + strLen);
7259  memcpy(m_Data.data() + oldCount, pStr, strLen);
7260  }
7261 }
7262 
7263 void VmaStringBuilder::AddNumber(uint32_t num)
7264 {
7265  char buf[11];
7266  buf[10] = '\0';
7267  char *p = &buf[10];
7268  do
7269  {
7270  *--p = '0' + (num % 10);
7271  num /= 10;
7272  }
7273  while(num);
7274  Add(p);
7275 }
7276 
7277 void VmaStringBuilder::AddNumber(uint64_t num)
7278 {
7279  char buf[21];
7280  buf[20] = '\0';
7281  char *p = &buf[20];
7282  do
7283  {
7284  *--p = '0' + (num % 10);
7285  num /= 10;
7286  }
7287  while(num);
7288  Add(p);
7289 }
7290 
7291 void VmaStringBuilder::AddPointer(const void* ptr)
7292 {
7293  char buf[21];
7294  VmaPtrToStr(buf, sizeof(buf), ptr);
7295  Add(buf);
7296 }
7297 
7298 #endif // #if VMA_STATS_STRING_ENABLED
7299 
7300 ////////////////////////////////////////////////////////////////////////////////
7301 // VmaJsonWriter
7302 
7303 #if VMA_STATS_STRING_ENABLED
7304 
7305 class VmaJsonWriter
7306 {
7307  VMA_CLASS_NO_COPY(VmaJsonWriter)
7308 public:
7309  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
7310  ~VmaJsonWriter();
7311 
7312  void BeginObject(bool singleLine = false);
7313  void EndObject();
7314 
7315  void BeginArray(bool singleLine = false);
7316  void EndArray();
7317 
7318  void WriteString(const char* pStr);
7319  void BeginString(const char* pStr = VMA_NULL);
7320  void ContinueString(const char* pStr);
7321  void ContinueString(uint32_t n);
7322  void ContinueString(uint64_t n);
7323  void ContinueString_Pointer(const void* ptr);
7324  void EndString(const char* pStr = VMA_NULL);
7325 
7326  void WriteNumber(uint32_t n);
7327  void WriteNumber(uint64_t n);
7328  void WriteBool(bool b);
7329  void WriteNull();
7330 
7331 private:
7332  static const char* const INDENT;
7333 
7334  enum COLLECTION_TYPE
7335  {
7336  COLLECTION_TYPE_OBJECT,
7337  COLLECTION_TYPE_ARRAY,
7338  };
7339  struct StackItem
7340  {
7341  COLLECTION_TYPE type;
7342  uint32_t valueCount;
7343  bool singleLineMode;
7344  };
7345 
7346  VmaStringBuilder& m_SB;
7347  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
7348  bool m_InsideString;
7349 
7350  void BeginValue(bool isString);
7351  void WriteIndent(bool oneLess = false);
7352 };
7353 
7354 const char* const VmaJsonWriter::INDENT = " ";
7355 
7356 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
7357  m_SB(sb),
7358  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
7359  m_InsideString(false)
7360 {
7361 }
7362 
7363 VmaJsonWriter::~VmaJsonWriter()
7364 {
7365  VMA_ASSERT(!m_InsideString);
7366  VMA_ASSERT(m_Stack.empty());
7367 }
7368 
7369 void VmaJsonWriter::BeginObject(bool singleLine)
7370 {
7371  VMA_ASSERT(!m_InsideString);
7372 
7373  BeginValue(false);
7374  m_SB.Add('{');
7375 
7376  StackItem item;
7377  item.type = COLLECTION_TYPE_OBJECT;
7378  item.valueCount = 0;
7379  item.singleLineMode = singleLine;
7380  m_Stack.push_back(item);
7381 }
7382 
7383 void VmaJsonWriter::EndObject()
7384 {
7385  VMA_ASSERT(!m_InsideString);
7386 
7387  WriteIndent(true);
7388  m_SB.Add('}');
7389 
7390  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
7391  m_Stack.pop_back();
7392 }
7393 
7394 void VmaJsonWriter::BeginArray(bool singleLine)
7395 {
7396  VMA_ASSERT(!m_InsideString);
7397 
7398  BeginValue(false);
7399  m_SB.Add('[');
7400 
7401  StackItem item;
7402  item.type = COLLECTION_TYPE_ARRAY;
7403  item.valueCount = 0;
7404  item.singleLineMode = singleLine;
7405  m_Stack.push_back(item);
7406 }
7407 
7408 void VmaJsonWriter::EndArray()
7409 {
7410  VMA_ASSERT(!m_InsideString);
7411 
7412  WriteIndent(true);
7413  m_SB.Add(']');
7414 
7415  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
7416  m_Stack.pop_back();
7417 }
7418 
7419 void VmaJsonWriter::WriteString(const char* pStr)
7420 {
7421  BeginString(pStr);
7422  EndString();
7423 }
7424 
7425 void VmaJsonWriter::BeginString(const char* pStr)
7426 {
7427  VMA_ASSERT(!m_InsideString);
7428 
7429  BeginValue(true);
7430  m_SB.Add('"');
7431  m_InsideString = true;
7432  if(pStr != VMA_NULL && pStr[0] != '\0')
7433  {
7434  ContinueString(pStr);
7435  }
7436 }
7437 
7438 void VmaJsonWriter::ContinueString(const char* pStr)
7439 {
7440  VMA_ASSERT(m_InsideString);
7441 
7442  const size_t strLen = strlen(pStr);
7443  for(size_t i = 0; i < strLen; ++i)
7444  {
7445  char ch = pStr[i];
7446  if(ch == '\\')
7447  {
7448  m_SB.Add("\\\\");
7449  }
7450  else if(ch == '"')
7451  {
7452  m_SB.Add("\\\"");
7453  }
7454  else if(ch >= 32)
7455  {
7456  m_SB.Add(ch);
7457  }
7458  else switch(ch)
7459  {
7460  case '\b':
7461  m_SB.Add("\\b");
7462  break;
7463  case '\f':
7464  m_SB.Add("\\f");
7465  break;
7466  case '\n':
7467  m_SB.Add("\\n");
7468  break;
7469  case '\r':
7470  m_SB.Add("\\r");
7471  break;
7472  case '\t':
7473  m_SB.Add("\\t");
7474  break;
7475  default:
7476  VMA_ASSERT(0 && "Character not currently supported.");
7477  break;
7478  }
7479  }
7480 }
7481 
7482 void VmaJsonWriter::ContinueString(uint32_t n)
7483 {
7484  VMA_ASSERT(m_InsideString);
7485  m_SB.AddNumber(n);
7486 }
7487 
7488 void VmaJsonWriter::ContinueString(uint64_t n)
7489 {
7490  VMA_ASSERT(m_InsideString);
7491  m_SB.AddNumber(n);
7492 }
7493 
7494 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
7495 {
7496  VMA_ASSERT(m_InsideString);
7497  m_SB.AddPointer(ptr);
7498 }
7499 
7500 void VmaJsonWriter::EndString(const char* pStr)
7501 {
7502  VMA_ASSERT(m_InsideString);
7503  if(pStr != VMA_NULL && pStr[0] != '\0')
7504  {
7505  ContinueString(pStr);
7506  }
7507  m_SB.Add('"');
7508  m_InsideString = false;
7509 }
7510 
7511 void VmaJsonWriter::WriteNumber(uint32_t n)
7512 {
7513  VMA_ASSERT(!m_InsideString);
7514  BeginValue(false);
7515  m_SB.AddNumber(n);
7516 }
7517 
7518 void VmaJsonWriter::WriteNumber(uint64_t n)
7519 {
7520  VMA_ASSERT(!m_InsideString);
7521  BeginValue(false);
7522  m_SB.AddNumber(n);
7523 }
7524 
7525 void VmaJsonWriter::WriteBool(bool b)
7526 {
7527  VMA_ASSERT(!m_InsideString);
7528  BeginValue(false);
7529  m_SB.Add(b ? "true" : "false");
7530 }
7531 
7532 void VmaJsonWriter::WriteNull()
7533 {
7534  VMA_ASSERT(!m_InsideString);
7535  BeginValue(false);
7536  m_SB.Add("null");
7537 }
7538 
7539 void VmaJsonWriter::BeginValue(bool isString)
7540 {
7541  if(!m_Stack.empty())
7542  {
7543  StackItem& currItem = m_Stack.back();
7544  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7545  currItem.valueCount % 2 == 0)
7546  {
7547  VMA_ASSERT(isString);
7548  }
7549 
7550  if(currItem.type == COLLECTION_TYPE_OBJECT &&
7551  currItem.valueCount % 2 != 0)
7552  {
7553  m_SB.Add(": ");
7554  }
7555  else if(currItem.valueCount > 0)
7556  {
7557  m_SB.Add(", ");
7558  WriteIndent();
7559  }
7560  else
7561  {
7562  WriteIndent();
7563  }
7564  ++currItem.valueCount;
7565  }
7566 }
7567 
7568 void VmaJsonWriter::WriteIndent(bool oneLess)
7569 {
7570  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7571  {
7572  m_SB.AddNewLine();
7573 
7574  size_t count = m_Stack.size();
7575  if(count > 0 && oneLess)
7576  {
7577  --count;
7578  }
7579  for(size_t i = 0; i < count; ++i)
7580  {
7581  m_SB.Add(INDENT);
7582  }
7583  }
7584 }
7585 
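// Editor's usage sketch for the writer above: Begin/End pairs nest, and inside
// an object keys and values alternate (BeginValue() asserts that every key is
// a string). Assumes a valid `hAllocator`.
static void VmaJsonWriterUsageSketch(VmaAllocator hAllocator)
{
    VmaStringBuilder sb(hAllocator);
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Name");         // key
    json.WriteString("DefaultPool");  // value
    json.WriteString("Size");         // key
    json.WriteNumber((uint32_t)1024); // value
    json.EndObject();
    // sb.GetData() now holds {"Name": "DefaultPool", "Size": 1024}, indented.
}
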
7586 #endif // #if VMA_STATS_STRING_ENABLED
7587 
7588 ////////////////////////////////////////////////////////////////////////////////
7589 
7590 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
7591 {
7592  if(IsUserDataString())
7593  {
7594  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
7595 
7596  FreeUserDataString(hAllocator);
7597 
7598  if(pUserData != VMA_NULL)
7599  {
7600  const char* const newStrSrc = (char*)pUserData;
7601  const size_t newStrLen = strlen(newStrSrc);
7602  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
7603  memcpy(newStrDst, newStrSrc, newStrLen + 1);
7604  m_pUserData = newStrDst;
7605  }
7606  }
7607  else
7608  {
7609  m_pUserData = pUserData;
7610  }
7611 }
7612 
7613 void VmaAllocation_T::ChangeBlockAllocation(
7614  VmaAllocator hAllocator,
7615  VmaDeviceMemoryBlock* block,
7616  VkDeviceSize offset)
7617 {
7618  VMA_ASSERT(block != VMA_NULL);
7619  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7620 
7621  // Move mapping reference counter from old block to new block.
7622  if(block != m_BlockAllocation.m_Block)
7623  {
7624  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
7625  if(IsPersistentMap())
7626  ++mapRefCount;
7627  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
7628  block->Map(hAllocator, mapRefCount, VMA_NULL);
7629  }
7630 
7631  m_BlockAllocation.m_Block = block;
7632  m_BlockAllocation.m_Offset = offset;
7633 }
7634 
7635 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
7636 {
7637  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
7638  m_BlockAllocation.m_Offset = newOffset;
7639 }
7640 
7641 VkDeviceSize VmaAllocation_T::GetOffset() const
7642 {
7643  switch(m_Type)
7644  {
7645  case ALLOCATION_TYPE_BLOCK:
7646  return m_BlockAllocation.m_Offset;
7647  case ALLOCATION_TYPE_DEDICATED:
7648  return 0;
7649  default:
7650  VMA_ASSERT(0);
7651  return 0;
7652  }
7653 }
7654 
7655 VkDeviceMemory VmaAllocation_T::GetMemory() const
7656 {
7657  switch(m_Type)
7658  {
7659  case ALLOCATION_TYPE_BLOCK:
7660  return m_BlockAllocation.m_Block->GetDeviceMemory();
7661  case ALLOCATION_TYPE_DEDICATED:
7662  return m_DedicatedAllocation.m_hMemory;
7663  default:
7664  VMA_ASSERT(0);
7665  return VK_NULL_HANDLE;
7666  }
7667 }
7668 
7669 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7670 {
7671  switch(m_Type)
7672  {
7673  case ALLOCATION_TYPE_BLOCK:
7674  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7675  case ALLOCATION_TYPE_DEDICATED:
7676  return m_DedicatedAllocation.m_MemoryTypeIndex;
7677  default:
7678  VMA_ASSERT(0);
7679  return UINT32_MAX;
7680  }
7681 }
7682 
7683 void* VmaAllocation_T::GetMappedData() const
7684 {
7685  switch(m_Type)
7686  {
7687  case ALLOCATION_TYPE_BLOCK:
7688  if(m_MapCount != 0)
7689  {
7690  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7691  VMA_ASSERT(pBlockData != VMA_NULL);
7692  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7693  }
7694  else
7695  {
7696  return VMA_NULL;
7697  }
7698  break;
7699  case ALLOCATION_TYPE_DEDICATED:
7700  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7701  return m_DedicatedAllocation.m_pMappedData;
7702  default:
7703  VMA_ASSERT(0);
7704  return VMA_NULL;
7705  }
7706 }
7707 
7708 bool VmaAllocation_T::CanBecomeLost() const
7709 {
7710  switch(m_Type)
7711  {
7712  case ALLOCATION_TYPE_BLOCK:
7713  return m_BlockAllocation.m_CanBecomeLost;
7714  case ALLOCATION_TYPE_DEDICATED:
7715  return false;
7716  default:
7717  VMA_ASSERT(0);
7718  return false;
7719  }
7720 }
7721 
7722 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7723 {
7724  VMA_ASSERT(CanBecomeLost());
7725 
7726  /*
7727  Warning: This is a carefully designed algorithm.
7728  Do not modify unless you really know what you're doing :)
7729  */
7730  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
7731  for(;;)
7732  {
7733  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7734  {
7735  VMA_ASSERT(0);
7736  return false;
7737  }
7738  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
7739  {
7740  return false;
7741  }
7742  else // Last use time earlier than current time.
7743  {
7744  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
7745  {
7746  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
7747  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
7748  return true;
7749  }
7750  }
7751  }
7752 }
7753 
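// Editor's standalone sketch of the compare-exchange retry pattern MakeLost()
// uses: read the current value, decide, then try to publish the sentinel; if
// another thread raced ahead, compare_exchange reloads the observed value and
// the loop re-evaluates. Function and parameter names are illustrative.
static bool VmaCasRetrySketch(VMA_ATOMIC_UINT32& lastUseFrameIndex,
    uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t observed = lastUseFrameIndex.load();
    for(;;)
    {
        if(observed == VMA_FRAME_INDEX_LOST)
            return false; // already lost
        if(observed + frameInUseCount >= currentFrameIndex)
            return false; // still potentially in use
        if(lastUseFrameIndex.compare_exchange_weak(observed, VMA_FRAME_INDEX_LOST))
            return true;  // we won the race
        // On failure `observed` now holds the fresh value; loop and re-check.
    }
}
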
7754 #if VMA_STATS_STRING_ENABLED
7755 
7756 // These correspond to the values of enum VmaSuballocationType.
7757 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
7758  "FREE",
7759  "UNKNOWN",
7760  "BUFFER",
7761  "IMAGE_UNKNOWN",
7762  "IMAGE_LINEAR",
7763  "IMAGE_OPTIMAL",
7764 };
7765 
7766 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
7767 {
7768  json.WriteString("Type");
7769  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
7770 
7771  json.WriteString("Size");
7772  json.WriteNumber(m_Size);
7773 
7774  if(m_pUserData != VMA_NULL)
7775  {
7776  json.WriteString("UserData");
7777  if(IsUserDataString())
7778  {
7779  json.WriteString((const char*)m_pUserData);
7780  }
7781  else
7782  {
7783  json.BeginString();
7784  json.ContinueString_Pointer(m_pUserData);
7785  json.EndString();
7786  }
7787  }
7788 
7789  json.WriteString("CreationFrameIndex");
7790  json.WriteNumber(m_CreationFrameIndex);
7791 
7792  json.WriteString("LastUseFrameIndex");
7793  json.WriteNumber(GetLastUseFrameIndex());
7794 
7795  if(m_BufferImageUsage != 0)
7796  {
7797  json.WriteString("Usage");
7798  json.WriteNumber(m_BufferImageUsage);
7799  }
7800 }
7801 
7802 #endif
7803 
7804 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7805 {
7806  VMA_ASSERT(IsUserDataString());
7807  if(m_pUserData != VMA_NULL)
7808  {
7809  char* const oldStr = (char*)m_pUserData;
7810  const size_t oldStrLen = strlen(oldStr);
7811  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7812  m_pUserData = VMA_NULL;
7813  }
7814 }
7815 
7816 void VmaAllocation_T::BlockAllocMap()
7817 {
7818  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7819 
7820  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7821  {
7822  ++m_MapCount;
7823  }
7824  else
7825  {
7826  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7827  }
7828 }
7829 
7830 void VmaAllocation_T::BlockAllocUnmap()
7831 {
7832  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7833 
7834  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7835  {
7836  --m_MapCount;
7837  }
7838  else
7839  {
7840  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7841  }
7842 }
7843 
7844 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
7845 {
7846  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7847 
7848  if(m_MapCount != 0)
7849  {
7850  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7851  {
7852  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
7853  *ppData = m_DedicatedAllocation.m_pMappedData;
7854  ++m_MapCount;
7855  return VK_SUCCESS;
7856  }
7857  else
7858  {
7859  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
7860  return VK_ERROR_MEMORY_MAP_FAILED;
7861  }
7862  }
7863  else
7864  {
7865  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
7866  hAllocator->m_hDevice,
7867  m_DedicatedAllocation.m_hMemory,
7868  0, // offset
7869  VK_WHOLE_SIZE,
7870  0, // flags
7871  ppData);
7872  if(result == VK_SUCCESS)
7873  {
7874  m_DedicatedAllocation.m_pMappedData = *ppData;
7875  m_MapCount = 1;
7876  }
7877  return result;
7878  }
7879 }
7880 
7881 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
7882 {
7883  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
7884 
7885  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7886  {
7887  --m_MapCount;
7888  if(m_MapCount == 0)
7889  {
7890  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
7891  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
7892  hAllocator->m_hDevice,
7893  m_DedicatedAllocation.m_hMemory);
7894  }
7895  }
7896  else
7897  {
7898  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
7899  }
7900 }
7901 
7902 #if VMA_STATS_STRING_ENABLED
7903 
7904 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
7905 {
7906  json.BeginObject();
7907 
7908  json.WriteString("Blocks");
7909  json.WriteNumber(stat.blockCount);
7910 
7911  json.WriteString("Allocations");
7912  json.WriteNumber(stat.allocationCount);
7913 
7914  json.WriteString("UnusedRanges");
7915  json.WriteNumber(stat.unusedRangeCount);
7916 
7917  json.WriteString("UsedBytes");
7918  json.WriteNumber(stat.usedBytes);
7919 
7920  json.WriteString("UnusedBytes");
7921  json.WriteNumber(stat.unusedBytes);
7922 
7923  if(stat.allocationCount > 1)
7924  {
7925  json.WriteString("AllocationSize");
7926  json.BeginObject(true);
7927  json.WriteString("Min");
7928  json.WriteNumber(stat.allocationSizeMin);
7929  json.WriteString("Avg");
7930  json.WriteNumber(stat.allocationSizeAvg);
7931  json.WriteString("Max");
7932  json.WriteNumber(stat.allocationSizeMax);
7933  json.EndObject();
7934  }
7935 
7936  if(stat.unusedRangeCount > 1)
7937  {
7938  json.WriteString("UnusedRangeSize");
7939  json.BeginObject(true);
7940  json.WriteString("Min");
7941  json.WriteNumber(stat.unusedRangeSizeMin);
7942  json.WriteString("Avg");
7943  json.WriteNumber(stat.unusedRangeSizeAvg);
7944  json.WriteString("Max");
7945  json.WriteNumber(stat.unusedRangeSizeMax);
7946  json.EndObject();
7947  }
7948 
7949  json.EndObject();
7950 }
7951 
7952 #endif // #if VMA_STATS_STRING_ENABLED
7953 
7954 struct VmaSuballocationItemSizeLess
7955 {
7956  bool operator()(
7957  const VmaSuballocationList::iterator lhs,
7958  const VmaSuballocationList::iterator rhs) const
7959  {
7960  return lhs->size < rhs->size;
7961  }
7962  bool operator()(
7963  const VmaSuballocationList::iterator lhs,
7964  VkDeviceSize rhsSize) const
7965  {
7966  return lhs->size < rhsSize;
7967  }
7968 };
7969 
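// Editor's sketch of the "first not less" binary search this comparator
// enables (see its use with VmaBinaryFindFirstNotLess below): with candidate
// free sizes sorted ascending, the search lands on the smallest range able to
// hold the request - the best-fit starting point. Names are illustrative.
struct VmaDeviceSizeLessSketch
{
    bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const { return lhs < rhs; }
};

static void VmaBestFitSearchSketch()
{
    VkDeviceSize sizesAscending[] = { 16, 64, 256, 1024 };
    const VkDeviceSize* const it = VmaBinaryFindFirstNotLess(
        sizesAscending, sizesAscending + 4,
        (VkDeviceSize)100, VmaDeviceSizeLessSketch());
    VMA_ASSERT(*it == 256); // smallest free range that fits a 100-byte request
}
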
7970 
7971 ////////////////////////////////////////////////////////////////////////////////
7972 // class VmaBlockMetadata
7973 
7974 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
7975  m_Size(0),
7976  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
7977 {
7978 }
7979 
7980 #if VMA_STATS_STRING_ENABLED
7981 
7982 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
7983  VkDeviceSize unusedBytes,
7984  size_t allocationCount,
7985  size_t unusedRangeCount) const
7986 {
7987  json.BeginObject();
7988 
7989  json.WriteString("TotalBytes");
7990  json.WriteNumber(GetSize());
7991 
7992  json.WriteString("UnusedBytes");
7993  json.WriteNumber(unusedBytes);
7994 
7995  json.WriteString("Allocations");
7996  json.WriteNumber((uint64_t)allocationCount);
7997 
7998  json.WriteString("UnusedRanges");
7999  json.WriteNumber((uint64_t)unusedRangeCount);
8000 
8001  json.WriteString("Suballocations");
8002  json.BeginArray();
8003 }
8004 
8005 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8006  VkDeviceSize offset,
8007  VmaAllocation hAllocation) const
8008 {
8009  json.BeginObject(true);
8010 
8011  json.WriteString("Offset");
8012  json.WriteNumber(offset);
8013 
8014  hAllocation->PrintParameters(json);
8015 
8016  json.EndObject();
8017 }
8018 
8019 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8020  VkDeviceSize offset,
8021  VkDeviceSize size) const
8022 {
8023  json.BeginObject(true);
8024 
8025  json.WriteString("Offset");
8026  json.WriteNumber(offset);
8027 
8028  json.WriteString("Type");
8029  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8030 
8031  json.WriteString("Size");
8032  json.WriteNumber(size);
8033 
8034  json.EndObject();
8035 }
8036 
8037 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8038 {
8039  json.EndArray();
8040  json.EndObject();
8041 }
8042 
8043 #endif // #if VMA_STATS_STRING_ENABLED
8044 
8045 ////////////////////////////////////////////////////////////////////////////////
8046 // class VmaBlockMetadata_Generic
8047 
8048 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8049  VmaBlockMetadata(hAllocator),
8050  m_FreeCount(0),
8051  m_SumFreeSize(0),
8052  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8053  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8054 {
8055 }
8056 
8057 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8058 {
8059 }
8060 
8061 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8062 {
8063  VmaBlockMetadata::Init(size);
8064 
8065  m_FreeCount = 1;
8066  m_SumFreeSize = size;
8067 
8068  VmaSuballocation suballoc = {};
8069  suballoc.offset = 0;
8070  suballoc.size = size;
8071  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8072  suballoc.hAllocation = VK_NULL_HANDLE;
8073 
8074  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8075  m_Suballocations.push_back(suballoc);
8076  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8077  --suballocItem;
8078  m_FreeSuballocationsBySize.push_back(suballocItem);
8079 }
8080 
8081 bool VmaBlockMetadata_Generic::Validate() const
8082 {
8083  VMA_VALIDATE(!m_Suballocations.empty());
8084 
8085  // Expected offset of new suballocation as calculated from previous ones.
8086  VkDeviceSize calculatedOffset = 0;
8087  // Expected number of free suballocations as calculated from traversing their list.
8088  uint32_t calculatedFreeCount = 0;
8089  // Expected sum size of free suballocations as calculated from traversing their list.
8090  VkDeviceSize calculatedSumFreeSize = 0;
8091  // Expected number of free suballocations that should be registered in
8092  // m_FreeSuballocationsBySize, as calculated from traversing their list.
8093  size_t freeSuballocationsToRegister = 0;
8094  // True if previous visited suballocation was free.
8095  bool prevFree = false;
8096 
8097  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8098  suballocItem != m_Suballocations.cend();
8099  ++suballocItem)
8100  {
8101  const VmaSuballocation& subAlloc = *suballocItem;
8102 
8103  // Actual offset of this suballocation doesn't match expected one.
8104  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
8105 
8106  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
8107  // Two adjacent free suballocations are invalid. They should be merged.
8108  VMA_VALIDATE(!prevFree || !currFree);
8109 
8110  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
8111 
8112  if(currFree)
8113  {
8114  calculatedSumFreeSize += subAlloc.size;
8115  ++calculatedFreeCount;
8116  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8117  {
8118  ++freeSuballocationsToRegister;
8119  }
8120 
8121  // Margin required between allocations - every free space must be at least that large.
8122  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
8123  }
8124  else
8125  {
8126  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
8127  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
8128 
8129  // Margin required between allocations - previous allocation must be free.
8130  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
8131  }
8132 
8133  calculatedOffset += subAlloc.size;
8134  prevFree = currFree;
8135  }
8136 
8137  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
8138  // match expected one.
8139  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
8140 
8141  VkDeviceSize lastSize = 0;
8142  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
8143  {
8144  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
8145 
8146  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
8147  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8148  // They must be sorted by size ascending.
8149  VMA_VALIDATE(suballocItem->size >= lastSize);
8150 
8151  lastSize = suballocItem->size;
8152  }
8153 
8154  // Check if totals match calculated values.
8155  VMA_VALIDATE(ValidateFreeSuballocationList());
8156  VMA_VALIDATE(calculatedOffset == GetSize());
8157  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
8158  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
8159 
8160  return true;
8161 }
8162 
8163 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
8164 {
8165  if(!m_FreeSuballocationsBySize.empty())
8166  {
8167  return m_FreeSuballocationsBySize.back()->size;
8168  }
8169  else
8170  {
8171  return 0;
8172  }
8173 }
8174 
8175 bool VmaBlockMetadata_Generic::IsEmpty() const
8176 {
8177  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
8178 }
8179 
8180 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8181 {
8182  outInfo.blockCount = 1;
8183 
8184  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8185  outInfo.allocationCount = rangeCount - m_FreeCount;
8186  outInfo.unusedRangeCount = m_FreeCount;
8187 
8188  outInfo.unusedBytes = m_SumFreeSize;
8189  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
8190 
8191  outInfo.allocationSizeMin = UINT64_MAX;
8192  outInfo.allocationSizeMax = 0;
8193  outInfo.unusedRangeSizeMin = UINT64_MAX;
8194  outInfo.unusedRangeSizeMax = 0;
8195 
8196  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8197  suballocItem != m_Suballocations.cend();
8198  ++suballocItem)
8199  {
8200  const VmaSuballocation& suballoc = *suballocItem;
8201  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8202  {
8203  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8204  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8205  }
8206  else
8207  {
8208  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
8209  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
8210  }
8211  }
8212 }
8213 
8214 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
8215 {
8216  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
8217 
8218  inoutStats.size += GetSize();
8219  inoutStats.unusedSize += m_SumFreeSize;
8220  inoutStats.allocationCount += rangeCount - m_FreeCount;
8221  inoutStats.unusedRangeCount += m_FreeCount;
8222  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
8223 }
8224 
8225 #if VMA_STATS_STRING_ENABLED
8226 
8227 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
8228 {
8229  PrintDetailedMap_Begin(json,
8230  m_SumFreeSize, // unusedBytes
8231  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
8232  m_FreeCount); // unusedRangeCount
8233 
8234  size_t i = 0;
8235  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
8236  suballocItem != m_Suballocations.cend();
8237  ++suballocItem, ++i)
8238  {
8239  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8240  {
8241  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
8242  }
8243  else
8244  {
8245  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
8246  }
8247  }
8248 
8249  PrintDetailedMap_End(json);
8250 }
8251 
8252 #endif // #if VMA_STATS_STRING_ENABLED
8253 
8254 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
8255  uint32_t currentFrameIndex,
8256  uint32_t frameInUseCount,
8257  VkDeviceSize bufferImageGranularity,
8258  VkDeviceSize allocSize,
8259  VkDeviceSize allocAlignment,
8260  bool upperAddress,
8261  VmaSuballocationType allocType,
8262  bool canMakeOtherLost,
8263  uint32_t strategy,
8264  VmaAllocationRequest* pAllocationRequest)
8265 {
8266  VMA_ASSERT(allocSize > 0);
8267  VMA_ASSERT(!upperAddress);
8268  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8269  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8270  VMA_HEAVY_ASSERT(Validate());
8271 
8272  pAllocationRequest->type = VmaAllocationRequestType::Normal;
8273 
8274  // There is not enough total free space in this block to fulfill the request: Early return.
8275  if(canMakeOtherLost == false &&
8276  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
8277  {
8278  return false;
8279  }
8280 
8281  // New algorithm, efficiently searching freeSuballocationsBySize.
8282  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
8283  if(freeSuballocCount > 0)
8284  {
8285  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
8286  {
8287  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
8288  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8289  m_FreeSuballocationsBySize.data(),
8290  m_FreeSuballocationsBySize.data() + freeSuballocCount,
8291  allocSize + 2 * VMA_DEBUG_MARGIN,
8292  VmaSuballocationItemSizeLess());
8293  size_t index = it - m_FreeSuballocationsBySize.data();
8294  for(; index < freeSuballocCount; ++index)
8295  {
8296  if(CheckAllocation(
8297  currentFrameIndex,
8298  frameInUseCount,
8299  bufferImageGranularity,
8300  allocSize,
8301  allocAlignment,
8302  allocType,
8303  m_FreeSuballocationsBySize[index],
8304  false, // canMakeOtherLost
8305  &pAllocationRequest->offset,
8306  &pAllocationRequest->itemsToMakeLostCount,
8307  &pAllocationRequest->sumFreeSize,
8308  &pAllocationRequest->sumItemSize))
8309  {
8310  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8311  return true;
8312  }
8313  }
8314  }
8315  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
8316  {
8317  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8318  it != m_Suballocations.end();
8319  ++it)
8320  {
8321  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
8322  currentFrameIndex,
8323  frameInUseCount,
8324  bufferImageGranularity,
8325  allocSize,
8326  allocAlignment,
8327  allocType,
8328  it,
8329  false, // canMakeOtherLost
8330  &pAllocationRequest->offset,
8331  &pAllocationRequest->itemsToMakeLostCount,
8332  &pAllocationRequest->sumFreeSize,
8333  &pAllocationRequest->sumItemSize))
8334  {
8335  pAllocationRequest->item = it;
8336  return true;
8337  }
8338  }
8339  }
8340  else // WORST_FIT, FIRST_FIT
8341  {
8342  // Search starting from the biggest suballocations.
8343  for(size_t index = freeSuballocCount; index--; )
8344  {
8345  if(CheckAllocation(
8346  currentFrameIndex,
8347  frameInUseCount,
8348  bufferImageGranularity,
8349  allocSize,
8350  allocAlignment,
8351  allocType,
8352  m_FreeSuballocationsBySize[index],
8353  false, // canMakeOtherLost
8354  &pAllocationRequest->offset,
8355  &pAllocationRequest->itemsToMakeLostCount,
8356  &pAllocationRequest->sumFreeSize,
8357  &pAllocationRequest->sumItemSize))
8358  {
8359  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
8360  return true;
8361  }
8362  }
8363  }
8364  }
8365 
8366  if(canMakeOtherLost)
8367  {
8368  // Brute-force algorithm. TODO: Come up with something better.
8369 
8370  bool found = false;
8371  VmaAllocationRequest tmpAllocRequest = {};
8372  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
8373  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
8374  suballocIt != m_Suballocations.end();
8375  ++suballocIt)
8376  {
8377  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
8378  suballocIt->hAllocation->CanBecomeLost())
8379  {
8380  if(CheckAllocation(
8381  currentFrameIndex,
8382  frameInUseCount,
8383  bufferImageGranularity,
8384  allocSize,
8385  allocAlignment,
8386  allocType,
8387  suballocIt,
8388  canMakeOtherLost,
8389  &tmpAllocRequest.offset,
8390  &tmpAllocRequest.itemsToMakeLostCount,
8391  &tmpAllocRequest.sumFreeSize,
8392  &tmpAllocRequest.sumItemSize))
8393  {
8394  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
8395  {
8396  *pAllocationRequest = tmpAllocRequest;
8397  pAllocationRequest->item = suballocIt;
8398  break;
8399  }
8400  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
8401  {
8402  *pAllocationRequest = tmpAllocRequest;
8403  pAllocationRequest->item = suballocIt;
8404  found = true;
8405  }
8406  }
8407  }
8408  }
8409 
8410  return found;
8411  }
8412 
8413  return false;
8414 }
8415 
8416 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
8417  uint32_t currentFrameIndex,
8418  uint32_t frameInUseCount,
8419  VmaAllocationRequest* pAllocationRequest)
8420 {
8421  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
8422 
8423  while(pAllocationRequest->itemsToMakeLostCount > 0)
8424  {
8425  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
8426  {
8427  ++pAllocationRequest->item;
8428  }
8429  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8430  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
8431  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
8432  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8433  {
8434  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
8435  --pAllocationRequest->itemsToMakeLostCount;
8436  }
8437  else
8438  {
8439  return false;
8440  }
8441  }
8442 
8443  VMA_HEAVY_ASSERT(Validate());
8444  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
8445  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
8446 
8447  return true;
8448 }
8449 
8450 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8451 {
8452  uint32_t lostAllocationCount = 0;
8453  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8454  it != m_Suballocations.end();
8455  ++it)
8456  {
8457  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
8458  it->hAllocation->CanBecomeLost() &&
8459  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8460  {
8461  it = FreeSuballocation(it);
8462  ++lostAllocationCount;
8463  }
8464  }
8465  return lostAllocationCount;
8466 }
8467 
8468 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
8469 {
8470  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
8471  it != m_Suballocations.end();
8472  ++it)
8473  {
8474  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
8475  {
8476  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
8477  {
8478  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8479  return VK_ERROR_VALIDATION_FAILED_EXT;
8480  }
8481  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
8482  {
8483  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8484  return VK_ERROR_VALIDATION_FAILED_EXT;
8485  }
8486  }
8487  }
8488 
8489  return VK_SUCCESS;
8490 }
8491 
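// Editor's sketch of the layout CheckCorruption() above verifies. With
// VMA_DEBUG_MARGIN > 0, every allocation is surrounded by margins filled with
// a known magic pattern:
//
//   [offset - VMA_DEBUG_MARGIN, offset)        -> magic ("BEFORE" check)
//   [offset, offset + size)                    -> user data
//   [offset + size, offset + size + margin)    -> magic ("AFTER" check)
//
// A single allocation can therefore be re-checked like this:
static bool VmaMarginCheckSketch(const void* pBlockData,
    VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    return VmaValidateMagicValue(pBlockData, allocOffset - VMA_DEBUG_MARGIN) &&
        VmaValidateMagicValue(pBlockData, allocOffset + allocSize);
}
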
8492 void VmaBlockMetadata_Generic::Alloc(
8493  const VmaAllocationRequest& request,
8494  VmaSuballocationType type,
8495  VkDeviceSize allocSize,
8496  VmaAllocation hAllocation)
8497 {
8498  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
8499  VMA_ASSERT(request.item != m_Suballocations.end());
8500  VmaSuballocation& suballoc = *request.item;
8501  // Given suballocation is a free block.
8502  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8503  // Given offset is inside this suballocation.
8504  VMA_ASSERT(request.offset >= suballoc.offset);
8505  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
8506  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
8507  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
8508 
8509  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
8510  // it to become used.
8511  UnregisterFreeSuballocation(request.item);
8512 
8513  suballoc.offset = request.offset;
8514  suballoc.size = allocSize;
8515  suballoc.type = type;
8516  suballoc.hAllocation = hAllocation;
8517 
8518  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
8519  if(paddingEnd)
8520  {
8521  VmaSuballocation paddingSuballoc = {};
8522  paddingSuballoc.offset = request.offset + allocSize;
8523  paddingSuballoc.size = paddingEnd;
8524  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8525  VmaSuballocationList::iterator next = request.item;
8526  ++next;
8527  const VmaSuballocationList::iterator paddingEndItem =
8528  m_Suballocations.insert(next, paddingSuballoc);
8529  RegisterFreeSuballocation(paddingEndItem);
8530  }
8531 
8532  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
8533  if(paddingBegin)
8534  {
8535  VmaSuballocation paddingSuballoc = {};
8536  paddingSuballoc.offset = request.offset - paddingBegin;
8537  paddingSuballoc.size = paddingBegin;
8538  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8539  const VmaSuballocationList::iterator paddingBeginItem =
8540  m_Suballocations.insert(request.item, paddingSuballoc);
8541  RegisterFreeSuballocation(paddingBeginItem);
8542  }
8543 
8544  // Update totals.
8545  --m_FreeCount;
8546  if(paddingBegin > 0)
8547  {
8548  ++m_FreeCount;
8549  }
8550  if(paddingEnd > 0)
8551  {
8552  ++m_FreeCount;
8553  }
8554  m_SumFreeSize -= allocSize;
8555 }
8556 
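// Alloc() carves one free suballocation into up to three pieces:
// [paddingBegin][new allocation][paddingEnd], where paddingBegin is the space
// skipped to satisfy alignment. A self-contained sketch of the same
// arithmetic (names are local to this example, not library API):
#if 0
struct SplitResult { VkDeviceSize paddingBegin, paddingEnd; };

// freeOffset/freeSize describe the free range; allocOffset is the aligned
// offset already chosen inside it; allocSize is the requested size.
SplitResult SplitFreeRange(VkDeviceSize freeOffset, VkDeviceSize freeSize,
    VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    SplitResult r;
    r.paddingBegin = allocOffset - freeOffset;            // skipped for alignment
    r.paddingEnd = freeSize - r.paddingBegin - allocSize; // left over at the end
    return r; // each nonzero padding becomes a new FREE suballocation
}
#endif
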
8557 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8558 {
8559  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8560  suballocItem != m_Suballocations.end();
8561  ++suballocItem)
8562  {
8563  VmaSuballocation& suballoc = *suballocItem;
8564  if(suballoc.hAllocation == allocation)
8565  {
8566  FreeSuballocation(suballocItem);
8567  VMA_HEAVY_ASSERT(Validate());
8568  return;
8569  }
8570  }
8571  VMA_ASSERT(0 && "Not found!");
8572 }
8573 
8574 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8575 {
8576  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8577  suballocItem != m_Suballocations.end();
8578  ++suballocItem)
8579  {
8580  VmaSuballocation& suballoc = *suballocItem;
8581  if(suballoc.offset == offset)
8582  {
8583  FreeSuballocation(suballocItem);
8584  return;
8585  }
8586  }
8587  VMA_ASSERT(0 && "Not found!");
8588 }
8589 
8590 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8591 {
8592  VkDeviceSize lastSize = 0;
8593  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8594  {
8595  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8596 
8597  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8598  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8599  VMA_VALIDATE(it->size >= lastSize);
8600  lastSize = it->size;
8601  }
8602  return true;
8603 }
8604 
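// ValidateFreeSuballocationList() checks the central invariant of
// m_FreeSuballocationsBySize: it holds iterators to all FREE suballocations
// of size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER, ordered by
// non-decreasing size, which is what makes best-fit lookup a binary search.
// The equivalent check expressed with the standard library over the raw
// iterator array (a sketch, not how the file actually implements it):
#if 0
#include <algorithm>

static bool IsSortedBySize(
    const VmaSuballocationList::iterator* beg,
    const VmaSuballocationList::iterator* end)
{
    return std::is_sorted(beg, end,
        [](const VmaSuballocationList::iterator& a,
           const VmaSuballocationList::iterator& b) { return a->size < b->size; });
}
// Usage: IsSortedBySize(v.data(), v.data() + v.size());
#endif
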
8605 bool VmaBlockMetadata_Generic::CheckAllocation(
8606  uint32_t currentFrameIndex,
8607  uint32_t frameInUseCount,
8608  VkDeviceSize bufferImageGranularity,
8609  VkDeviceSize allocSize,
8610  VkDeviceSize allocAlignment,
8611  VmaSuballocationType allocType,
8612  VmaSuballocationList::const_iterator suballocItem,
8613  bool canMakeOtherLost,
8614  VkDeviceSize* pOffset,
8615  size_t* itemsToMakeLostCount,
8616  VkDeviceSize* pSumFreeSize,
8617  VkDeviceSize* pSumItemSize) const
8618 {
8619  VMA_ASSERT(allocSize > 0);
8620  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8621  VMA_ASSERT(suballocItem != m_Suballocations.cend());
8622  VMA_ASSERT(pOffset != VMA_NULL);
8623 
8624  *itemsToMakeLostCount = 0;
8625  *pSumFreeSize = 0;
8626  *pSumItemSize = 0;
8627 
8628  if(canMakeOtherLost)
8629  {
8630  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8631  {
8632  *pSumFreeSize = suballocItem->size;
8633  }
8634  else
8635  {
8636  if(suballocItem->hAllocation->CanBecomeLost() &&
8637  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8638  {
8639  ++*itemsToMakeLostCount;
8640  *pSumItemSize = suballocItem->size;
8641  }
8642  else
8643  {
8644  return false;
8645  }
8646  }
8647 
8648  // Remaining size is too small for this request: Early return.
8649  if(GetSize() - suballocItem->offset < allocSize)
8650  {
8651  return false;
8652  }
8653 
8654  // Start from offset equal to beginning of this suballocation.
8655  *pOffset = suballocItem->offset;
8656 
8657  // Apply VMA_DEBUG_MARGIN at the beginning.
8658  if(VMA_DEBUG_MARGIN > 0)
8659  {
8660  *pOffset += VMA_DEBUG_MARGIN;
8661  }
8662 
8663  // Apply alignment.
8664  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8665 
8666  // Check previous suballocations for BufferImageGranularity conflicts.
8667  // Make bigger alignment if necessary.
8668  if(bufferImageGranularity > 1)
8669  {
8670  bool bufferImageGranularityConflict = false;
8671  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8672  while(prevSuballocItem != m_Suballocations.cbegin())
8673  {
8674  --prevSuballocItem;
8675  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8676  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8677  {
8678  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8679  {
8680  bufferImageGranularityConflict = true;
8681  break;
8682  }
8683  }
8684  else
8685  // Already on previous page.
8686  break;
8687  }
8688  if(bufferImageGranularityConflict)
8689  {
8690  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8691  }
8692  }
8693 
8694  // Now that we have final *pOffset, check if we are past suballocItem.
8695  // If yes, return false - this function should be called for another suballocItem as starting point.
8696  if(*pOffset >= suballocItem->offset + suballocItem->size)
8697  {
8698  return false;
8699  }
8700 
8701  // Calculate padding at the beginning based on current offset.
8702  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
8703 
8704  // Calculate required margin at the end.
8705  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8706 
8707  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
8708  // Another early return check.
8709  if(suballocItem->offset + totalSize > GetSize())
8710  {
8711  return false;
8712  }
8713 
8714  // Advance lastSuballocItem until desired size is reached.
8715  // Update itemsToMakeLostCount.
8716  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
8717  if(totalSize > suballocItem->size)
8718  {
8719  VkDeviceSize remainingSize = totalSize - suballocItem->size;
8720  while(remainingSize > 0)
8721  {
8722  ++lastSuballocItem;
8723  if(lastSuballocItem == m_Suballocations.cend())
8724  {
8725  return false;
8726  }
8727  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8728  {
8729  *pSumFreeSize += lastSuballocItem->size;
8730  }
8731  else
8732  {
8733  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
8734  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
8735  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8736  {
8737  ++*itemsToMakeLostCount;
8738  *pSumItemSize += lastSuballocItem->size;
8739  }
8740  else
8741  {
8742  return false;
8743  }
8744  }
8745  remainingSize = (lastSuballocItem->size < remainingSize) ?
8746  remainingSize - lastSuballocItem->size : 0;
8747  }
8748  }
8749 
8750  // Check next suballocations for BufferImageGranularity conflicts.
8751  // If conflict exists, we must mark more allocations lost or fail.
8752  if(bufferImageGranularity > 1)
8753  {
8754  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
8755  ++nextSuballocItem;
8756  while(nextSuballocItem != m_Suballocations.cend())
8757  {
8758  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8759  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8760  {
8761  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8762  {
8763  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
8764  if(nextSuballoc.hAllocation->CanBecomeLost() &&
8765  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8766  {
8767  ++*itemsToMakeLostCount;
8768  }
8769  else
8770  {
8771  return false;
8772  }
8773  }
8774  }
8775  else
8776  {
8777  // Already on next page.
8778  break;
8779  }
8780  ++nextSuballocItem;
8781  }
8782  }
8783  }
8784  else
8785  {
8786  const VmaSuballocation& suballoc = *suballocItem;
8787  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
8788 
8789  *pSumFreeSize = suballoc.size;
8790 
8791  // Size of this suballocation is too small for this request: Early return.
8792  if(suballoc.size < allocSize)
8793  {
8794  return false;
8795  }
8796 
8797  // Start from offset equal to beginning of this suballocation.
8798  *pOffset = suballoc.offset;
8799 
8800  // Apply VMA_DEBUG_MARGIN at the beginning.
8801  if(VMA_DEBUG_MARGIN > 0)
8802  {
8803  *pOffset += VMA_DEBUG_MARGIN;
8804  }
8805 
8806  // Apply alignment.
8807  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
8808 
8809  // Check previous suballocations for BufferImageGranularity conflicts.
8810  // Make bigger alignment if necessary.
8811  if(bufferImageGranularity > 1)
8812  {
8813  bool bufferImageGranularityConflict = false;
8814  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
8815  while(prevSuballocItem != m_Suballocations.cbegin())
8816  {
8817  --prevSuballocItem;
8818  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
8819  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
8820  {
8821  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8822  {
8823  bufferImageGranularityConflict = true;
8824  break;
8825  }
8826  }
8827  else
8828  // Already on previous page.
8829  break;
8830  }
8831  if(bufferImageGranularityConflict)
8832  {
8833  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
8834  }
8835  }
8836 
8837  // Calculate padding at the beginning based on current offset.
8838  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
8839 
8840  // Calculate required margin at the end.
8841  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
8842 
8843  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
8844  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
8845  {
8846  return false;
8847  }
8848 
8849  // Check next suballocations for BufferImageGranularity conflicts.
8850  // If conflict exists, allocation cannot be made here.
8851  if(bufferImageGranularity > 1)
8852  {
8853  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
8854  ++nextSuballocItem;
8855  while(nextSuballocItem != m_Suballocations.cend())
8856  {
8857  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
8858  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8859  {
8860  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8861  {
8862  return false;
8863  }
8864  }
8865  else
8866  {
8867  // Already on next page.
8868  break;
8869  }
8870  ++nextSuballocItem;
8871  }
8872  }
8873  }
8874 
8875  // All tests passed: Success. pOffset is already filled.
8876  return true;
8877 }
8878 
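// Both branches above compute the candidate offset with the same pipeline:
// start at the free range, skip VMA_DEBUG_MARGIN, align up to the requested
// alignment, then align up further if a preceding neighbor on the same
// bufferImageGranularity "page" holds a resource of a conflicting kind
// (linear vs. optimal tiling). A condensed sketch of that pipeline
// (the bool parameter stands in for the prev-suballocation scan above):
#if 0
VkDeviceSize ComputeCandidateOffset(VkDeviceSize freeOffset,
    VkDeviceSize allocAlignment, VkDeviceSize bufferImageGranularity,
    bool conflictWithPrevNeighbor)
{
    VkDeviceSize offset = freeOffset + VMA_DEBUG_MARGIN;
    offset = VmaAlignUp(offset, allocAlignment);
    if(bufferImageGranularity > 1 && conflictWithPrevNeighbor)
    {
        // Promote the alignment so the allocation starts on a fresh page.
        offset = VmaAlignUp(offset, bufferImageGranularity);
    }
    return offset;
}
#endif
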
8879 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8880 {
8881  VMA_ASSERT(item != m_Suballocations.end());
8882  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8883 
8884  VmaSuballocationList::iterator nextItem = item;
8885  ++nextItem;
8886  VMA_ASSERT(nextItem != m_Suballocations.end());
8887  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8888 
8889  item->size += nextItem->size;
8890  --m_FreeCount;
8891  m_Suballocations.erase(nextItem);
8892 }
8893 
8894 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
8895 {
8896  // Change this suballocation to be marked as free.
8897  VmaSuballocation& suballoc = *suballocItem;
8898  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8899  suballoc.hAllocation = VK_NULL_HANDLE;
8900 
8901  // Update totals.
8902  ++m_FreeCount;
8903  m_SumFreeSize += suballoc.size;
8904 
8905  // Merge with previous and/or next suballocation if it's also free.
8906  bool mergeWithNext = false;
8907  bool mergeWithPrev = false;
8908 
8909  VmaSuballocationList::iterator nextItem = suballocItem;
8910  ++nextItem;
8911  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
8912  {
8913  mergeWithNext = true;
8914  }
8915 
8916  VmaSuballocationList::iterator prevItem = suballocItem;
8917  if(suballocItem != m_Suballocations.begin())
8918  {
8919  --prevItem;
8920  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
8921  {
8922  mergeWithPrev = true;
8923  }
8924  }
8925 
8926  if(mergeWithNext)
8927  {
8928  UnregisterFreeSuballocation(nextItem);
8929  MergeFreeWithNext(suballocItem);
8930  }
8931 
8932  if(mergeWithPrev)
8933  {
8934  UnregisterFreeSuballocation(prevItem);
8935  MergeFreeWithNext(prevItem);
8936  RegisterFreeSuballocation(prevItem);
8937  return prevItem;
8938  }
8939  else
8940  {
8941  RegisterFreeSuballocation(suballocItem);
8942  return suballocItem;
8943  }
8944 }
8945 
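// FreeSuballocation() performs immediate coalescing, so the list never
// contains two adjacent FREE items. Note the ordering: a neighbor is first
// removed from the size-sorted vector (UnregisterFreeSuballocation), then
// merged, and only the surviving, grown item is re-registered - otherwise
// m_FreeSuballocationsBySize would keep a stale iterator or a wrong size.
// The possible outcomes, schematically:
//
//   [used][FREE(x)][used] -> free x     -> [used][ FREE ][used]
//   [used][FREE(x)][FREE] -> merge next -> [used][    FREE    ]
//   [FREE][FREE(x)][used] -> merge prev -> [    FREE    ][used]
//   [FREE][FREE(x)][FREE] -> merge both -> [       FREE       ]
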
8946 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8947 {
8948  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8949  VMA_ASSERT(item->size > 0);
8950 
8951  // You may want to enable this validation at the beginning or at the end of
8952  // this function, depending on what you want to check.
8953  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8954 
8955  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8956  {
8957  if(m_FreeSuballocationsBySize.empty())
8958  {
8959  m_FreeSuballocationsBySize.push_back(item);
8960  }
8961  else
8962  {
8963  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8964  }
8965  }
8966 
8967  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8968 }
8969 
8970 
8971 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8972 {
8973  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8974  VMA_ASSERT(item->size > 0);
8975 
8976  // You may want to enable this validation at the beginning or at the end of
8977  // this function, depending on what you want to check.
8978  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8979 
8980  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8981  {
8982  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8983  m_FreeSuballocationsBySize.data(),
8984  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8985  item,
8986  VmaSuballocationItemSizeLess());
8987  for(size_t index = it - m_FreeSuballocationsBySize.data();
8988  index < m_FreeSuballocationsBySize.size();
8989  ++index)
8990  {
8991  if(m_FreeSuballocationsBySize[index] == item)
8992  {
8993  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8994  return;
8995  }
8996  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8997  }
8998  VMA_ASSERT(0 && "Not found.");
8999  }
9000 
9001  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9002 }
9003 
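// The binary search above only lands on the *first* element of equal size;
// since several free ranges may share one size, a short linear scan over the
// equal-size run finds the exact iterator. The same idea expressed with the
// standard library (a sketch; the real code uses the library's own vector):
#if 0
#include <algorithm>
#include <vector>

template<typename It>
void EraseFromSizeSorted(std::vector<It>& v, It item)
{
    auto pos = std::lower_bound(v.begin(), v.end(), item,
        [](const It& a, const It& b) { return a->size < b->size; });
    // Scan the run of elements with equal size for the exact match.
    while(pos != v.end() && (*pos)->size == item->size)
    {
        if(*pos == item) { v.erase(pos); return; }
        ++pos;
    }
    // Reaching here means the two registries are out of sync (asserted above).
}
#endif
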
9004 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9005  VkDeviceSize bufferImageGranularity,
9006  VmaSuballocationType& inOutPrevSuballocType) const
9007 {
9008  if(bufferImageGranularity == 1 || IsEmpty())
9009  {
9010  return false;
9011  }
9012 
9013  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9014  bool typeConflictFound = false;
9015  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9016  it != m_Suballocations.cend();
9017  ++it)
9018  {
9019  const VmaSuballocationType suballocType = it->type;
9020  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9021  {
9022  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9023  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9024  {
9025  typeConflictFound = true;
9026  }
9027  inOutPrevSuballocType = suballocType;
9028  }
9029  }
9030 
9031  return typeConflictFound || minAlignment >= bufferImageGranularity;
9032 }
9033 
9035 // class VmaBlockMetadata_Linear
9036 
9037 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9038  VmaBlockMetadata(hAllocator),
9039  m_SumFreeSize(0),
9040  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9041  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9042  m_1stVectorIndex(0),
9043  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9044  m_1stNullItemsBeginCount(0),
9045  m_1stNullItemsMiddleCount(0),
9046  m_2ndNullItemsCount(0)
9047 {
9048 }
9049 
9050 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9051 {
9052 }
9053 
9054 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9055 {
9056  VmaBlockMetadata::Init(size);
9057  m_SumFreeSize = size;
9058 }
9059 
9060 bool VmaBlockMetadata_Linear::Validate() const
9061 {
9062  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9063  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9064 
9065  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9066  VMA_VALIDATE(!suballocations1st.empty() ||
9067  suballocations2nd.empty() ||
9068  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9069 
9070  if(!suballocations1st.empty())
9071  {
9072  // A null item at the beginning should be counted in m_1stNullItemsBeginCount.
9073  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9074  // A null item at the end should have been removed by pop_back().
9075  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9076  }
9077  if(!suballocations2nd.empty())
9078  {
9079  // A null item at the end should have been removed by pop_back().
9080  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
9081  }
9082 
9083  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
9084  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
9085 
9086  VkDeviceSize sumUsedSize = 0;
9087  const size_t suballoc1stCount = suballocations1st.size();
9088  VkDeviceSize offset = VMA_DEBUG_MARGIN;
9089 
9090  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9091  {
9092  const size_t suballoc2ndCount = suballocations2nd.size();
9093  size_t nullItem2ndCount = 0;
9094  for(size_t i = 0; i < suballoc2ndCount; ++i)
9095  {
9096  const VmaSuballocation& suballoc = suballocations2nd[i];
9097  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9098 
9099  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9100  VMA_VALIDATE(suballoc.offset >= offset);
9101 
9102  if(!currFree)
9103  {
9104  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9105  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9106  sumUsedSize += suballoc.size;
9107  }
9108  else
9109  {
9110  ++nullItem2ndCount;
9111  }
9112 
9113  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9114  }
9115 
9116  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9117  }
9118 
9119  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
9120  {
9121  const VmaSuballocation& suballoc = suballocations1st[i];
9122  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
9123  suballoc.hAllocation == VK_NULL_HANDLE);
9124  }
9125 
9126  size_t nullItem1stCount = m_1stNullItemsBeginCount;
9127 
9128  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
9129  {
9130  const VmaSuballocation& suballoc = suballocations1st[i];
9131  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9132 
9133  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9134  VMA_VALIDATE(suballoc.offset >= offset);
9135  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
9136 
9137  if(!currFree)
9138  {
9139  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9140  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9141  sumUsedSize += suballoc.size;
9142  }
9143  else
9144  {
9145  ++nullItem1stCount;
9146  }
9147 
9148  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9149  }
9150  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
9151 
9152  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9153  {
9154  const size_t suballoc2ndCount = suballocations2nd.size();
9155  size_t nullItem2ndCount = 0;
9156  for(size_t i = suballoc2ndCount; i--; )
9157  {
9158  const VmaSuballocation& suballoc = suballocations2nd[i];
9159  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9160 
9161  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
9162  VMA_VALIDATE(suballoc.offset >= offset);
9163 
9164  if(!currFree)
9165  {
9166  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
9167  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
9168  sumUsedSize += suballoc.size;
9169  }
9170  else
9171  {
9172  ++nullItem2ndCount;
9173  }
9174 
9175  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
9176  }
9177 
9178  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
9179  }
9180 
9181  VMA_VALIDATE(offset <= GetSize());
9182  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
9183 
9184  return true;
9185 }
9186 
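// VmaBlockMetadata_Linear keeps two suballocation vectors instead of a list:
// the 1st grows upward from the start of the block, while the 2nd acts either
// as the upper side of a double stack or as the wrapped-around part of a ring
// buffer, per m_2ndVectorMode. This metadata is selected by creating a custom
// pool with the linear algorithm; a hedged setup sketch (field values are
// examples, `memTypeIndex` is assumed to come from vmaFindMemoryTypeIndex()):
#if 0
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
// Linear pools live in a single block, so give it an explicit size.
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1;

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocations with VmaAllocationCreateInfo::pool == pool now use this
// metadata (free-at-end stack, double stack, or ring buffer behavior).
#endif
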
9187 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
9188 {
9189  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
9190  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
9191 }
9192 
9193 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
9194 {
9195  const VkDeviceSize size = GetSize();
9196 
9197  /*
9198  Gaps inside the suballocation vectors left by freed allocations are not counted,
9199  because a linear allocator cannot reuse them. We consider only space that
9200  is available for new allocations.
9201  */
9202  if(IsEmpty())
9203  {
9204  return size;
9205  }
9206 
9207  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9208 
9209  switch(m_2ndVectorMode)
9210  {
9211  case SECOND_VECTOR_EMPTY:
9212  /*
9213  Available space is after end of 1st, as well as before beginning of 1st (which
9214  would make it a ring buffer).
9215  */
9216  {
9217  const size_t suballocations1stCount = suballocations1st.size();
9218  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
9219  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9220  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
9221  return VMA_MAX(
9222  firstSuballoc.offset,
9223  size - (lastSuballoc.offset + lastSuballoc.size));
9224  }
9225  break;
9226 
9227  case SECOND_VECTOR_RING_BUFFER:
9228  /*
9229  Available space is only between end of 2nd and beginning of 1st.
9230  */
9231  {
9232  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9233  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
9234  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
9235  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
9236  }
9237  break;
9238 
9239  case SECOND_VECTOR_DOUBLE_STACK:
9240  /*
9241  Available space is only between end of 1st and top of 2nd.
9242  */
9243  {
9244  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9245  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
9246  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
9247  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
9248  }
9249  break;
9250 
9251  default:
9252  VMA_ASSERT(0);
9253  return 0;
9254  }
9255 }
9256 
9257 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9258 {
9259  const VkDeviceSize size = GetSize();
9260  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9261  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9262  const size_t suballoc1stCount = suballocations1st.size();
9263  const size_t suballoc2ndCount = suballocations2nd.size();
9264 
9265  outInfo.blockCount = 1;
9266  outInfo.allocationCount = (uint32_t)GetAllocationCount();
9267  outInfo.unusedRangeCount = 0;
9268  outInfo.usedBytes = 0;
9269  outInfo.allocationSizeMin = UINT64_MAX;
9270  outInfo.allocationSizeMax = 0;
9271  outInfo.unusedRangeSizeMin = UINT64_MAX;
9272  outInfo.unusedRangeSizeMax = 0;
9273 
9274  VkDeviceSize lastOffset = 0;
9275 
9276  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9277  {
9278  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9279  size_t nextAlloc2ndIndex = 0;
9280  while(lastOffset < freeSpace2ndTo1stEnd)
9281  {
9282  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9283  while(nextAlloc2ndIndex < suballoc2ndCount &&
9284  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9285  {
9286  ++nextAlloc2ndIndex;
9287  }
9288 
9289  // Found non-null allocation.
9290  if(nextAlloc2ndIndex < suballoc2ndCount)
9291  {
9292  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9293 
9294  // 1. Process free space before this allocation.
9295  if(lastOffset < suballoc.offset)
9296  {
9297  // There is free space from lastOffset to suballoc.offset.
9298  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9299  ++outInfo.unusedRangeCount;
9300  outInfo.unusedBytes += unusedRangeSize;
9301  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9302  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9303  }
9304 
9305  // 2. Process this allocation.
9306  // There is allocation with suballoc.offset, suballoc.size.
9307  outInfo.usedBytes += suballoc.size;
9308  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9309  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9310 
9311  // 3. Prepare for next iteration.
9312  lastOffset = suballoc.offset + suballoc.size;
9313  ++nextAlloc2ndIndex;
9314  }
9315  // We are at the end.
9316  else
9317  {
9318  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9319  if(lastOffset < freeSpace2ndTo1stEnd)
9320  {
9321  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9322  ++outInfo.unusedRangeCount;
9323  outInfo.unusedBytes += unusedRangeSize;
9324  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9325  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9326  }
9327 
9328  // End of loop.
9329  lastOffset = freeSpace2ndTo1stEnd;
9330  }
9331  }
9332  }
9333 
9334  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9335  const VkDeviceSize freeSpace1stTo2ndEnd =
9336  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9337  while(lastOffset < freeSpace1stTo2ndEnd)
9338  {
9339  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9340  while(nextAlloc1stIndex < suballoc1stCount &&
9341  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9342  {
9343  ++nextAlloc1stIndex;
9344  }
9345 
9346  // Found non-null allocation.
9347  if(nextAlloc1stIndex < suballoc1stCount)
9348  {
9349  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9350 
9351  // 1. Process free space before this allocation.
9352  if(lastOffset < suballoc.offset)
9353  {
9354  // There is free space from lastOffset to suballoc.offset.
9355  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9356  ++outInfo.unusedRangeCount;
9357  outInfo.unusedBytes += unusedRangeSize;
9358  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9359  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9360  }
9361 
9362  // 2. Process this allocation.
9363  // There is allocation with suballoc.offset, suballoc.size.
9364  outInfo.usedBytes += suballoc.size;
9365  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9366  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9367 
9368  // 3. Prepare for next iteration.
9369  lastOffset = suballoc.offset + suballoc.size;
9370  ++nextAlloc1stIndex;
9371  }
9372  // We are at the end.
9373  else
9374  {
9375  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9376  if(lastOffset < freeSpace1stTo2ndEnd)
9377  {
9378  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9379  ++outInfo.unusedRangeCount;
9380  outInfo.unusedBytes += unusedRangeSize;
9381  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9382  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9383  }
9384 
9385  // End of loop.
9386  lastOffset = freeSpace1stTo2ndEnd;
9387  }
9388  }
9389 
9390  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9391  {
9392  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9393  while(lastOffset < size)
9394  {
9395  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9396  while(nextAlloc2ndIndex != SIZE_MAX &&
9397  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9398  {
9399  --nextAlloc2ndIndex;
9400  }
9401 
9402  // Found non-null allocation.
9403  if(nextAlloc2ndIndex != SIZE_MAX)
9404  {
9405  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9406 
9407  // 1. Process free space before this allocation.
9408  if(lastOffset < suballoc.offset)
9409  {
9410  // There is free space from lastOffset to suballoc.offset.
9411  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9412  ++outInfo.unusedRangeCount;
9413  outInfo.unusedBytes += unusedRangeSize;
9414  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9415  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9416  }
9417 
9418  // 2. Process this allocation.
9419  // There is allocation with suballoc.offset, suballoc.size.
9420  outInfo.usedBytes += suballoc.size;
9421  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9422  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9423 
9424  // 3. Prepare for next iteration.
9425  lastOffset = suballoc.offset + suballoc.size;
9426  --nextAlloc2ndIndex;
9427  }
9428  // We are at the end.
9429  else
9430  {
9431  // There is free space from lastOffset to size.
9432  if(lastOffset < size)
9433  {
9434  const VkDeviceSize unusedRangeSize = size - lastOffset;
9435  ++outInfo.unusedRangeCount;
9436  outInfo.unusedBytes += unusedRangeSize;
9437  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9438  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9439  }
9440 
9441  // End of loop.
9442  lastOffset = size;
9443  }
9444  }
9445  }
9446 
9447  outInfo.unusedBytes = size - outInfo.usedBytes;
9448 }
9449 
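// CalcAllocationStatInfo() produces the per-block numbers that the library
// aggregates per memory type, per heap, and in total. Typical consumption
// through the public API, assuming an existing `allocator` (a sketch):
#if 0
#include <cstdio>

VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("used: %llu B, unused: %llu B in %u allocations / %u blocks\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes,
    stats.total.allocationCount,
    stats.total.blockCount);
#endif
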
9450 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9451 {
9452  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9453  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9454  const VkDeviceSize size = GetSize();
9455  const size_t suballoc1stCount = suballocations1st.size();
9456  const size_t suballoc2ndCount = suballocations2nd.size();
9457 
9458  inoutStats.size += size;
9459 
9460  VkDeviceSize lastOffset = 0;
9461 
9462  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9463  {
9464  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9465  size_t nextAlloc2ndIndex = 0;
9466  while(lastOffset < freeSpace2ndTo1stEnd)
9467  {
9468  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9469  while(nextAlloc2ndIndex < suballoc2ndCount &&
9470  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9471  {
9472  ++nextAlloc2ndIndex;
9473  }
9474 
9475  // Found non-null allocation.
9476  if(nextAlloc2ndIndex < suballoc2ndCount)
9477  {
9478  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9479 
9480  // 1. Process free space before this allocation.
9481  if(lastOffset < suballoc.offset)
9482  {
9483  // There is free space from lastOffset to suballoc.offset.
9484  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9485  inoutStats.unusedSize += unusedRangeSize;
9486  ++inoutStats.unusedRangeCount;
9487  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9488  }
9489 
9490  // 2. Process this allocation.
9491  // There is allocation with suballoc.offset, suballoc.size.
9492  ++inoutStats.allocationCount;
9493 
9494  // 3. Prepare for next iteration.
9495  lastOffset = suballoc.offset + suballoc.size;
9496  ++nextAlloc2ndIndex;
9497  }
9498  // We are at the end.
9499  else
9500  {
9501  if(lastOffset < freeSpace2ndTo1stEnd)
9502  {
9503  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9504  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9505  inoutStats.unusedSize += unusedRangeSize;
9506  ++inoutStats.unusedRangeCount;
9507  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9508  }
9509 
9510  // End of loop.
9511  lastOffset = freeSpace2ndTo1stEnd;
9512  }
9513  }
9514  }
9515 
9516  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9517  const VkDeviceSize freeSpace1stTo2ndEnd =
9518  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9519  while(lastOffset < freeSpace1stTo2ndEnd)
9520  {
9521  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9522  while(nextAlloc1stIndex < suballoc1stCount &&
9523  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9524  {
9525  ++nextAlloc1stIndex;
9526  }
9527 
9528  // Found non-null allocation.
9529  if(nextAlloc1stIndex < suballoc1stCount)
9530  {
9531  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9532 
9533  // 1. Process free space before this allocation.
9534  if(lastOffset < suballoc.offset)
9535  {
9536  // There is free space from lastOffset to suballoc.offset.
9537  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9538  inoutStats.unusedSize += unusedRangeSize;
9539  ++inoutStats.unusedRangeCount;
9540  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9541  }
9542 
9543  // 2. Process this allocation.
9544  // There is allocation with suballoc.offset, suballoc.size.
9545  ++inoutStats.allocationCount;
9546 
9547  // 3. Prepare for next iteration.
9548  lastOffset = suballoc.offset + suballoc.size;
9549  ++nextAlloc1stIndex;
9550  }
9551  // We are at the end.
9552  else
9553  {
9554  if(lastOffset < freeSpace1stTo2ndEnd)
9555  {
9556  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9557  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9558  inoutStats.unusedSize += unusedRangeSize;
9559  ++inoutStats.unusedRangeCount;
9560  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9561  }
9562 
9563  // End of loop.
9564  lastOffset = freeSpace1stTo2ndEnd;
9565  }
9566  }
9567 
9568  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9569  {
9570  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9571  while(lastOffset < size)
9572  {
9573  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9574  while(nextAlloc2ndIndex != SIZE_MAX &&
9575  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9576  {
9577  --nextAlloc2ndIndex;
9578  }
9579 
9580  // Found non-null allocation.
9581  if(nextAlloc2ndIndex != SIZE_MAX)
9582  {
9583  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9584 
9585  // 1. Process free space before this allocation.
9586  if(lastOffset < suballoc.offset)
9587  {
9588  // There is free space from lastOffset to suballoc.offset.
9589  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9590  inoutStats.unusedSize += unusedRangeSize;
9591  ++inoutStats.unusedRangeCount;
9592  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9593  }
9594 
9595  // 2. Process this allocation.
9596  // There is allocation with suballoc.offset, suballoc.size.
9597  ++inoutStats.allocationCount;
9598 
9599  // 3. Prepare for next iteration.
9600  lastOffset = suballoc.offset + suballoc.size;
9601  --nextAlloc2ndIndex;
9602  }
9603  // We are at the end.
9604  else
9605  {
9606  if(lastOffset < size)
9607  {
9608  // There is free space from lastOffset to size.
9609  const VkDeviceSize unusedRangeSize = size - lastOffset;
9610  inoutStats.unusedSize += unusedRangeSize;
9611  ++inoutStats.unusedRangeCount;
9612  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9613  }
9614 
9615  // End of loop.
9616  lastOffset = size;
9617  }
9618  }
9619  }
9620 }
9621 
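// AddPoolStats() is the cheaper sibling of CalcAllocationStatInfo(): it only
// accumulates counts and sizes, with no per-allocation min/avg/max. It backs
// the public vmaGetPoolStats() call; a usage sketch, assuming an existing
// `allocator` and `pool`:
#if 0
VmaPoolStats poolStats = {};
vmaGetPoolStats(allocator, pool, &poolStats);
// poolStats.size               - total bytes of VkDeviceMemory held by the pool
// poolStats.unusedSize         - bytes not occupied by any allocation
// poolStats.unusedRangeSizeMax - largest contiguous free region
#endif
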
9622 #if VMA_STATS_STRING_ENABLED
9623 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9624 {
9625  const VkDeviceSize size = GetSize();
9626  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9627  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9628  const size_t suballoc1stCount = suballocations1st.size();
9629  const size_t suballoc2ndCount = suballocations2nd.size();
9630 
9631  // FIRST PASS
9632 
9633  size_t unusedRangeCount = 0;
9634  VkDeviceSize usedBytes = 0;
9635 
9636  VkDeviceSize lastOffset = 0;
9637 
9638  size_t alloc2ndCount = 0;
9639  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9640  {
9641  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9642  size_t nextAlloc2ndIndex = 0;
9643  while(lastOffset < freeSpace2ndTo1stEnd)
9644  {
9645  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9646  while(nextAlloc2ndIndex < suballoc2ndCount &&
9647  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9648  {
9649  ++nextAlloc2ndIndex;
9650  }
9651 
9652  // Found non-null allocation.
9653  if(nextAlloc2ndIndex < suballoc2ndCount)
9654  {
9655  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9656 
9657  // 1. Process free space before this allocation.
9658  if(lastOffset < suballoc.offset)
9659  {
9660  // There is free space from lastOffset to suballoc.offset.
9661  ++unusedRangeCount;
9662  }
9663 
9664  // 2. Process this allocation.
9665  // There is allocation with suballoc.offset, suballoc.size.
9666  ++alloc2ndCount;
9667  usedBytes += suballoc.size;
9668 
9669  // 3. Prepare for next iteration.
9670  lastOffset = suballoc.offset + suballoc.size;
9671  ++nextAlloc2ndIndex;
9672  }
9673  // We are at the end.
9674  else
9675  {
9676  if(lastOffset < freeSpace2ndTo1stEnd)
9677  {
9678  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9679  ++unusedRangeCount;
9680  }
9681 
9682  // End of loop.
9683  lastOffset = freeSpace2ndTo1stEnd;
9684  }
9685  }
9686  }
9687 
9688  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9689  size_t alloc1stCount = 0;
9690  const VkDeviceSize freeSpace1stTo2ndEnd =
9691  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9692  while(lastOffset < freeSpace1stTo2ndEnd)
9693  {
9694  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9695  while(nextAlloc1stIndex < suballoc1stCount &&
9696  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9697  {
9698  ++nextAlloc1stIndex;
9699  }
9700 
9701  // Found non-null allocation.
9702  if(nextAlloc1stIndex < suballoc1stCount)
9703  {
9704  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9705 
9706  // 1. Process free space before this allocation.
9707  if(lastOffset < suballoc.offset)
9708  {
9709  // There is free space from lastOffset to suballoc.offset.
9710  ++unusedRangeCount;
9711  }
9712 
9713  // 2. Process this allocation.
9714  // There is allocation with suballoc.offset, suballoc.size.
9715  ++alloc1stCount;
9716  usedBytes += suballoc.size;
9717 
9718  // 3. Prepare for next iteration.
9719  lastOffset = suballoc.offset + suballoc.size;
9720  ++nextAlloc1stIndex;
9721  }
9722  // We are at the end.
9723  else
9724  {
9725  if(lastOffset < freeSpace1stTo2ndEnd)
9726  {
9727  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9728  ++unusedRangeCount;
9729  }
9730 
9731  // End of loop.
9732  lastOffset = freeSpace1stTo2ndEnd;
9733  }
9734  }
9735 
9736  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9737  {
9738  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9739  while(lastOffset < size)
9740  {
9741  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9742  while(nextAlloc2ndIndex != SIZE_MAX &&
9743  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9744  {
9745  --nextAlloc2ndIndex;
9746  }
9747 
9748  // Found non-null allocation.
9749  if(nextAlloc2ndIndex != SIZE_MAX)
9750  {
9751  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9752 
9753  // 1. Process free space before this allocation.
9754  if(lastOffset < suballoc.offset)
9755  {
9756  // There is free space from lastOffset to suballoc.offset.
9757  ++unusedRangeCount;
9758  }
9759 
9760  // 2. Process this allocation.
9761  // There is allocation with suballoc.offset, suballoc.size.
9762  ++alloc2ndCount;
9763  usedBytes += suballoc.size;
9764 
9765  // 3. Prepare for next iteration.
9766  lastOffset = suballoc.offset + suballoc.size;
9767  --nextAlloc2ndIndex;
9768  }
9769  // We are at the end.
9770  else
9771  {
9772  if(lastOffset < size)
9773  {
9774  // There is free space from lastOffset to size.
9775  ++unusedRangeCount;
9776  }
9777 
9778  // End of loop.
9779  lastOffset = size;
9780  }
9781  }
9782  }
9783 
9784  const VkDeviceSize unusedBytes = size - usedBytes;
9785  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9786 
9787  // SECOND PASS
9788  lastOffset = 0;
9789 
9790  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9791  {
9792  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9793  size_t nextAlloc2ndIndex = 0;
9794  while(lastOffset < freeSpace2ndTo1stEnd)
9795  {
9796  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9797  while(nextAlloc2ndIndex < suballoc2ndCount &&
9798  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9799  {
9800  ++nextAlloc2ndIndex;
9801  }
9802 
9803  // Found non-null allocation.
9804  if(nextAlloc2ndIndex < suballoc2ndCount)
9805  {
9806  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9807 
9808  // 1. Process free space before this allocation.
9809  if(lastOffset < suballoc.offset)
9810  {
9811  // There is free space from lastOffset to suballoc.offset.
9812  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9813  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9814  }
9815 
9816  // 2. Process this allocation.
9817  // There is allocation with suballoc.offset, suballoc.size.
9818  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9819 
9820  // 3. Prepare for next iteration.
9821  lastOffset = suballoc.offset + suballoc.size;
9822  ++nextAlloc2ndIndex;
9823  }
9824  // We are at the end.
9825  else
9826  {
9827  if(lastOffset < freeSpace2ndTo1stEnd)
9828  {
9829  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9830  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9831  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9832  }
9833 
9834  // End of loop.
9835  lastOffset = freeSpace2ndTo1stEnd;
9836  }
9837  }
9838  }
9839 
9840  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9841  while(lastOffset < freeSpace1stTo2ndEnd)
9842  {
9843  // Find next non-null allocation or move nextAlloc1stIndex to the end.
9844  while(nextAlloc1stIndex < suballoc1stCount &&
9845  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9846  {
9847  ++nextAlloc1stIndex;
9848  }
9849 
9850  // Found non-null allocation.
9851  if(nextAlloc1stIndex < suballoc1stCount)
9852  {
9853  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9854 
9855  // 1. Process free space before this allocation.
9856  if(lastOffset < suballoc.offset)
9857  {
9858  // There is free space from lastOffset to suballoc.offset.
9859  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9860  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9861  }
9862 
9863  // 2. Process this allocation.
9864  // There is allocation with suballoc.offset, suballoc.size.
9865  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9866 
9867  // 3. Prepare for next iteration.
9868  lastOffset = suballoc.offset + suballoc.size;
9869  ++nextAlloc1stIndex;
9870  }
9871  // We are at the end.
9872  else
9873  {
9874  if(lastOffset < freeSpace1stTo2ndEnd)
9875  {
9876  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9877  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9878  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9879  }
9880 
9881  // End of loop.
9882  lastOffset = freeSpace1stTo2ndEnd;
9883  }
9884  }
9885 
9886  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9887  {
9888  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9889  while(lastOffset < size)
9890  {
9891  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9892  while(nextAlloc2ndIndex != SIZE_MAX &&
9893  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9894  {
9895  --nextAlloc2ndIndex;
9896  }
9897 
9898  // Found non-null allocation.
9899  if(nextAlloc2ndIndex != SIZE_MAX)
9900  {
9901  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9902 
9903  // 1. Process free space before this allocation.
9904  if(lastOffset < suballoc.offset)
9905  {
9906  // There is free space from lastOffset to suballoc.offset.
9907  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9908  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9909  }
9910 
9911  // 2. Process this allocation.
9912  // There is allocation with suballoc.offset, suballoc.size.
9913  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9914 
9915  // 3. Prepare for next iteration.
9916  lastOffset = suballoc.offset + suballoc.size;
9917  --nextAlloc2ndIndex;
9918  }
9919  // We are at the end.
9920  else
9921  {
9922  if(lastOffset < size)
9923  {
9924  // There is free space from lastOffset to size.
9925  const VkDeviceSize unusedRangeSize = size - lastOffset;
9926  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9927  }
9928 
9929  // End of loop.
9930  lastOffset = size;
9931  }
9932  }
9933  }
9934 
9935  PrintDetailedMap_End(json);
9936 }
9937 #endif // #if VMA_STATS_STRING_ENABLED
9938 
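// PrintDetailedMap() emits the per-allocation JSON used when a detailed stats
// string is requested. The public entry point (a sketch, assuming an existing
// `allocator`):
#if 0
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE /* detailedMap */);
// Write statsString to a file; it can be visualized with the VmaDumpVis tool
// shipped with the library.
vmaFreeStatsString(allocator, statsString);
#endif
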
9939 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
9940  uint32_t currentFrameIndex,
9941  uint32_t frameInUseCount,
9942  VkDeviceSize bufferImageGranularity,
9943  VkDeviceSize allocSize,
9944  VkDeviceSize allocAlignment,
9945  bool upperAddress,
9946  VmaSuballocationType allocType,
9947  bool canMakeOtherLost,
9948  uint32_t strategy,
9949  VmaAllocationRequest* pAllocationRequest)
9950 {
9951  VMA_ASSERT(allocSize > 0);
9952  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9953  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9954  VMA_HEAVY_ASSERT(Validate());
9955  return upperAddress ?
9956  CreateAllocationRequest_UpperAddress(
9957  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9958  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
9959  CreateAllocationRequest_LowerAddress(
9960  currentFrameIndex, frameInUseCount, bufferImageGranularity,
9961  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
9962 }
9963 
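// The upperAddress path implements the double-stack use of a linear pool:
// such allocations are placed from the end of the block downward. Requested
// through the public API like this (a sketch; assumes `pool` was created with
// VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and `bufCreateInfo` is a filled
// VkBufferCreateInfo):
#if 0
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buf, &alloc, VMA_NULL /* pAllocationInfo not needed */);
#endif
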
9964 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
9965  uint32_t currentFrameIndex,
9966  uint32_t frameInUseCount,
9967  VkDeviceSize bufferImageGranularity,
9968  VkDeviceSize allocSize,
9969  VkDeviceSize allocAlignment,
9970  VmaSuballocationType allocType,
9971  bool canMakeOtherLost,
9972  uint32_t strategy,
9973  VmaAllocationRequest* pAllocationRequest)
9974 {
9975  const VkDeviceSize size = GetSize();
9976  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9977  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9978 
9979  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9980  {
9981  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
9982  return false;
9983  }
9984 
9985  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
9986  if(allocSize > size)
9987  {
9988  return false;
9989  }
9990  VkDeviceSize resultBaseOffset = size - allocSize;
9991  if(!suballocations2nd.empty())
9992  {
9993  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
9994  resultBaseOffset = lastSuballoc.offset - allocSize;
9995  if(allocSize > lastSuballoc.offset)
9996  {
9997  return false;
9998  }
9999  }
10000 
10001  // Start from offset equal to end of free space.
10002  VkDeviceSize resultOffset = resultBaseOffset;
10003 
10004  // Apply VMA_DEBUG_MARGIN at the end.
10005  if(VMA_DEBUG_MARGIN > 0)
10006  {
10007  if(resultOffset < VMA_DEBUG_MARGIN)
10008  {
10009  return false;
10010  }
10011  resultOffset -= VMA_DEBUG_MARGIN;
10012  }
10013 
10014  // Apply alignment.
10015  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10016 
10017  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10018  // Make bigger alignment if necessary.
10019  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10020  {
10021  bool bufferImageGranularityConflict = false;
10022  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10023  {
10024  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10025  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10026  {
10027  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10028  {
10029  bufferImageGranularityConflict = true;
10030  break;
10031  }
10032  }
10033  else
10034  // Already on previous page.
10035  break;
10036  }
10037  if(bufferImageGranularityConflict)
10038  {
10039  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10040  }
10041  }
10042 
10043  // There is enough free space.
10044  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10045  suballocations1st.back().offset + suballocations1st.back().size :
10046  0;
10047  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10048  {
10049  // Check previous suballocations for BufferImageGranularity conflicts.
10050  // If conflict exists, allocation cannot be made here.
10051  if(bufferImageGranularity > 1)
10052  {
10053  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10054  {
10055  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10056  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10057  {
10058  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10059  {
10060  return false;
10061  }
10062  }
10063  else
10064  {
10065  // Already on next page.
10066  break;
10067  }
10068  }
10069  }
10070 
10071  // All tests passed: Success.
10072  pAllocationRequest->offset = resultOffset;
10073  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10074  pAllocationRequest->sumItemSize = 0;
10075  // pAllocationRequest->item unused.
10076  pAllocationRequest->itemsToMakeLostCount = 0;
10077  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10078  return true;
10079  }
10080 
10081  return false;
10082 }
10083 
10084 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
10085  uint32_t currentFrameIndex,
10086  uint32_t frameInUseCount,
10087  VkDeviceSize bufferImageGranularity,
10088  VkDeviceSize allocSize,
10089  VkDeviceSize allocAlignment,
10090  VmaSuballocationType allocType,
10091  bool canMakeOtherLost,
10092  uint32_t strategy,
10093  VmaAllocationRequest* pAllocationRequest)
10094 {
10095  const VkDeviceSize size = GetSize();
10096  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10097  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10098 
10099  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10100  {
10101  // Try to allocate at the end of 1st vector.
10102 
10103  VkDeviceSize resultBaseOffset = 0;
10104  if(!suballocations1st.empty())
10105  {
10106  const VmaSuballocation& lastSuballoc = suballocations1st.back();
10107  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10108  }
10109 
10110  // Start from offset equal to beginning of free space.
10111  VkDeviceSize resultOffset = resultBaseOffset;
10112 
10113  // Apply VMA_DEBUG_MARGIN at the beginning.
10114  if(VMA_DEBUG_MARGIN > 0)
10115  {
10116  resultOffset += VMA_DEBUG_MARGIN;
10117  }
10118 
10119  // Apply alignment.
10120  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10121 
10122  // Check previous suballocations for BufferImageGranularity conflicts.
10123  // Make bigger alignment if necessary.
10124  if(bufferImageGranularity > 1 && !suballocations1st.empty())
10125  {
10126  bool bufferImageGranularityConflict = false;
10127  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10128  {
10129  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10130  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10131  {
10132  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10133  {
10134  bufferImageGranularityConflict = true;
10135  break;
10136  }
10137  }
10138  else
10139  // Already on previous page.
10140  break;
10141  }
10142  if(bufferImageGranularityConflict)
10143  {
10144  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10145  }
10146  }
10147 
10148  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
10149  suballocations2nd.back().offset : size;
10150 
10151  // There is enough free space at the end after alignment.
10152  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
10153  {
10154  // Check next suballocations for BufferImageGranularity conflicts.
10155  // If conflict exists, allocation cannot be made here.
10156  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10157  {
10158  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10159  {
10160  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10161  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10162  {
10163  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10164  {
10165  return false;
10166  }
10167  }
10168  else
10169  {
10170  // Already on previous page.
10171  break;
10172  }
10173  }
10174  }
10175 
10176  // All tests passed: Success.
10177  pAllocationRequest->offset = resultOffset;
10178  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
10179  pAllocationRequest->sumItemSize = 0;
10180  // pAllocationRequest->item, customData unused.
10181  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
10182  pAllocationRequest->itemsToMakeLostCount = 0;
10183  return true;
10184  }
10185  }
10186 
10187  // Wrap around to the end of the 2nd vector. Try to allocate there, treating the
10188  // beginning of the 1st vector as the end of the free space.
10189  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10190  {
10191  VMA_ASSERT(!suballocations1st.empty());
10192 
10193  VkDeviceSize resultBaseOffset = 0;
10194  if(!suballocations2nd.empty())
10195  {
10196  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10197  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
10198  }
10199 
10200  // Start from offset equal to beginning of free space.
10201  VkDeviceSize resultOffset = resultBaseOffset;
10202 
10203  // Apply VMA_DEBUG_MARGIN at the beginning.
10204  if(VMA_DEBUG_MARGIN > 0)
10205  {
10206  resultOffset += VMA_DEBUG_MARGIN;
10207  }
10208 
10209  // Apply alignment.
10210  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
10211 
10212  // Check previous suballocations for BufferImageGranularity conflicts.
10213  // Make bigger alignment if necessary.
10214  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10215  {
10216  bool bufferImageGranularityConflict = false;
10217  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
10218  {
10219  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
10220  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10221  {
10222  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10223  {
10224  bufferImageGranularityConflict = true;
10225  break;
10226  }
10227  }
10228  else
10229  // Already on previous page.
10230  break;
10231  }
10232  if(bufferImageGranularityConflict)
10233  {
10234  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
10235  }
10236  }
10237 
10238  pAllocationRequest->itemsToMakeLostCount = 0;
10239  pAllocationRequest->sumItemSize = 0;
10240  size_t index1st = m_1stNullItemsBeginCount;
10241 
10242  if(canMakeOtherLost)
10243  {
10244  while(index1st < suballocations1st.size() &&
10245  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
10246  {
10247  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
10248  const VmaSuballocation& suballoc = suballocations1st[index1st];
10249  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
10250  {
10251  // No problem.
10252  }
10253  else
10254  {
10255  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10256  if(suballoc.hAllocation->CanBecomeLost() &&
10257  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10258  {
10259  ++pAllocationRequest->itemsToMakeLostCount;
10260  pAllocationRequest->sumItemSize += suballoc.size;
10261  }
10262  else
10263  {
10264  return false;
10265  }
10266  }
10267  ++index1st;
10268  }
10269 
10270  // Check next suballocations for BufferImageGranularity conflicts.
10271  // If conflict exists, we must mark more allocations lost or fail.
10272  if(bufferImageGranularity > 1)
10273  {
10274  while(index1st < suballocations1st.size())
10275  {
10276  const VmaSuballocation& suballoc = suballocations1st[index1st];
10277  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
10278  {
10279  if(suballoc.hAllocation != VK_NULL_HANDLE)
10280  {
10281  // Conservative: not checking the actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
10282  if(suballoc.hAllocation->CanBecomeLost() &&
10283  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10284  {
10285  ++pAllocationRequest->itemsToMakeLostCount;
10286  pAllocationRequest->sumItemSize += suballoc.size;
10287  }
10288  else
10289  {
10290  return false;
10291  }
10292  }
10293  }
10294  else
10295  {
10296  // Already on next page.
10297  break;
10298  }
10299  ++index1st;
10300  }
10301  }
10302 
10303  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
10304  if(index1st == suballocations1st.size() &&
10305  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
10306  {
10307  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
10308  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
10309  }
10310  }
10311 
10312  // There is enough free space at the end after alignment.
10313  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
10314  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
10315  {
10316  // Check next suballocations for BufferImageGranularity conflicts.
10317  // If conflict exists, allocation cannot be made here.
10318  if(bufferImageGranularity > 1)
10319  {
10320  for(size_t nextSuballocIndex = index1st;
10321  nextSuballocIndex < suballocations1st.size();
10322  nextSuballocIndex++)
10323  {
10324  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
10325  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10326  {
10327  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10328  {
10329  return false;
10330  }
10331  }
10332  else
10333  {
10334  // Already on next page.
10335  break;
10336  }
10337  }
10338  }
10339 
10340  // All tests passed: Success.
10341  pAllocationRequest->offset = resultOffset;
10342  pAllocationRequest->sumFreeSize =
10343  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
10344  - resultBaseOffset
10345  - pAllocationRequest->sumItemSize;
10346  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
10347  // pAllocationRequest->item, customData unused.
10348  return true;
10349  }
10350  }
10351 
10352  return false;
10353 }
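// Editorial note: a minimal worked example of the allocation-request logic
// above (illustrative sketch only, not part of the library; numbers assumed).
// Assume block size = 1000, VMA_DEBUG_MARGIN = 0, allocAlignment = 1,
//   suballocations1st = [ {offset=0, size=200}, {offset=200, size=300} ],
//   suballocations2nd empty (SECOND_VECTOR_EMPTY).
// Request allocSize = 400: the end of 1st starts at 500 and 500 + 400 <= 1000,
// so the request succeeds as VmaAllocationRequestType::EndOf1st at offset 500.
// Request allocSize = 600: only 500 bytes remain after 1st, so the code wraps
// around (resultBaseOffset = 0) and must fit before suballocations1st[0].offset,
// which is 0 here, so the function returns false.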
10354 
10355 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
10356  uint32_t currentFrameIndex,
10357  uint32_t frameInUseCount,
10358  VmaAllocationRequest* pAllocationRequest)
10359 {
10360  if(pAllocationRequest->itemsToMakeLostCount == 0)
10361  {
10362  return true;
10363  }
10364 
10365  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
10366 
10367  // We always start from 1st.
10368  SuballocationVectorType* suballocations = &AccessSuballocations1st();
10369  size_t index = m_1stNullItemsBeginCount;
10370  size_t madeLostCount = 0;
10371  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
10372  {
10373  if(index == suballocations->size())
10374  {
10375  index = 0;
10376  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
10377  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10378  {
10379  suballocations = &AccessSuballocations2nd();
10380  }
10381  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
10382  // suballocations keeps pointing at AccessSuballocations1st().
10383  VMA_ASSERT(!suballocations->empty());
10384  }
10385  VmaSuballocation& suballoc = (*suballocations)[index];
10386  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10387  {
10388  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
10389  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
10390  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10391  {
10392  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10393  suballoc.hAllocation = VK_NULL_HANDLE;
10394  m_SumFreeSize += suballoc.size;
10395  if(suballocations == &AccessSuballocations1st())
10396  {
10397  ++m_1stNullItemsMiddleCount;
10398  }
10399  else
10400  {
10401  ++m_2ndNullItemsCount;
10402  }
10403  ++madeLostCount;
10404  }
10405  else
10406  {
10407  return false;
10408  }
10409  }
10410  ++index;
10411  }
10412 
10413  CleanupAfterFree();
10414  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
10415 
10416  return true;
10417 }
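// Editorial note (illustrative, not part of the library): the condition used
// when making allocations lost is
//   GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex.
// For example, with frameInUseCount = 2, an allocation last used in frame 10
// can first be made lost in frame 13 (10 + 2 < 13), i.e. once the application's
// frame pacing guarantees the GPU can no longer be reading it.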
10418 
10419 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10420 {
10421  uint32_t lostAllocationCount = 0;
10422 
10423  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10424  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10425  {
10426  VmaSuballocation& suballoc = suballocations1st[i];
10427  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10428  suballoc.hAllocation->CanBecomeLost() &&
10429  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10430  {
10431  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10432  suballoc.hAllocation = VK_NULL_HANDLE;
10433  ++m_1stNullItemsMiddleCount;
10434  m_SumFreeSize += suballoc.size;
10435  ++lostAllocationCount;
10436  }
10437  }
10438 
10439  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10440  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10441  {
10442  VmaSuballocation& suballoc = suballocations2nd[i];
10443  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
10444  suballoc.hAllocation->CanBecomeLost() &&
10445  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
10446  {
10447  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10448  suballoc.hAllocation = VK_NULL_HANDLE;
10449  ++m_2ndNullItemsCount;
10450  m_SumFreeSize += suballoc.size;
10451  ++lostAllocationCount;
10452  }
10453  }
10454 
10455  if(lostAllocationCount)
10456  {
10457  CleanupAfterFree();
10458  }
10459 
10460  return lostAllocationCount;
10461 }
10462 
10463 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
10464 {
10465  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10466  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
10467  {
10468  const VmaSuballocation& suballoc = suballocations1st[i];
10469  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10470  {
10471  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10472  {
10473  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10474  return VK_ERROR_VALIDATION_FAILED_EXT;
10475  }
10476  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10477  {
10478  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10479  return VK_ERROR_VALIDATION_FAILED_EXT;
10480  }
10481  }
10482  }
10483 
10484  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10485  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10486  {
10487  const VmaSuballocation& suballoc = suballocations2nd[i];
10488  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10489  {
10490  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10491  {
10492  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10493  return VK_ERROR_VALIDATION_FAILED_EXT;
10494  }
10495  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10496  {
10497  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10498  return VK_ERROR_VALIDATION_FAILED_EXT;
10499  }
10500  }
10501  }
10502 
10503  return VK_SUCCESS;
10504 }
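// Editorial note (illustrative, not part of the library): memory layout
// assumed by the corruption checks above, with VMA_DEBUG_MARGIN bytes of
// magic values written around every allocation:
//
//   ... | magic | allocation data | magic | ...
//       ^ offset - VMA_DEBUG_MARGIN       ^ offset + size
//
// VmaValidateMagicValue() re-reads the pattern that VmaWriteMagicValue()
// filled into the margin; any mismatch indicates an out-of-bounds write.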
10505 
10506 void VmaBlockMetadata_Linear::Alloc(
10507  const VmaAllocationRequest& request,
10508  VmaSuballocationType type,
10509  VkDeviceSize allocSize,
10510  VmaAllocation hAllocation)
10511 {
10512  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
10513 
10514  switch(request.type)
10515  {
10516  case VmaAllocationRequestType::UpperAddress:
10517  {
10518  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
10519  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
10520  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10521  suballocations2nd.push_back(newSuballoc);
10522  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
10523  }
10524  break;
10525  case VmaAllocationRequestType::EndOf1st:
10526  {
10527  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10528 
10529  VMA_ASSERT(suballocations1st.empty() ||
10530  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
10531  // Check if it fits before the end of the block.
10532  VMA_ASSERT(request.offset + allocSize <= GetSize());
10533 
10534  suballocations1st.push_back(newSuballoc);
10535  }
10536  break;
10537  case VmaAllocationRequestType::EndOf2nd:
10538  {
10539  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10540  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
10541  VMA_ASSERT(!suballocations1st.empty() &&
10542  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
10543  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10544 
10545  switch(m_2ndVectorMode)
10546  {
10547  case SECOND_VECTOR_EMPTY:
10548  // First allocation from second part ring buffer.
10549  VMA_ASSERT(suballocations2nd.empty());
10550  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
10551  break;
10552  case SECOND_VECTOR_RING_BUFFER:
10553  // 2-part ring buffer is already started.
10554  VMA_ASSERT(!suballocations2nd.empty());
10555  break;
10556  case SECOND_VECTOR_DOUBLE_STACK:
10557  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
10558  break;
10559  default:
10560  VMA_ASSERT(0);
10561  }
10562 
10563  suballocations2nd.push_back(newSuballoc);
10564  }
10565  break;
10566  default:
10567  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
10568  }
10569 
10570  m_SumFreeSize -= newSuballoc.size;
10571 }
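// Editorial note (illustrative, not part of the library): the three request
// types handled above correspond to these placements within one block
// (lower offsets on the left):
//
//   EndOf1st:     [1st ......... new]                    (plain linear stack)
//   UpperAddress: [1st ....]          [new ........ 2nd] (double stack)
//   EndOf2nd:     [2nd ... new][free][1st .............] (2-part ring buffer)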
10572 
10573 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10574 {
10575  FreeAtOffset(allocation->GetOffset());
10576 }
10577 
10578 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
10579 {
10580  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10581  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10582 
10583  if(!suballocations1st.empty())
10584  {
10585  // If it is the first live allocation in 1st vector: mark it free and extend the run of null items at the beginning.
10586  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10587  if(firstSuballoc.offset == offset)
10588  {
10589  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10590  firstSuballoc.hAllocation = VK_NULL_HANDLE;
10591  m_SumFreeSize += firstSuballoc.size;
10592  ++m_1stNullItemsBeginCount;
10593  CleanupAfterFree();
10594  return;
10595  }
10596  }
10597 
10598  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
10599  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
10600  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10601  {
10602  VmaSuballocation& lastSuballoc = suballocations2nd.back();
10603  if(lastSuballoc.offset == offset)
10604  {
10605  m_SumFreeSize += lastSuballoc.size;
10606  suballocations2nd.pop_back();
10607  CleanupAfterFree();
10608  return;
10609  }
10610  }
10611  // Last allocation in 1st vector.
10612  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
10613  {
10614  VmaSuballocation& lastSuballoc = suballocations1st.back();
10615  if(lastSuballoc.offset == offset)
10616  {
10617  m_SumFreeSize += lastSuballoc.size;
10618  suballocations1st.pop_back();
10619  CleanupAfterFree();
10620  return;
10621  }
10622  }
10623 
10624  // Item from the middle of 1st vector.
10625  {
10626  VmaSuballocation refSuballoc;
10627  refSuballoc.offset = offset;
10628  // The remaining members stay intentionally uninitialized for better performance.
10629  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
10630  suballocations1st.begin() + m_1stNullItemsBeginCount,
10631  suballocations1st.end(),
10632  refSuballoc,
10633  VmaSuballocationOffsetLess());
10634  if(it != suballocations1st.end())
10635  {
10636  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10637  it->hAllocation = VK_NULL_HANDLE;
10638  ++m_1stNullItemsMiddleCount;
10639  m_SumFreeSize += it->size;
10640  CleanupAfterFree();
10641  return;
10642  }
10643  }
10644 
10645  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
10646  {
10647  // Item from the middle of 2nd vector.
10648  VmaSuballocation refSuballoc;
10649  refSuballoc.offset = offset;
10650  // The remaining members stay intentionally uninitialized for better performance.
10651  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
10652  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
10653  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
10654  if(it != suballocations2nd.end())
10655  {
10656  it->type = VMA_SUBALLOCATION_TYPE_FREE;
10657  it->hAllocation = VK_NULL_HANDLE;
10658  ++m_2ndNullItemsCount;
10659  m_SumFreeSize += it->size;
10660  CleanupAfterFree();
10661  return;
10662  }
10663  }
10664 
10665  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
10666 }
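// Editorial note (illustrative, not part of the library): the binary searches
// above rely on the ordering invariants of the two vectors. 1st is sorted by
// increasing offset. 2nd is sorted by increasing offset in ring-buffer mode
// but by decreasing offset in double-stack mode, e.g. with block size 1000:
//   suballocations2nd = [ {offset=900}, {offset=800}, {offset=700} ]
// hence VmaSuballocationOffsetGreater() is the comparator that finds
// offset 800 in O(log n) in double-stack mode.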
10667 
10668 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10669 {
10670  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10671  const size_t suballocCount = AccessSuballocations1st().size();
10672  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10673 }
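// Editorial note (illustrative, not part of the library): the heuristic above
// compacts 1st once null (freed) items reach at least 1.5x the live items.
// E.g. 100 suballocations with 60 null: 60*2 = 120 >= (100-60)*3 = 120, so
// compaction runs; with only 59 null, 118 >= 123 is false and it does not.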
10674 
10675 void VmaBlockMetadata_Linear::CleanupAfterFree()
10676 {
10677  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10678  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10679 
10680  if(IsEmpty())
10681  {
10682  suballocations1st.clear();
10683  suballocations2nd.clear();
10684  m_1stNullItemsBeginCount = 0;
10685  m_1stNullItemsMiddleCount = 0;
10686  m_2ndNullItemsCount = 0;
10687  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10688  }
10689  else
10690  {
10691  const size_t suballoc1stCount = suballocations1st.size();
10692  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10693  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
10694 
10695  // Find more null items at the beginning of 1st vector.
10696  while(m_1stNullItemsBeginCount < suballoc1stCount &&
10697  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10698  {
10699  ++m_1stNullItemsBeginCount;
10700  --m_1stNullItemsMiddleCount;
10701  }
10702 
10703  // Find more null items at the end of 1st vector.
10704  while(m_1stNullItemsMiddleCount > 0 &&
10705  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
10706  {
10707  --m_1stNullItemsMiddleCount;
10708  suballocations1st.pop_back();
10709  }
10710 
10711  // Find more null items at the end of 2nd vector.
10712  while(m_2ndNullItemsCount > 0 &&
10713  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
10714  {
10715  --m_2ndNullItemsCount;
10716  suballocations2nd.pop_back();
10717  }
10718 
10719  // Find more null items at the beginning of 2nd vector.
10720  while(m_2ndNullItemsCount > 0 &&
10721  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
10722  {
10723  --m_2ndNullItemsCount;
10724  VmaVectorRemove(suballocations2nd, 0);
10725  }
10726 
10727  if(ShouldCompact1st())
10728  {
10729  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
10730  size_t srcIndex = m_1stNullItemsBeginCount;
10731  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
10732  {
10733  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
10734  {
10735  ++srcIndex;
10736  }
10737  if(dstIndex != srcIndex)
10738  {
10739  suballocations1st[dstIndex] = suballocations1st[srcIndex];
10740  }
10741  ++srcIndex;
10742  }
10743  suballocations1st.resize(nonNullItemCount);
10744  m_1stNullItemsBeginCount = 0;
10745  m_1stNullItemsMiddleCount = 0;
10746  }
10747 
10748  // 2nd vector became empty.
10749  if(suballocations2nd.empty())
10750  {
10751  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10752  }
10753 
10754  // 1st vector became empty.
10755  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
10756  {
10757  suballocations1st.clear();
10758  m_1stNullItemsBeginCount = 0;
10759 
10760  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10761  {
10762  // Swap 1st with 2nd. Now 2nd is empty.
10763  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
10764  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
10765  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
10766  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
10767  {
10768  ++m_1stNullItemsBeginCount;
10769  --m_1stNullItemsMiddleCount;
10770  }
10771  m_2ndNullItemsCount = 0;
10772  m_1stVectorIndex ^= 1;
10773  }
10774  }
10775  }
10776 
10777  VMA_HEAVY_ASSERT(Validate());
10778 }
10779 
10780 
10781 ////////////////////////////////////////////////////////////////////////////////
10782 // class VmaBlockMetadata_Buddy
10783 
10784 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
10785  VmaBlockMetadata(hAllocator),
10786  m_Root(VMA_NULL),
10787  m_AllocationCount(0),
10788  m_FreeCount(1),
10789  m_SumFreeSize(0)
10790 {
10791  memset(m_FreeList, 0, sizeof(m_FreeList));
10792 }
10793 
10794 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
10795 {
10796  DeleteNode(m_Root);
10797 }
10798 
10799 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10800 {
10801  VmaBlockMetadata::Init(size);
10802 
10803  m_UsableSize = VmaPrevPow2(size);
10804  m_SumFreeSize = m_UsableSize;
10805 
10806  // Calculate m_LevelCount.
10807  m_LevelCount = 1;
10808  while(m_LevelCount < MAX_LEVELS &&
10809  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10810  {
10811  ++m_LevelCount;
10812  }
10813 
10814  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10815  rootNode->offset = 0;
10816  rootNode->type = Node::TYPE_FREE;
10817  rootNode->parent = VMA_NULL;
10818  rootNode->buddy = VMA_NULL;
10819 
10820  m_Root = rootNode;
10821  AddToFreeListFront(0, rootNode);
10822 }
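// Editorial note: a worked example of the initialization above (illustrative
// only; assumes MIN_NODE_SIZE = 32 and LevelToNodeSize(level) = m_UsableSize >> level).
// For a block of size 100: m_UsableSize = VmaPrevPow2(100) = 64, and the loop
// yields level 0 -> 64, level 1 -> 32, so m_LevelCount = 2; the remaining
// 100 - 64 = 36 bytes are the unusable tail reported by GetUnusableSize().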
10823 
10824 bool VmaBlockMetadata_Buddy::Validate() const
10825 {
10826  // Validate tree.
10827  ValidationContext ctx;
10828  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
10829  {
10830  VMA_VALIDATE(false && "ValidateNode failed.");
10831  }
10832  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
10833  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
10834 
10835  // Validate free node lists.
10836  for(uint32_t level = 0; level < m_LevelCount; ++level)
10837  {
10838  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
10839  m_FreeList[level].front->free.prev == VMA_NULL);
10840 
10841  for(Node* node = m_FreeList[level].front;
10842  node != VMA_NULL;
10843  node = node->free.next)
10844  {
10845  VMA_VALIDATE(node->type == Node::TYPE_FREE);
10846 
10847  if(node->free.next == VMA_NULL)
10848  {
10849  VMA_VALIDATE(m_FreeList[level].back == node);
10850  }
10851  else
10852  {
10853  VMA_VALIDATE(node->free.next->free.prev == node);
10854  }
10855  }
10856  }
10857 
10858  // Validate that free lists at higher levels are empty.
10859  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
10860  {
10861  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
10862  }
10863 
10864  return true;
10865 }
10866 
10867 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10868 {
10869  for(uint32_t level = 0; level < m_LevelCount; ++level)
10870  {
10871  if(m_FreeList[level].front != VMA_NULL)
10872  {
10873  return LevelToNodeSize(level);
10874  }
10875  }
10876  return 0;
10877 }
10878 
10879 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10880 {
10881  const VkDeviceSize unusableSize = GetUnusableSize();
10882 
10883  outInfo.blockCount = 1;
10884 
10885  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10886  outInfo.usedBytes = outInfo.unusedBytes = 0;
10887 
10888  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10889  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10890  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10891 
10892  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10893 
10894  if(unusableSize > 0)
10895  {
10896  ++outInfo.unusedRangeCount;
10897  outInfo.unusedBytes += unusableSize;
10898  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10899  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10900  }
10901 }
10902 
10903 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10904 {
10905  const VkDeviceSize unusableSize = GetUnusableSize();
10906 
10907  inoutStats.size += GetSize();
10908  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10909  inoutStats.allocationCount += m_AllocationCount;
10910  inoutStats.unusedRangeCount += m_FreeCount;
10911  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10912 
10913  if(unusableSize > 0)
10914  {
10915  ++inoutStats.unusedRangeCount;
10916  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10917  }
10918 }
10919 
10920 #if VMA_STATS_STRING_ENABLED
10921 
10922 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10923 {
10924  // TODO optimize
10925  VmaStatInfo stat;
10926  CalcAllocationStatInfo(stat);
10927 
10928  PrintDetailedMap_Begin(
10929  json,
10930  stat.unusedBytes,
10931  stat.allocationCount,
10932  stat.unusedRangeCount);
10933 
10934  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10935 
10936  const VkDeviceSize unusableSize = GetUnusableSize();
10937  if(unusableSize > 0)
10938  {
10939  PrintDetailedMap_UnusedRange(json,
10940  m_UsableSize, // offset
10941  unusableSize); // size
10942  }
10943 
10944  PrintDetailedMap_End(json);
10945 }
10946 
10947 #endif // #if VMA_STATS_STRING_ENABLED
10948 
10949 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10950  uint32_t currentFrameIndex,
10951  uint32_t frameInUseCount,
10952  VkDeviceSize bufferImageGranularity,
10953  VkDeviceSize allocSize,
10954  VkDeviceSize allocAlignment,
10955  bool upperAddress,
10956  VmaSuballocationType allocType,
10957  bool canMakeOtherLost,
10958  uint32_t strategy,
10959  VmaAllocationRequest* pAllocationRequest)
10960 {
10961  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10962 
10963  // Simple way to respect bufferImageGranularity. May be optimized some day.
10964  // Whenever it might be an OPTIMAL image...
10965  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10966  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10967  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10968  {
10969  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10970  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10971  }
10972 
10973  if(allocSize > m_UsableSize)
10974  {
10975  return false;
10976  }
10977 
10978  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10979  for(uint32_t level = targetLevel + 1; level--; )
10980  {
10981  for(Node* freeNode = m_FreeList[level].front;
10982  freeNode != VMA_NULL;
10983  freeNode = freeNode->free.next)
10984  {
10985  if(freeNode->offset % allocAlignment == 0)
10986  {
10987  pAllocationRequest->type = VmaAllocationRequestType::Normal;
10988  pAllocationRequest->offset = freeNode->offset;
10989  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10990  pAllocationRequest->sumItemSize = 0;
10991  pAllocationRequest->itemsToMakeLostCount = 0;
10992  pAllocationRequest->customData = (void*)(uintptr_t)level;
10993  return true;
10994  }
10995  }
10996  }
10997 
10998  return false;
10999 }
11000 
11001 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11002  uint32_t currentFrameIndex,
11003  uint32_t frameInUseCount,
11004  VmaAllocationRequest* pAllocationRequest)
11005 {
11006  /*
11007  Lost allocations are not supported in buddy allocator at the moment.
11008  Support might be added in the future.
11009  */
11010  return pAllocationRequest->itemsToMakeLostCount == 0;
11011 }
11012 
11013 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11014 {
11015  /*
11016  Lost allocations are not supported in buddy allocator at the moment.
11017  Support might be added in the future.
11018  */
11019  return 0;
11020 }
11021 
11022 void VmaBlockMetadata_Buddy::Alloc(
11023  const VmaAllocationRequest& request,
11024  VmaSuballocationType type,
11025  VkDeviceSize allocSize,
11026  VmaAllocation hAllocation)
11027 {
11028  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11029 
11030  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11031  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11032 
11033  Node* currNode = m_FreeList[currLevel].front;
11034  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11035  while(currNode->offset != request.offset)
11036  {
11037  currNode = currNode->free.next;
11038  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11039  }
11040 
11041  // Go down, splitting free nodes.
11042  while(currLevel < targetLevel)
11043  {
11044  // currNode is already first free node at currLevel.
11045  // Remove it from list of free nodes at this currLevel.
11046  RemoveFromFreeList(currLevel, currNode);
11047 
11048  const uint32_t childrenLevel = currLevel + 1;
11049 
11050  // Create two free sub-nodes.
11051  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11052  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11053 
11054  leftChild->offset = currNode->offset;
11055  leftChild->type = Node::TYPE_FREE;
11056  leftChild->parent = currNode;
11057  leftChild->buddy = rightChild;
11058 
11059  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11060  rightChild->type = Node::TYPE_FREE;
11061  rightChild->parent = currNode;
11062  rightChild->buddy = leftChild;
11063 
11064  // Convert current currNode to split type.
11065  currNode->type = Node::TYPE_SPLIT;
11066  currNode->split.leftChild = leftChild;
11067 
11068  // Add child nodes to free list. Order is important!
11069  AddToFreeListFront(childrenLevel, rightChild);
11070  AddToFreeListFront(childrenLevel, leftChild);
11071 
11072  ++m_FreeCount;
11073  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11074  ++currLevel;
11075  currNode = m_FreeList[currLevel].front;
11076 
11077  /*
11078  We can be sure that currNode, as left child of node previously split,
11079  also fulfills the alignment requirement.
11080  */
11081  }
11082 
11083  // Remove from free list.
11084  VMA_ASSERT(currLevel == targetLevel &&
11085  currNode != VMA_NULL &&
11086  currNode->type == Node::TYPE_FREE);
11087  RemoveFromFreeList(currLevel, currNode);
11088 
11089  // Convert to allocation node.
11090  currNode->type = Node::TYPE_ALLOCATION;
11091  currNode->allocation.alloc = hAllocation;
11092 
11093  ++m_AllocationCount;
11094  --m_FreeCount;
11095  m_SumFreeSize -= allocSize;
11096 }
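// Editorial note (illustrative, not part of the library): splitting sketch for
// the loop above, with usable size 64 and a request that maps to level 2
// (node size 16) served from a free level-0 node:
//   level 0: [------------- 64 -------------]  -> split
//   level 1: [---- 32 ----][---- 32 ----]      -> left child split again
//   level 2: [ 16 ][ 16 ]                      -> left child allocated
// Each split nets +1 to m_FreeCount, and the children are pushed to the front
// of the next level's free list left child last, so it is found first.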
11097 
11098 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
11099 {
11100  if(node->type == Node::TYPE_SPLIT)
11101  {
11102  DeleteNode(node->split.leftChild->buddy);
11103  DeleteNode(node->split.leftChild);
11104  }
11105 
11106  vma_delete(GetAllocationCallbacks(), node);
11107 }
11108 
11109 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
11110 {
11111  VMA_VALIDATE(level < m_LevelCount);
11112  VMA_VALIDATE(curr->parent == parent);
11113  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
11114  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
11115  switch(curr->type)
11116  {
11117  case Node::TYPE_FREE:
11118  // curr->free.prev, next are validated separately.
11119  ctx.calculatedSumFreeSize += levelNodeSize;
11120  ++ctx.calculatedFreeCount;
11121  break;
11122  case Node::TYPE_ALLOCATION:
11123  ++ctx.calculatedAllocationCount;
11124  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
11125  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
11126  break;
11127  case Node::TYPE_SPLIT:
11128  {
11129  const uint32_t childrenLevel = level + 1;
11130  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
11131  const Node* const leftChild = curr->split.leftChild;
11132  VMA_VALIDATE(leftChild != VMA_NULL);
11133  VMA_VALIDATE(leftChild->offset == curr->offset);
11134  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
11135  {
11136  VMA_VALIDATE(false && "ValidateNode for left child failed.");
11137  }
11138  const Node* const rightChild = leftChild->buddy;
11139  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
11140  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
11141  {
11142  VMA_VALIDATE(false && "ValidateNode for right child failed.");
11143  }
11144  }
11145  break;
11146  default:
11147  return false;
11148  }
11149 
11150  return true;
11151 }
11152 
11153 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
11154 {
11155  // I know this could be optimized somehow e.g. by using std::bit_width (called std::log2p1 in earlier C++20 drafts).
11156  uint32_t level = 0;
11157  VkDeviceSize currLevelNodeSize = m_UsableSize;
11158  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
11159  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
11160  {
11161  ++level;
11162  currLevelNodeSize = nextLevelNodeSize;
11163  nextLevelNodeSize = currLevelNodeSize >> 1;
11164  }
11165  return level;
11166 }
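// Editorial note (illustrative, not part of the library): with m_UsableSize = 64
// the function above maps allocSize 33..64 -> level 0 (node 64), 17..32 ->
// level 1 (node 32), 9..16 -> level 2 (node 16), and so on, bounded by
// m_LevelCount.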
11167 
11168 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
11169 {
11170  // Find node and level.
11171  Node* node = m_Root;
11172  VkDeviceSize nodeOffset = 0;
11173  uint32_t level = 0;
11174  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
11175  while(node->type == Node::TYPE_SPLIT)
11176  {
11177  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
11178  if(offset < nodeOffset + nextLevelSize)
11179  {
11180  node = node->split.leftChild;
11181  }
11182  else
11183  {
11184  node = node->split.leftChild->buddy;
11185  nodeOffset += nextLevelSize;
11186  }
11187  ++level;
11188  levelNodeSize = nextLevelSize;
11189  }
11190 
11191  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
11192  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
11193 
11194  ++m_FreeCount;
11195  --m_AllocationCount;
11196  m_SumFreeSize += alloc->GetSize();
11197 
11198  node->type = Node::TYPE_FREE;
11199 
11200  // Join free nodes if possible.
11201  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
11202  {
11203  RemoveFromFreeList(level, node->buddy);
11204  Node* const parent = node->parent;
11205 
11206  vma_delete(GetAllocationCallbacks(), node->buddy);
11207  vma_delete(GetAllocationCallbacks(), node);
11208  parent->type = Node::TYPE_FREE;
11209 
11210  node = parent;
11211  --level;
11212  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
11213  --m_FreeCount;
11214  }
11215 
11216  AddToFreeListFront(level, node);
11217 }
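// Editorial note (illustrative, not part of the library): merge sketch for the
// loop above. Freeing allocation X whose buddy B is already free collapses
// both into their parent, and the merge can cascade toward the root:
//   level 2: [X][B]  -> X and B deleted, parent marked free
//   level 1: parent's buddy also free? -> merge again, and so on.
// Each merge turns two free nodes into one, hence --m_FreeCount per iteration.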
11218 
11219 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
11220 {
11221  switch(node->type)
11222  {
11223  case Node::TYPE_FREE:
11224  ++outInfo.unusedRangeCount;
11225  outInfo.unusedBytes += levelNodeSize;
11226  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
11227  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
11228  break;
11229  case Node::TYPE_ALLOCATION:
11230  {
11231  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11232  ++outInfo.allocationCount;
11233  outInfo.usedBytes += allocSize;
11234  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
11235  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
11236 
11237  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
11238  if(unusedRangeSize > 0)
11239  {
11240  ++outInfo.unusedRangeCount;
11241  outInfo.unusedBytes += unusedRangeSize;
11242  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
11243  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
11244  }
11245  }
11246  break;
11247  case Node::TYPE_SPLIT:
11248  {
11249  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11250  const Node* const leftChild = node->split.leftChild;
11251  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
11252  const Node* const rightChild = leftChild->buddy;
11253  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
11254  }
11255  break;
11256  default:
11257  VMA_ASSERT(0);
11258  }
11259 }
11260 
11261 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
11262 {
11263  VMA_ASSERT(node->type == Node::TYPE_FREE);
11264 
11265  // List is empty.
11266  Node* const frontNode = m_FreeList[level].front;
11267  if(frontNode == VMA_NULL)
11268  {
11269  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
11270  node->free.prev = node->free.next = VMA_NULL;
11271  m_FreeList[level].front = m_FreeList[level].back = node;
11272  }
11273  else
11274  {
11275  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
11276  node->free.prev = VMA_NULL;
11277  node->free.next = frontNode;
11278  frontNode->free.prev = node;
11279  m_FreeList[level].front = node;
11280  }
11281 }
11282 
11283 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
11284 {
11285  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
11286 
11287  // It is at the front.
11288  if(node->free.prev == VMA_NULL)
11289  {
11290  VMA_ASSERT(m_FreeList[level].front == node);
11291  m_FreeList[level].front = node->free.next;
11292  }
11293  else
11294  {
11295  Node* const prevFreeNode = node->free.prev;
11296  VMA_ASSERT(prevFreeNode->free.next == node);
11297  prevFreeNode->free.next = node->free.next;
11298  }
11299 
11300  // It is at the back.
11301  if(node->free.next == VMA_NULL)
11302  {
11303  VMA_ASSERT(m_FreeList[level].back == node);
11304  m_FreeList[level].back = node->free.prev;
11305  }
11306  else
11307  {
11308  Node* const nextFreeNode = node->free.next;
11309  VMA_ASSERT(nextFreeNode->free.prev == node);
11310  nextFreeNode->free.prev = node->free.prev;
11311  }
11312 }
11313 
11314 #if VMA_STATS_STRING_ENABLED
11315 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
11316 {
11317  switch(node->type)
11318  {
11319  case Node::TYPE_FREE:
11320  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
11321  break;
11322  case Node::TYPE_ALLOCATION:
11323  {
11324  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
11325  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
11326  if(allocSize < levelNodeSize)
11327  {
11328  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
11329  }
11330  }
11331  break;
11332  case Node::TYPE_SPLIT:
11333  {
11334  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
11335  const Node* const leftChild = node->split.leftChild;
11336  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
11337  const Node* const rightChild = leftChild->buddy;
11338  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
11339  }
11340  break;
11341  default:
11342  VMA_ASSERT(0);
11343  }
11344 }
11345 #endif // #if VMA_STATS_STRING_ENABLED
11346 
11347 
11348 ////////////////////////////////////////////////////////////////////////////////
11349 // class VmaDeviceMemoryBlock
11350 
11351 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
11352  m_pMetadata(VMA_NULL),
11353  m_MemoryTypeIndex(UINT32_MAX),
11354  m_Id(0),
11355  m_hMemory(VK_NULL_HANDLE),
11356  m_MapCount(0),
11357  m_pMappedData(VMA_NULL)
11358 {
11359 }
11360 
11361 void VmaDeviceMemoryBlock::Init(
11362  VmaAllocator hAllocator,
11363  VmaPool hParentPool,
11364  uint32_t newMemoryTypeIndex,
11365  VkDeviceMemory newMemory,
11366  VkDeviceSize newSize,
11367  uint32_t id,
11368  uint32_t algorithm)
11369 {
11370  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11371 
11372  m_hParentPool = hParentPool;
11373  m_MemoryTypeIndex = newMemoryTypeIndex;
11374  m_Id = id;
11375  m_hMemory = newMemory;
11376 
11377  switch(algorithm)
11378  {
11379  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
11380  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
11381  break;
11382  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
11383  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
11384  break;
11385  default:
11386  VMA_ASSERT(0);
11387  // Fall-through.
11388  case 0:
11389  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
11390  }
11391  m_pMetadata->Init(newSize);
11392 }
11393 
11394 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
11395 {
11396  // This is the most important assert in the entire library.
11397  // Hitting it means you have a memory leak: unreleased VmaAllocation objects.
11398  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
11399 
11400  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
11401  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
11402  m_hMemory = VK_NULL_HANDLE;
11403 
11404  vma_delete(allocator, m_pMetadata);
11405  m_pMetadata = VMA_NULL;
11406 }
11407 
11408 bool VmaDeviceMemoryBlock::Validate() const
11409 {
11410  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
11411  (m_pMetadata->GetSize() != 0));
11412 
11413  return m_pMetadata->Validate();
11414 }
11415 
11416 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
11417 {
11418  void* pData = nullptr;
11419  VkResult res = Map(hAllocator, 1, &pData);
11420  if(res != VK_SUCCESS)
11421  {
11422  return res;
11423  }
11424 
11425  res = m_pMetadata->CheckCorruption(pData);
11426 
11427  Unmap(hAllocator, 1);
11428 
11429  return res;
11430 }
11431 
11432 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11433 {
11434  if(count == 0)
11435  {
11436  return VK_SUCCESS;
11437  }
11438 
11439  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11440  if(m_MapCount != 0)
11441  {
11442  m_MapCount += count;
11443  VMA_ASSERT(m_pMappedData != VMA_NULL);
11444  if(ppData != VMA_NULL)
11445  {
11446  *ppData = m_pMappedData;
11447  }
11448  return VK_SUCCESS;
11449  }
11450  else
11451  {
11452  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11453  hAllocator->m_hDevice,
11454  m_hMemory,
11455  0, // offset
11456  VK_WHOLE_SIZE,
11457  0, // flags
11458  &m_pMappedData);
11459  if(result == VK_SUCCESS)
11460  {
11461  if(ppData != VMA_NULL)
11462  {
11463  *ppData = m_pMappedData;
11464  }
11465  m_MapCount = count;
11466  }
11467  return result;
11468  }
11469 }
11470 
11471 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
11472 {
11473  if(count == 0)
11474  {
11475  return;
11476  }
11477 
11478  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11479  if(m_MapCount >= count)
11480  {
11481  m_MapCount -= count;
11482  if(m_MapCount == 0)
11483  {
11484  m_pMappedData = VMA_NULL;
11485  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11486  }
11487  }
11488  else
11489  {
11490  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11491  }
11492 }
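// Editorial note (illustrative, not part of the library): Map()/Unmap() above
// are reference-counted per block. A hypothetical pair of call sites on the
// same block behaves like this:
//   void* p1; block.Map(hAllocator, 1, &p1);  // m_MapCount 0 -> 1, calls vkMapMemory
//   void* p2; block.Map(hAllocator, 1, &p2);  // m_MapCount 1 -> 2, p2 == p1
//   block.Unmap(hAllocator, 1);               // m_MapCount 2 -> 1
//   block.Unmap(hAllocator, 1);               // m_MapCount 1 -> 0, calls vkUnmapMemory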
11493 
11494 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11495 {
11496  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11497  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11498 
11499  void* pData;
11500  VkResult res = Map(hAllocator, 1, &pData);
11501  if(res != VK_SUCCESS)
11502  {
11503  return res;
11504  }
11505 
11506  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11507  VmaWriteMagicValue(pData, allocOffset + allocSize);
11508 
11509  Unmap(hAllocator, 1);
11510 
11511  return VK_SUCCESS;
11512 }
11513 
11514 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11515 {
11516  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11517  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11518 
11519  void* pData;
11520  VkResult res = Map(hAllocator, 1, &pData);
11521  if(res != VK_SUCCESS)
11522  {
11523  return res;
11524  }
11525 
11526  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11527  {
11528  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11529  }
11530  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11531  {
11532  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11533  }
11534 
11535  Unmap(hAllocator, 1);
11536 
11537  return VK_SUCCESS;
11538 }
11539 
11540 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
11541  const VmaAllocator hAllocator,
11542  const VmaAllocation hAllocation,
11543  VkDeviceSize allocationLocalOffset,
11544  VkBuffer hBuffer,
11545  const void* pNext)
11546 {
11547  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11548  hAllocation->GetBlock() == this);
11549  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11550  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11551  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11552  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11553  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11554  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
11555 }
11556 
11557 VkResult VmaDeviceMemoryBlock::BindImageMemory(
11558  const VmaAllocator hAllocator,
11559  const VmaAllocation hAllocation,
11560  VkDeviceSize allocationLocalOffset,
11561  VkImage hImage,
11562  const void* pNext)
11563 {
11564  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
11565  hAllocation->GetBlock() == this);
11566  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
11567  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
11568  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
11569  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
11570  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
11571  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
11572 }
11573 
11574 static void InitStatInfo(VmaStatInfo& outInfo)
11575 {
11576  memset(&outInfo, 0, sizeof(outInfo));
11577  outInfo.allocationSizeMin = UINT64_MAX;
11578  outInfo.unusedRangeSizeMin = UINT64_MAX;
11579 }
11580 
11581 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
11582 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
11583 {
11584  inoutInfo.blockCount += srcInfo.blockCount;
11585  inoutInfo.allocationCount += srcInfo.allocationCount;
11586  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
11587  inoutInfo.usedBytes += srcInfo.usedBytes;
11588  inoutInfo.unusedBytes += srcInfo.unusedBytes;
11589  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
11590  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
11591  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
11592  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
11593 }
11594 
11595 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11596 {
11597  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11598  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11599  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11600  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11601 }
11602 
11603 VmaPool_T::VmaPool_T(
11604  VmaAllocator hAllocator,
11605  const VmaPoolCreateInfo& createInfo,
11606  VkDeviceSize preferredBlockSize) :
11607  m_BlockVector(
11608  hAllocator,
11609  this, // hParentPool
11610  createInfo.memoryTypeIndex,
11611  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
11612  createInfo.minBlockCount,
11613  createInfo.maxBlockCount,
11614  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
11615  createInfo.frameInUseCount,
11616  true, // isCustomPool
11617  createInfo.blockSize != 0, // explicitBlockSize
11618  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
11619  m_Id(0)
11620 {
11621 }
11622 
11623 VmaPool_T::~VmaPool_T()
11624 {
11625 }
11626 
11627 #if VMA_STATS_STRING_ENABLED
11628 
11629 #endif // #if VMA_STATS_STRING_ENABLED
11630 
11631 VmaBlockVector::VmaBlockVector(
11632  VmaAllocator hAllocator,
11633  VmaPool hParentPool,
11634  uint32_t memoryTypeIndex,
11635  VkDeviceSize preferredBlockSize,
11636  size_t minBlockCount,
11637  size_t maxBlockCount,
11638  VkDeviceSize bufferImageGranularity,
11639  uint32_t frameInUseCount,
11640  bool isCustomPool,
11641  bool explicitBlockSize,
11642  uint32_t algorithm) :
11643  m_hAllocator(hAllocator),
11644  m_hParentPool(hParentPool),
11645  m_MemoryTypeIndex(memoryTypeIndex),
11646  m_PreferredBlockSize(preferredBlockSize),
11647  m_MinBlockCount(minBlockCount),
11648  m_MaxBlockCount(maxBlockCount),
11649  m_BufferImageGranularity(bufferImageGranularity),
11650  m_FrameInUseCount(frameInUseCount),
11651  m_IsCustomPool(isCustomPool),
11652  m_ExplicitBlockSize(explicitBlockSize),
11653  m_Algorithm(algorithm),
11654  m_HasEmptyBlock(false),
11655  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
11656  m_NextBlockId(0)
11657 {
11658 }
11659 
11660 VmaBlockVector::~VmaBlockVector()
11661 {
11662  for(size_t i = m_Blocks.size(); i--; )
11663  {
11664  m_Blocks[i]->Destroy(m_hAllocator);
11665  vma_delete(m_hAllocator, m_Blocks[i]);
11666  }
11667 }
11668 
11669 VkResult VmaBlockVector::CreateMinBlocks()
11670 {
11671  for(size_t i = 0; i < m_MinBlockCount; ++i)
11672  {
11673  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11674  if(res != VK_SUCCESS)
11675  {
11676  return res;
11677  }
11678  }
11679  return VK_SUCCESS;
11680 }
11681 
11682 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11683 {
11684  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11685 
11686  const size_t blockCount = m_Blocks.size();
11687 
11688  pStats->size = 0;
11689  pStats->unusedSize = 0;
11690  pStats->allocationCount = 0;
11691  pStats->unusedRangeCount = 0;
11692  pStats->unusedRangeSizeMax = 0;
11693  pStats->blockCount = blockCount;
11694 
11695  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11696  {
11697  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11698  VMA_ASSERT(pBlock);
11699  VMA_HEAVY_ASSERT(pBlock->Validate());
11700  pBlock->m_pMetadata->AddPoolStats(*pStats);
11701  }
11702 }
11703 
11704 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11705 {
11706  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11707  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11708  (VMA_DEBUG_MARGIN > 0) &&
11709  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
11710  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11711 }
11712 
11713 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11714 
11715 VkResult VmaBlockVector::Allocate(
11716  uint32_t currentFrameIndex,
11717  VkDeviceSize size,
11718  VkDeviceSize alignment,
11719  const VmaAllocationCreateInfo& createInfo,
11720  VmaSuballocationType suballocType,
11721  size_t allocationCount,
11722  VmaAllocation* pAllocations)
11723 {
11724  size_t allocIndex;
11725  VkResult res = VK_SUCCESS;
11726 
11727  if(IsCorruptionDetectionEnabled())
11728  {
11729  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11730  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
11731  }
11732 
11733  {
11734  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11735  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11736  {
11737  res = AllocatePage(
11738  currentFrameIndex,
11739  size,
11740  alignment,
11741  createInfo,
11742  suballocType,
11743  pAllocations + allocIndex);
11744  if(res != VK_SUCCESS)
11745  {
11746  break;
11747  }
11748  }
11749  }
11750 
11751  if(res != VK_SUCCESS)
11752  {
11753  // Free all already created allocations.
11754  while(allocIndex--)
11755  {
11756  Free(pAllocations[allocIndex]);
11757  }
11758  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
11759  }
11760 
11761  return res;
11762 }
11763 
11764 VkResult VmaBlockVector::AllocatePage(
11765  uint32_t currentFrameIndex,
11766  VkDeviceSize size,
11767  VkDeviceSize alignment,
11768  const VmaAllocationCreateInfo& createInfo,
11769  VmaSuballocationType suballocType,
11770  VmaAllocation* pAllocation)
11771 {
11772  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11773  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11774  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11775  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11776 
11777  const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
11778  VkDeviceSize freeMemory;
11779  {
11780  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
11781  VmaBudget heapBudget = {};
11782  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
11783  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
11784  }
11785 
11786  const bool canCreateNewBlock =
11787  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11788  (m_Blocks.size() < m_MaxBlockCount) &&
11789  freeMemory >= size;
11790  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11791 
11792  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11793  // Which in turn is available only when maxBlockCount = 1.
11794  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11795  {
11796  canMakeOtherLost = false;
11797  }
11798 
11799  // Upper address can only be used with linear allocator and within single memory block.
11800  if(isUpperAddress &&
11801  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11802  {
11803  return VK_ERROR_FEATURE_NOT_PRESENT;
11804  }
11805 
11806  // Validate strategy.
11807  switch(strategy)
11808  {
11809  case 0:
11810  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
11811  break;
11812  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
11813  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
11814  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
11815  break;
11816  default:
11817  return VK_ERROR_FEATURE_NOT_PRESENT;
11818  }
11819 
11820  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
11821  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11822  {
11823  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11824  }
11825 
11826  /*
11827  Under certain conditions, this whole section can be skipped for optimization, so
11828  we move on directly to trying to allocate with canMakeOtherLost. That is the case
11829  e.g. for custom pools with the linear algorithm.
11830  */
11831  if(!canMakeOtherLost || canCreateNewBlock)
11832  {
11833  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11834  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11836 
11837  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11838  {
11839  // Use only last block.
11840  if(!m_Blocks.empty())
11841  {
11842  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11843  VMA_ASSERT(pCurrBlock);
11844  VkResult res = AllocateFromBlock(
11845  pCurrBlock,
11846  currentFrameIndex,
11847  size,
11848  alignment,
11849  allocFlagsCopy,
11850  createInfo.pUserData,
11851  suballocType,
11852  strategy,
11853  pAllocation);
11854  if(res == VK_SUCCESS)
11855  {
11856  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
11857  return VK_SUCCESS;
11858  }
11859  }
11860  }
11861  else
11862  {
11863  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
11864  {
11865  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11866  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11867  {
11868  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11869  VMA_ASSERT(pCurrBlock);
11870  VkResult res = AllocateFromBlock(
11871  pCurrBlock,
11872  currentFrameIndex,
11873  size,
11874  alignment,
11875  allocFlagsCopy,
11876  createInfo.pUserData,
11877  suballocType,
11878  strategy,
11879  pAllocation);
11880  if(res == VK_SUCCESS)
11881  {
11882  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
11883  return VK_SUCCESS;
11884  }
11885  }
11886  }
11887  else // WORST_FIT, FIRST_FIT
11888  {
11889  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11890  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11891  {
11892  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11893  VMA_ASSERT(pCurrBlock);
11894  VkResult res = AllocateFromBlock(
11895  pCurrBlock,
11896  currentFrameIndex,
11897  size,
11898  alignment,
11899  allocFlagsCopy,
11900  createInfo.pUserData,
11901  suballocType,
11902  strategy,
11903  pAllocation);
11904  if(res == VK_SUCCESS)
11905  {
11906  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
11907  return VK_SUCCESS;
11908  }
11909  }
11910  }
11911  }
11912 
11913  // 2. Try to create new block.
11914  if(canCreateNewBlock)
11915  {
11916  // Calculate optimal size for new block.
11917  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11918  uint32_t newBlockSizeShift = 0;
11919  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11920 
11921  if(!m_ExplicitBlockSize)
11922  {
11923  // Allocate 1/8, 1/4, 1/2 as first blocks.
11924  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11925  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11926  {
11927  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11928  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11929  {
11930  newBlockSize = smallerNewBlockSize;
11931  ++newBlockSizeShift;
11932  }
11933  else
11934  {
11935  break;
11936  }
11937  }
11938  }
11939 
11940  size_t newBlockIndex = 0;
11941  VkResult res = newBlockSize <= freeMemory ?
11942  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
11943  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11944  if(!m_ExplicitBlockSize)
11945  {
11946  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11947  {
11948  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11949  if(smallerNewBlockSize >= size)
11950  {
11951  newBlockSize = smallerNewBlockSize;
11952  ++newBlockSizeShift;
11953  res = newBlockSize <= freeMemory ?
11954  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
11955  }
11956  else
11957  {
11958  break;
11959  }
11960  }
11961  }
11962 
11963  if(res == VK_SUCCESS)
11964  {
11965  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11966  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11967 
11968  res = AllocateFromBlock(
11969  pBlock,
11970  currentFrameIndex,
11971  size,
11972  alignment,
11973  allocFlagsCopy,
11974  createInfo.pUserData,
11975  suballocType,
11976  strategy,
11977  pAllocation);
11978  if(res == VK_SUCCESS)
11979  {
11980  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
11981  return VK_SUCCESS;
11982  }
11983  else
11984  {
11985  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11986  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11987  }
11988  }
11989  }
11990  }
11991 
11992  // 3. Try to allocate from existing blocks with making other allocations lost.
11993  if(canMakeOtherLost)
11994  {
11995  uint32_t tryIndex = 0;
11996  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11997  {
11998  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11999  VmaAllocationRequest bestRequest = {};
12000  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12001 
12002  // 1. Search existing allocations.
12003  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12004  {
12005  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12006  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12007  {
12008  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12009  VMA_ASSERT(pCurrBlock);
12010  VmaAllocationRequest currRequest = {};
12011  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12012  currentFrameIndex,
12013  m_FrameInUseCount,
12014  m_BufferImageGranularity,
12015  size,
12016  alignment,
12017  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12018  suballocType,
12019  canMakeOtherLost,
12020  strategy,
12021  &currRequest))
12022  {
12023  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12024  if(pBestRequestBlock == VMA_NULL ||
12025  currRequestCost < bestRequestCost)
12026  {
12027  pBestRequestBlock = pCurrBlock;
12028  bestRequest = currRequest;
12029  bestRequestCost = currRequestCost;
12030 
12031  if(bestRequestCost == 0)
12032  {
12033  break;
12034  }
12035  }
12036  }
12037  }
12038  }
12039  else // WORST_FIT, FIRST_FIT
12040  {
12041  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12042  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12043  {
12044  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12045  VMA_ASSERT(pCurrBlock);
12046  VmaAllocationRequest currRequest = {};
12047  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12048  currentFrameIndex,
12049  m_FrameInUseCount,
12050  m_BufferImageGranularity,
12051  size,
12052  alignment,
12053  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12054  suballocType,
12055  canMakeOtherLost,
12056  strategy,
12057  &currRequest))
12058  {
12059  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12060  if(pBestRequestBlock == VMA_NULL ||
12061  currRequestCost < bestRequestCost ||
12062  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12063  {
12064  pBestRequestBlock = pCurrBlock;
12065  bestRequest = currRequest;
12066  bestRequestCost = currRequestCost;
12067 
12068  if(bestRequestCost == 0 ||
12069  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
12070  {
12071  break;
12072  }
12073  }
12074  }
12075  }
12076  }
12077 
12078  if(pBestRequestBlock != VMA_NULL)
12079  {
12080  if(mapped)
12081  {
12082  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
12083  if(res != VK_SUCCESS)
12084  {
12085  return res;
12086  }
12087  }
12088 
12089  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
12090  currentFrameIndex,
12091  m_FrameInUseCount,
12092  &bestRequest))
12093  {
12094  // We no longer have an empty block.
12095  if(pBestRequestBlock->m_pMetadata->IsEmpty())
12096  {
12097  m_HasEmptyBlock = false;
12098  }
12099  // Allocate from this pBlock.
12100  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12101  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12102  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
12103  (*pAllocation)->InitBlockAllocation(
12104  pBestRequestBlock,
12105  bestRequest.offset,
12106  alignment,
12107  size,
12108  suballocType,
12109  mapped,
12110  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12111  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
12112  VMA_DEBUG_LOG(" Returned from existing block");
12113  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
12114  m_hAllocator->m_Budget.m_AllocationBytes[m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex)] += size;
12115  ++m_hAllocator->m_Budget.m_OperationsSinceBudgetFetch;
12116  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12117  {
12118  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12119  }
12120  if(IsCorruptionDetectionEnabled())
12121  {
12122  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
12123  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12124  }
12125  return VK_SUCCESS;
12126  }
12127  // else: Some allocations must have been touched while we were here. Next try.
12128  }
12129  else
12130  {
12131  // Could not find place in any of the blocks - break outer loop.
12132  break;
12133  }
12134  }
12135  /* Maximum number of tries exceeded - a very unlikely event when many other
12136  threads are simultaneously touching allocations, making it impossible to make
12137  them lost at the same time as we try to allocate. */
12138  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
12139  {
12140  return VK_ERROR_TOO_MANY_OBJECTS;
12141  }
12142  }
12143 
12144  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12145 }
12146 
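// The function above implements the allocation waterfall: reuse an existing
// block, create a new block (halving its size up to 3 times when no explicit
// block size was set), and finally try to make other allocations lost. A
// minimal standalone sketch of the sizing heuristic (ExampleCalcNewBlockSize
// is a hypothetical helper, not part of the library):
static VkDeviceSize ExampleCalcNewBlockSize(
    VkDeviceSize preferredBlockSize,
    VkDeviceSize maxExistingBlockSize,
    VkDeviceSize allocSize)
{
    const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    VkDeviceSize newBlockSize = preferredBlockSize;
    for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    {
        const VkDeviceSize smaller = newBlockSize / 2;
        // Halve only while the smaller block still exceeds every existing
        // block and leaves room for the requested allocation twice over.
        if(smaller > maxExistingBlockSize && smaller >= allocSize * 2)
        {
            newBlockSize = smaller;
        }
        else
        {
            break;
        }
    }
    return newBlockSize; // First blocks thus come out as 1/8, 1/4, 1/2 of the preferred size.
}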
12147 void VmaBlockVector::Free(
12148  const VmaAllocation hAllocation)
12149 {
12150  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
12151 
12152  // Scope for lock.
12153  {
12154  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12155 
12156  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12157 
12158  if(IsCorruptionDetectionEnabled())
12159  {
12160  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
12161  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
12162  }
12163 
12164  if(hAllocation->IsPersistentMap())
12165  {
12166  pBlock->Unmap(m_hAllocator, 1);
12167  }
12168 
12169  pBlock->m_pMetadata->Free(hAllocation);
12170  VMA_HEAVY_ASSERT(pBlock->Validate());
12171 
12172  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
12173 
12174  // pBlock became empty after this deallocation.
12175  if(pBlock->m_pMetadata->IsEmpty())
12176  {
12177  // We already have an empty block - we don't want two, so delete this one.
12178  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
12179  {
12180  pBlockToDelete = pBlock;
12181  Remove(pBlock);
12182  }
12183  // We now have first empty block.
12184  else
12185  {
12186  m_HasEmptyBlock = true;
12187  }
12188  }
12189  // pBlock didn't become empty, but we have another empty block - find and free that one.
12190  // (This is optional, heuristics.)
12191  else if(m_HasEmptyBlock)
12192  {
12193  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
12194  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
12195  {
12196  pBlockToDelete = pLastBlock;
12197  m_Blocks.pop_back();
12198  m_HasEmptyBlock = false;
12199  }
12200  }
12201 
12202  IncrementallySortBlocks();
12203  }
12204 
12205  // Destruction of a free block. Deferred until this point, outside of mutex
12206  // lock, for performance reasons.
12207  if(pBlockToDelete != VMA_NULL)
12208  {
12209  VMA_DEBUG_LOG(" Deleted empty allocation");
12210  pBlockToDelete->Destroy(m_hAllocator);
12211  vma_delete(m_hAllocator, pBlockToDelete);
12212  }
12213 }
12214 
12215 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12216 {
12217  VkDeviceSize result = 0;
12218  for(size_t i = m_Blocks.size(); i--; )
12219  {
12220  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12221  if(result >= m_PreferredBlockSize)
12222  {
12223  break;
12224  }
12225  }
12226  return result;
12227 }
12228 
12229 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12230 {
12231  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12232  {
12233  if(m_Blocks[blockIndex] == pBlock)
12234  {
12235  VmaVectorRemove(m_Blocks, blockIndex);
12236  return;
12237  }
12238  }
12239  VMA_ASSERT(0);
12240 }
12241 
12242 void VmaBlockVector::IncrementallySortBlocks()
12243 {
12244  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12245  {
12246  // Bubble sort only until first swap.
12247  for(size_t i = 1; i < m_Blocks.size(); ++i)
12248  {
12249  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12250  {
12251  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12252  return;
12253  }
12254  }
12255  }
12256 }
12257 
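// IncrementallySortBlocks() above performs at most one swap per call, which
// amortizes sorting across many Allocate()/Free() calls while keeping
// m_Blocks approximately ordered by ascending free space. An equivalent,
// illustrative step over a plain array (hypothetical helper):
static void ExampleIncrementalSortStep(VkDeviceSize* freeSizes, size_t count)
{
    for(size_t i = 1; i < count; ++i)
    {
        if(freeSizes[i - 1] > freeSizes[i])
        {
            const VkDeviceSize tmp = freeSizes[i - 1];
            freeSizes[i - 1] = freeSizes[i];
            freeSizes[i] = tmp;
            return; // Stop after the first swap - the rest waits for later calls.
        }
    }
}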
12258 VkResult VmaBlockVector::AllocateFromBlock(
12259  VmaDeviceMemoryBlock* pBlock,
12260  uint32_t currentFrameIndex,
12261  VkDeviceSize size,
12262  VkDeviceSize alignment,
12263  VmaAllocationCreateFlags allocFlags,
12264  void* pUserData,
12265  VmaSuballocationType suballocType,
12266  uint32_t strategy,
12267  VmaAllocation* pAllocation)
12268 {
12269  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
12270  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12271  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12272  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12273 
12274  VmaAllocationRequest currRequest = {};
12275  if(pBlock->m_pMetadata->CreateAllocationRequest(
12276  currentFrameIndex,
12277  m_FrameInUseCount,
12278  m_BufferImageGranularity,
12279  size,
12280  alignment,
12281  isUpperAddress,
12282  suballocType,
12283  false, // canMakeOtherLost
12284  strategy,
12285  &currRequest))
12286  {
12287  // Allocate from pCurrBlock.
12288  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
12289 
12290  if(mapped)
12291  {
12292  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12293  if(res != VK_SUCCESS)
12294  {
12295  return res;
12296  }
12297  }
12298 
12299  // We no longer have an empty block.
12300  if(pBlock->m_pMetadata->IsEmpty())
12301  {
12302  m_HasEmptyBlock = false;
12303  }
12304 
12305  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
12306  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
12307  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
12308  (*pAllocation)->InitBlockAllocation(
12309  pBlock,
12310  currRequest.offset,
12311  alignment,
12312  size,
12313  suballocType,
12314  mapped,
12315  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
12316  VMA_HEAVY_ASSERT(pBlock->Validate());
12317  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12318  m_hAllocator->m_Budget.m_AllocationBytes[m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex)] += size;
12319  ++m_hAllocator->m_Budget.m_OperationsSinceBudgetFetch;
12320  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12321  {
12322  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12323  }
12324  if(IsCorruptionDetectionEnabled())
12325  {
12326  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
12327  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12328  }
12329  return VK_SUCCESS;
12330  }
12331  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12332 }
12333 
12334 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12335 {
12336  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12337  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12338  allocInfo.allocationSize = blockSize;
12339  VkDeviceMemory mem = VK_NULL_HANDLE;
12340  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12341  if(res < 0)
12342  {
12343  return res;
12344  }
12345 
12346  // New VkDeviceMemory successfully created.
12347 
12348  // Create new Allocation for it.
12349  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12350  pBlock->Init(
12351  m_hAllocator,
12352  m_hParentPool,
12353  m_MemoryTypeIndex,
12354  mem,
12355  allocInfo.allocationSize,
12356  m_NextBlockId++,
12357  m_Algorithm);
12358 
12359  m_Blocks.push_back(pBlock);
12360  if(pNewBlockIndex != VMA_NULL)
12361  {
12362  *pNewBlockIndex = m_Blocks.size() - 1;
12363  }
12364 
12365  return VK_SUCCESS;
12366 }
12367 
12368 void VmaBlockVector::ApplyDefragmentationMovesCpu(
12369  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12370  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
12371 {
12372  const size_t blockCount = m_Blocks.size();
12373  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
12374 
12375  enum BLOCK_FLAG
12376  {
12377  BLOCK_FLAG_USED = 0x00000001,
12378  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
12379  };
12380 
12381  struct BlockInfo
12382  {
12383  uint32_t flags;
12384  void* pMappedData;
12385  };
12386  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
12387  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
12388  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
12389 
12390  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12391  const size_t moveCount = moves.size();
12392  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12393  {
12394  const VmaDefragmentationMove& move = moves[moveIndex];
12395  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
12396  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
12397  }
12398 
12399  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12400 
12401  // Go over all blocks. Get mapped pointer or map if necessary.
12402  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12403  {
12404  BlockInfo& currBlockInfo = blockInfo[blockIndex];
12405  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12406  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
12407  {
12408  currBlockInfo.pMappedData = pBlock->GetMappedData();
12409  // If it is not originally mapped, map it now.
12410  if(currBlockInfo.pMappedData == VMA_NULL)
12411  {
12412  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
12413  if(pDefragCtx->res == VK_SUCCESS)
12414  {
12415  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
12416  }
12417  }
12418  }
12419  }
12420 
12421  // Go over all moves. Do actual data transfer.
12422  if(pDefragCtx->res == VK_SUCCESS)
12423  {
12424  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
12425  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
12426 
12427  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12428  {
12429  const VmaDefragmentationMove& move = moves[moveIndex];
12430 
12431  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
12432  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
12433 
12434  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
12435 
12436  // Invalidate source.
12437  if(isNonCoherent)
12438  {
12439  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
12440  memRange.memory = pSrcBlock->GetDeviceMemory();
12441  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
12442  memRange.size = VMA_MIN(
12443  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
12444  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
12445  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12446  }
12447 
12448  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
12449  memmove(
12450  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
12451  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
12452  static_cast<size_t>(move.size));
12453 
12454  if(IsCorruptionDetectionEnabled())
12455  {
12456  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
12457  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
12458  }
12459 
12460  // Flush destination.
12461  if(isNonCoherent)
12462  {
12463  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
12464  memRange.memory = pDstBlock->GetDeviceMemory();
12465  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
12466  memRange.size = VMA_MIN(
12467  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
12468  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
12469  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
12470  }
12471  }
12472  }
12473 
12474  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
12475  // This is done regardless of whether pCtx->res == VK_SUCCESS.
12476  for(size_t blockIndex = blockCount; blockIndex--; )
12477  {
12478  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
12479  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
12480  {
12481  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12482  pBlock->Unmap(m_hAllocator, 1);
12483  }
12484  }
12485 }
12486 
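// The flush/invalidate ranges above must be aligned to nonCoherentAtomSize.
// A minimal sketch of that clamping, reusing the VmaAlignDown/VmaAlignUp/VMA_MIN
// helpers defined earlier in this file (ExampleMakeNonCoherentRange itself is
// a hypothetical helper, not part of the library):
static VkMappedMemoryRange ExampleMakeNonCoherentRange(
    VkDeviceMemory memory,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkDeviceSize blockSize,
    VkDeviceSize nonCoherentAtomSize)
{
    VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    range.memory = memory;
    // Align the start down and the end up to the atom size...
    range.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    range.size = VMA_MIN(
        VmaAlignUp(size + (offset - range.offset), nonCoherentAtomSize),
        blockSize - range.offset); // ...but never run past the end of the block.
    return range;
}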
12487 void VmaBlockVector::ApplyDefragmentationMovesGpu(
12488  class VmaBlockVectorDefragmentationContext* pDefragCtx,
12489  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12490  VkCommandBuffer commandBuffer)
12491 {
12492  const size_t blockCount = m_Blocks.size();
12493 
12494  pDefragCtx->blockContexts.resize(blockCount);
12495  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
12496 
12497  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
12498  const size_t moveCount = moves.size();
12499  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12500  {
12501  const VmaDefragmentationMove& move = moves[moveIndex];
12502  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12503  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
12504  }
12505 
12506  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
12507 
12508  // Go over all blocks. Create and bind buffer for whole block if necessary.
12509  {
12510  VkBufferCreateInfo bufCreateInfo;
12511  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
12512 
12513  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
12514  {
12515  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
12516  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12517  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
12518  {
12519  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
12520  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
12521  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
12522  if(pDefragCtx->res == VK_SUCCESS)
12523  {
12524  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
12525  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
12526  }
12527  }
12528  }
12529  }
12530 
12531  // Go over all moves. Post data transfer commands to command buffer.
12532  if(pDefragCtx->res == VK_SUCCESS)
12533  {
12534  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
12535  {
12536  const VmaDefragmentationMove& move = moves[moveIndex];
12537 
12538  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
12539  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
12540 
12541  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
12542 
12543  VkBufferCopy region = {
12544  move.srcOffset,
12545  move.dstOffset,
12546  move.size };
12547  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12548  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12549  }
12550  }
12551 
12552  // The buffers stay saved in the defrag context for later destruction; mark the context VK_NOT_READY until the command buffer has executed.
12553  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12554  {
12555  pDefragCtx->res = VK_NOT_READY;
12556  }
12557 }
12558 
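// In the GPU path above, each temporary VkBuffer spans its whole block and is
// bound at offset 0, so suballocation offsets can be used directly as buffer
// offsets in the copy regions. An illustrative snippet (hypothetical helper):
static void ExampleRecordDefragCopy(
    VkCommandBuffer commandBuffer,
    PFN_vkCmdCopyBuffer pfnCmdCopyBuffer,
    VkBuffer srcBlockBuffer,
    VkBuffer dstBlockBuffer,
    VkDeviceSize srcOffset,
    VkDeviceSize dstOffset,
    VkDeviceSize size)
{
    // srcOffset/dstOffset are offsets within the source/destination blocks.
    const VkBufferCopy region = { srcOffset, dstOffset, size };
    pfnCmdCopyBuffer(commandBuffer, srcBlockBuffer, dstBlockBuffer, 1, &region);
}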
12559 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12560 {
12561  m_HasEmptyBlock = false;
12562  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12563  {
12564  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12565  if(pBlock->m_pMetadata->IsEmpty())
12566  {
12567  if(m_Blocks.size() > m_MinBlockCount)
12568  {
12569  if(pDefragmentationStats != VMA_NULL)
12570  {
12571  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12572  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12573  }
12574 
12575  VmaVectorRemove(m_Blocks, blockIndex);
12576  pBlock->Destroy(m_hAllocator);
12577  vma_delete(m_hAllocator, pBlock);
12578  }
12579  else
12580  {
12581  m_HasEmptyBlock = true;
12582  }
12583  }
12584  }
12585 }
12586 
12587 #if VMA_STATS_STRING_ENABLED
12588 
12589 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
12590 {
12591  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12592 
12593  json.BeginObject();
12594 
12595  if(m_IsCustomPool)
12596  {
12597  json.WriteString("MemoryTypeIndex");
12598  json.WriteNumber(m_MemoryTypeIndex);
12599 
12600  json.WriteString("BlockSize");
12601  json.WriteNumber(m_PreferredBlockSize);
12602 
12603  json.WriteString("BlockCount");
12604  json.BeginObject(true);
12605  if(m_MinBlockCount > 0)
12606  {
12607  json.WriteString("Min");
12608  json.WriteNumber((uint64_t)m_MinBlockCount);
12609  }
12610  if(m_MaxBlockCount < SIZE_MAX)
12611  {
12612  json.WriteString("Max");
12613  json.WriteNumber((uint64_t)m_MaxBlockCount);
12614  }
12615  json.WriteString("Cur");
12616  json.WriteNumber((uint64_t)m_Blocks.size());
12617  json.EndObject();
12618 
12619  if(m_FrameInUseCount > 0)
12620  {
12621  json.WriteString("FrameInUseCount");
12622  json.WriteNumber(m_FrameInUseCount);
12623  }
12624 
12625  if(m_Algorithm != 0)
12626  {
12627  json.WriteString("Algorithm");
12628  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
12629  }
12630  }
12631  else
12632  {
12633  json.WriteString("PreferredBlockSize");
12634  json.WriteNumber(m_PreferredBlockSize);
12635  }
12636 
12637  json.WriteString("Blocks");
12638  json.BeginObject();
12639  for(size_t i = 0; i < m_Blocks.size(); ++i)
12640  {
12641  json.BeginString();
12642  json.ContinueString(m_Blocks[i]->GetId());
12643  json.EndString();
12644 
12645  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
12646  }
12647  json.EndObject();
12648 
12649  json.EndObject();
12650 }
12651 
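// For a custom pool, the JSON written above has roughly this shape (an
// illustrative excerpt - the values are made up):
//
// {
//   "MemoryTypeIndex": 2,
//   "BlockSize": 268435456,
//   "BlockCount": { "Min": 1, "Max": 8, "Cur": 2 },
//   "Algorithm": "Linear",
//   "Blocks": {
//     "0": { ... detailed map of block 0 ... },
//     "1": { ... detailed map of block 1 ... }
//   }
// }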
12652 #endif // #if VMA_STATS_STRING_ENABLED
12653 
12654 void VmaBlockVector::Defragment(
12655  class VmaBlockVectorDefragmentationContext* pCtx,
12656  VmaDefragmentationStats* pStats,
12657  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
12658  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
12659  VkCommandBuffer commandBuffer)
12660 {
12661  pCtx->res = VK_SUCCESS;
12662 
12663  const VkMemoryPropertyFlags memPropFlags =
12664  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
12665  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12666 
12667  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
12668  isHostVisible;
12669  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
12670  !IsCorruptionDetectionEnabled() &&
12671  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
12672 
12673  // There are options to defragment this memory type.
12674  if(canDefragmentOnCpu || canDefragmentOnGpu)
12675  {
12676  bool defragmentOnGpu;
12677  // There is only one option to defragment this memory type.
12678  if(canDefragmentOnGpu != canDefragmentOnCpu)
12679  {
12680  defragmentOnGpu = canDefragmentOnGpu;
12681  }
12682  // Both options are available: Heuristics to choose the best one.
12683  else
12684  {
12685  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
12686  m_hAllocator->IsIntegratedGpu();
12687  }
12688 
12689  bool overlappingMoveSupported = !defragmentOnGpu;
12690 
12691  if(m_hAllocator->m_UseMutex)
12692  {
12693  m_Mutex.LockWrite();
12694  pCtx->mutexLocked = true;
12695  }
12696 
12697  pCtx->Begin(overlappingMoveSupported);
12698 
12699  // Defragment.
12700 
12701  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
12702  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
12703  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
12704  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
12705  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
12706 
12707  // Accumulate statistics.
12708  if(pStats != VMA_NULL)
12709  {
12710  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
12711  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
12712  pStats->bytesMoved += bytesMoved;
12713  pStats->allocationsMoved += allocationsMoved;
12714  VMA_ASSERT(bytesMoved <= maxBytesToMove);
12715  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
12716  if(defragmentOnGpu)
12717  {
12718  maxGpuBytesToMove -= bytesMoved;
12719  maxGpuAllocationsToMove -= allocationsMoved;
12720  }
12721  else
12722  {
12723  maxCpuBytesToMove -= bytesMoved;
12724  maxCpuAllocationsToMove -= allocationsMoved;
12725  }
12726  }
12727 
12728  if(pCtx->res >= VK_SUCCESS)
12729  {
12730  if(defragmentOnGpu)
12731  {
12732  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
12733  }
12734  else
12735  {
12736  ApplyDefragmentationMovesCpu(pCtx, moves);
12737  }
12738  }
12739  }
12740 }
12741 
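// When both CPU and GPU defragmentation are possible, the heuristics above
// prefer the GPU for DEVICE_LOCAL memory and on integrated GPUs. Condensed as
// a standalone sketch (hypothetical helper, not part of the library):
static bool ExampleChooseGpuDefrag(
    VkMemoryPropertyFlags memPropFlags,
    bool canDefragmentOnCpu,
    bool canDefragmentOnGpu,
    bool isIntegratedGpu)
{
    // Only one option available - take it.
    if(canDefragmentOnGpu != canDefragmentOnCpu)
    {
        return canDefragmentOnGpu;
    }
    // Both available - prefer GPU where CPU-visible copies would be slow.
    return (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || isIntegratedGpu;
}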
12742 void VmaBlockVector::DefragmentationEnd(
12743  class VmaBlockVectorDefragmentationContext* pCtx,
12744  VmaDefragmentationStats* pStats)
12745 {
12746  // Destroy buffers.
12747  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12748  {
12749  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12750  if(blockCtx.hBuffer)
12751  {
12752  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12753  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12754  }
12755  }
12756 
12757  if(pCtx->res >= VK_SUCCESS)
12758  {
12759  FreeEmptyBlocks(pStats);
12760  }
12761 
12762  if(pCtx->mutexLocked)
12763  {
12764  VMA_ASSERT(m_hAllocator->m_UseMutex);
12765  m_Mutex.UnlockWrite();
12766  }
12767 }
12768 
12769 size_t VmaBlockVector::CalcAllocationCount() const
12770 {
12771  size_t result = 0;
12772  for(size_t i = 0; i < m_Blocks.size(); ++i)
12773  {
12774  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12775  }
12776  return result;
12777 }
12778 
12779 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12780 {
12781  if(m_BufferImageGranularity == 1)
12782  {
12783  return false;
12784  }
12785  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12786  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12787  {
12788  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12789  VMA_ASSERT(m_Algorithm == 0);
12790  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12791  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12792  {
12793  return true;
12794  }
12795  }
12796  return false;
12797 }
12798 
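// The granularity check above matters because Vulkan requires linear (buffer)
// and non-linear (optimal-tiling image) resources not to share a "page" of
// size bufferImageGranularity. A sketch of the underlying page test, which
// conceptually mirrors the VmaBlocksOnSamePage helper defined earlier in this
// file (ExampleOnSamePage is a hypothetical name):
static bool ExampleOnSamePage(
    VkDeviceSize resourceAEnd,   // Exclusive end offset of resource A.
    VkDeviceSize resourceBStart, // Start offset of resource B, where B follows A.
    VkDeviceSize bufferImageGranularity)
{
    const VkDeviceSize lastPageOfA = (resourceAEnd - 1) / bufferImageGranularity;
    const VkDeviceSize firstPageOfB = resourceBStart / bufferImageGranularity;
    return lastPageOfA == firstPageOfB;
}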
12799 void VmaBlockVector::MakePoolAllocationsLost(
12800  uint32_t currentFrameIndex,
12801  size_t* pLostAllocationCount)
12802 {
12803  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12804  size_t lostAllocationCount = 0;
12805  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12806  {
12807  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12808  VMA_ASSERT(pBlock);
12809  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12810  }
12811  if(pLostAllocationCount != VMA_NULL)
12812  {
12813  *pLostAllocationCount = lostAllocationCount;
12814  }
12815 }
12816 
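// Usage sketch at the public API level (illustrative, error handling omitted):
// this method backs vmaMakePoolAllocationsLost(), typically called once per
// frame for pools whose allocations can become lost.
//
// size_t lostCount = 0;
// vmaMakePoolAllocationsLost(allocator, pool, &lostCount);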
12817 VkResult VmaBlockVector::CheckCorruption()
12818 {
12819  if(!IsCorruptionDetectionEnabled())
12820  {
12821  return VK_ERROR_FEATURE_NOT_PRESENT;
12822  }
12823 
12824  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12825  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12826  {
12827  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12828  VMA_ASSERT(pBlock);
12829  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12830  if(res != VK_SUCCESS)
12831  {
12832  return res;
12833  }
12834  }
12835  return VK_SUCCESS;
12836 }
12837 
12838 void VmaBlockVector::AddStats(VmaStats* pStats)
12839 {
12840  const uint32_t memTypeIndex = m_MemoryTypeIndex;
12841  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
12842 
12843  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12844 
12845  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12846  {
12847  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12848  VMA_ASSERT(pBlock);
12849  VMA_HEAVY_ASSERT(pBlock->Validate());
12850  VmaStatInfo allocationStatInfo;
12851  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
12852  VmaAddStatInfo(pStats->total, allocationStatInfo);
12853  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12854  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12855  }
12856 }
12857 
12858 ////////////////////////////////////////////////////////////////////////////////
12859 // VmaDefragmentationAlgorithm_Generic members definition
12860 
12861 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
12862  VmaAllocator hAllocator,
12863  VmaBlockVector* pBlockVector,
12864  uint32_t currentFrameIndex,
12865  bool overlappingMoveSupported) :
12866  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
12867  m_AllocationCount(0),
12868  m_AllAllocations(false),
12869  m_BytesMoved(0),
12870  m_AllocationsMoved(0),
12871  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
12872 {
12873  // Create block info for each block.
12874  const size_t blockCount = m_pBlockVector->m_Blocks.size();
12875  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12876  {
12877  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
12878  pBlockInfo->m_OriginalBlockIndex = blockIndex;
12879  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
12880  m_Blocks.push_back(pBlockInfo);
12881  }
12882 
12883  // Sort them by m_pBlock pointer value.
12884  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
12885 }
12886 
12887 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12888 {
12889  for(size_t i = m_Blocks.size(); i--; )
12890  {
12891  vma_delete(m_hAllocator, m_Blocks[i]);
12892  }
12893 }
12894 
12895 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
12896 {
12897  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
12898  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
12899  {
12900  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
12901  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
12902  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
12903  {
12904  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
12905  (*it)->m_Allocations.push_back(allocInfo);
12906  }
12907  else
12908  {
12909  VMA_ASSERT(0);
12910  }
12911 
12912  ++m_AllocationCount;
12913  }
12914 }
12915 
12916 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
12917  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12918  VkDeviceSize maxBytesToMove,
12919  uint32_t maxAllocationsToMove)
12920 {
12921  if(m_Blocks.empty())
12922  {
12923  return VK_SUCCESS;
12924  }
12925 
12926  // This is a choice based on research.
12927  // Option 1:
12928  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
12929  // Option 2:
12930  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
12931  // Option 3:
12932  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
12933 
12934  size_t srcBlockMinIndex = 0;
12935  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
12936  /*
12937  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
12938  {
12939  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
12940  if(blocksWithNonMovableCount > 0)
12941  {
12942  srcBlockMinIndex = blocksWithNonMovableCount - 1;
12943  }
12944  }
12945  */
12946 
12947  size_t srcBlockIndex = m_Blocks.size() - 1;
12948  size_t srcAllocIndex = SIZE_MAX;
12949  for(;;)
12950  {
12951  // 1. Find next allocation to move.
12952  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
12953  // 1.2. Then start from last to first m_Allocations.
12954  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
12955  {
12956  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
12957  {
12958  // Finished: no more allocations to process.
12959  if(srcBlockIndex == srcBlockMinIndex)
12960  {
12961  return VK_SUCCESS;
12962  }
12963  else
12964  {
12965  --srcBlockIndex;
12966  srcAllocIndex = SIZE_MAX;
12967  }
12968  }
12969  else
12970  {
12971  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
12972  }
12973  }
12974 
12975  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
12976  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
12977 
12978  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
12979  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
12980  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
12981  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
12982 
12983  // 2. Try to find new place for this allocation in preceding or current block.
12984  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
12985  {
12986  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
12987  VmaAllocationRequest dstAllocRequest;
12988  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
12989  m_CurrentFrameIndex,
12990  m_pBlockVector->GetFrameInUseCount(),
12991  m_pBlockVector->GetBufferImageGranularity(),
12992  size,
12993  alignment,
12994  false, // upperAddress
12995  suballocType,
12996  false, // canMakeOtherLost
12997  strategy,
12998  &dstAllocRequest) &&
12999  MoveMakesSense(
13000  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
13001  {
13002  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
13003 
13004  // Reached limit on number of allocations or bytes to move.
13005  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
13006  (m_BytesMoved + size > maxBytesToMove))
13007  {
13008  return VK_SUCCESS;
13009  }
13010 
13011  VmaDefragmentationMove move;
13012  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
13013  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
13014  move.srcOffset = srcOffset;
13015  move.dstOffset = dstAllocRequest.offset;
13016  move.size = size;
13017  moves.push_back(move);
13018 
13019  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
13020  dstAllocRequest,
13021  suballocType,
13022  size,
13023  allocInfo.m_hAllocation);
13024  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
13025 
13026  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
13027 
13028  if(allocInfo.m_pChanged != VMA_NULL)
13029  {
13030  *allocInfo.m_pChanged = VK_TRUE;
13031  }
13032 
13033  ++m_AllocationsMoved;
13034  m_BytesMoved += size;
13035 
13036  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
13037 
13038  break;
13039  }
13040  }
13041 
13042  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
13043 
13044  if(srcAllocIndex > 0)
13045  {
13046  --srcAllocIndex;
13047  }
13048  else
13049  {
13050  if(srcBlockIndex > 0)
13051  {
13052  --srcBlockIndex;
13053  srcAllocIndex = SIZE_MAX;
13054  }
13055  else
13056  {
13057  return VK_SUCCESS;
13058  }
13059  }
13060  }
13061 }
13062 
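// Each candidate move above is gated by the per-pass limits. The check,
// condensed into a standalone sketch (hypothetical helper):
static bool ExampleWithinMoveLimits(
    uint32_t allocationsMoved,
    VkDeviceSize bytesMoved,
    VkDeviceSize moveSize,
    uint32_t maxAllocationsToMove,
    VkDeviceSize maxBytesToMove)
{
    // The round ends early as soon as either limit would be exceeded.
    return allocationsMoved + 1 <= maxAllocationsToMove &&
        bytesMoved + moveSize <= maxBytesToMove;
}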
13063 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
13064 {
13065  size_t result = 0;
13066  for(size_t i = 0; i < m_Blocks.size(); ++i)
13067  {
13068  if(m_Blocks[i]->m_HasNonMovableAllocations)
13069  {
13070  ++result;
13071  }
13072  }
13073  return result;
13074 }
13075 
13076 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
13077  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13078  VkDeviceSize maxBytesToMove,
13079  uint32_t maxAllocationsToMove)
13080 {
13081  if(!m_AllAllocations && m_AllocationCount == 0)
13082  {
13083  return VK_SUCCESS;
13084  }
13085 
13086  const size_t blockCount = m_Blocks.size();
13087  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13088  {
13089  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
13090 
13091  if(m_AllAllocations)
13092  {
13093  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
13094  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
13095  it != pMetadata->m_Suballocations.end();
13096  ++it)
13097  {
13098  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
13099  {
13100  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
13101  pBlockInfo->m_Allocations.push_back(allocInfo);
13102  }
13103  }
13104  }
13105 
13106  pBlockInfo->CalcHasNonMovableAllocations();
13107 
13108  // This is a choice based on research.
13109  // Option 1:
13110  pBlockInfo->SortAllocationsByOffsetDescending();
13111  // Option 2:
13112  //pBlockInfo->SortAllocationsBySizeDescending();
13113  }
13114 
13115  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
13116  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
13117 
13118  // This is a choice based on research.
13119  const uint32_t roundCount = 2;
13120 
13121  // Execute defragmentation rounds (the main part).
13122  VkResult result = VK_SUCCESS;
13123  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
13124  {
13125  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
13126  }
13127 
13128  return result;
13129 }
13130 
13131 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
13132  size_t dstBlockIndex, VkDeviceSize dstOffset,
13133  size_t srcBlockIndex, VkDeviceSize srcOffset)
13134 {
13135  if(dstBlockIndex < srcBlockIndex)
13136  {
13137  return true;
13138  }
13139  if(dstBlockIndex > srcBlockIndex)
13140  {
13141  return false;
13142  }
13143  if(dstOffset < srcOffset)
13144  {
13145  return true;
13146  }
13147  return false;
13148 }
13149 
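// MoveMakesSense() above orders positions lexicographically by
// (blockIndex, offset) - a move is worthwhile only if it lowers that pair.
// For example (hypothetical values):
//   src (block 2, offset 0)    -> dst (block 1, offset 4096) : true
//   src (block 1, offset 4096) -> dst (block 1, offset 0)    : true
//   src (block 1, offset 0)    -> dst (block 2, offset 0)    : false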
13150 ////////////////////////////////////////////////////////////////////////////////
13151 // VmaDefragmentationAlgorithm_Fast
13152 
13153 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
13154  VmaAllocator hAllocator,
13155  VmaBlockVector* pBlockVector,
13156  uint32_t currentFrameIndex,
13157  bool overlappingMoveSupported) :
13158  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13159  m_OverlappingMoveSupported(overlappingMoveSupported),
13160  m_AllocationCount(0),
13161  m_AllAllocations(false),
13162  m_BytesMoved(0),
13163  m_AllocationsMoved(0),
13164  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
13165 {
13166  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
13167 
13168 }
13169 
13170 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
13171 {
13172 }
13173 
13174 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
13175  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13176  VkDeviceSize maxBytesToMove,
13177  uint32_t maxAllocationsToMove)
13178 {
13179  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
13180 
13181  const size_t blockCount = m_pBlockVector->GetBlockCount();
13182  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
13183  {
13184  return VK_SUCCESS;
13185  }
13186 
13187  PreprocessMetadata();
13188 
13189  // Sort blocks in order from most "destination" to most "source".
13190 
13191  m_BlockInfos.resize(blockCount);
13192  for(size_t i = 0; i < blockCount; ++i)
13193  {
13194  m_BlockInfos[i].origBlockIndex = i;
13195  }
13196 
13197  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
13198  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
13199  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
13200  });
13201 
13202  // THE MAIN ALGORITHM
13203 
13204  FreeSpaceDatabase freeSpaceDb;
13205 
13206  size_t dstBlockInfoIndex = 0;
13207  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13208  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13209  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13210  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
13211  VkDeviceSize dstOffset = 0;
13212 
13213  bool end = false;
13214  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
13215  {
13216  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
13217  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
13218  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
13219  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
13220  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
13221  {
13222  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
13223  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
13224  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
13225  if(m_AllocationsMoved == maxAllocationsToMove ||
13226  m_BytesMoved + srcAllocSize > maxBytesToMove)
13227  {
13228  end = true;
13229  break;
13230  }
13231  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
13232 
13233  // Try to place it in one of free spaces from the database.
13234  size_t freeSpaceInfoIndex;
13235  VkDeviceSize dstAllocOffset;
13236  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
13237  freeSpaceInfoIndex, dstAllocOffset))
13238  {
13239  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
13240  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
13241  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
13242 
13243  // Same block
13244  if(freeSpaceInfoIndex == srcBlockInfoIndex)
13245  {
13246  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13247 
13248  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13249 
13250  VmaSuballocation suballoc = *srcSuballocIt;
13251  suballoc.offset = dstAllocOffset;
13252  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
13253  m_BytesMoved += srcAllocSize;
13254  ++m_AllocationsMoved;
13255 
13256  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13257  ++nextSuballocIt;
13258  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13259  srcSuballocIt = nextSuballocIt;
13260 
13261  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13262 
13263  VmaDefragmentationMove move = {
13264  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13265  srcAllocOffset, dstAllocOffset,
13266  srcAllocSize };
13267  moves.push_back(move);
13268  }
13269  // Different block
13270  else
13271  {
13272  // MOVE OPTION 2: Move the allocation to a different block.
13273 
13274  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
13275 
13276  VmaSuballocation suballoc = *srcSuballocIt;
13277  suballoc.offset = dstAllocOffset;
13278  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
13279  m_BytesMoved += srcAllocSize;
13280  ++m_AllocationsMoved;
13281 
13282  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13283  ++nextSuballocIt;
13284  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13285  srcSuballocIt = nextSuballocIt;
13286 
13287  InsertSuballoc(pFreeSpaceMetadata, suballoc);
13288 
13289  VmaDefragmentationMove move = {
13290  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
13291  srcAllocOffset, dstAllocOffset,
13292  srcAllocSize };
13293  moves.push_back(move);
13294  }
13295  }
13296  else
13297  {
13298  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
13299 
13300  // If the allocation doesn't fit before the end of dstBlock, move on to the next block.
13301  while(dstBlockInfoIndex < srcBlockInfoIndex &&
13302  dstAllocOffset + srcAllocSize > dstBlockSize)
13303  {
13304  // But before that, register remaining free space at the end of dst block.
13305  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
13306 
13307  ++dstBlockInfoIndex;
13308  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
13309  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
13310  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
13311  dstBlockSize = pDstMetadata->GetSize();
13312  dstOffset = 0;
13313  dstAllocOffset = 0;
13314  }
13315 
13316  // Same block
13317  if(dstBlockInfoIndex == srcBlockInfoIndex)
13318  {
13319  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
13320 
13321  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
13322 
13323  bool skipOver = overlap;
13324  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
13325  {
13326  // If the destination and source places overlap, skip the move if it would
13327  // shift the allocation by less than 1/64 of its size.
13328  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
13329  }
13330 
13331  if(skipOver)
13332  {
13333  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
13334 
13335  dstOffset = srcAllocOffset + srcAllocSize;
13336  ++srcSuballocIt;
13337  }
13338  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
13339  else
13340  {
13341  srcSuballocIt->offset = dstAllocOffset;
13342  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
13343  dstOffset = dstAllocOffset + srcAllocSize;
13344  m_BytesMoved += srcAllocSize;
13345  ++m_AllocationsMoved;
13346  ++srcSuballocIt;
13347  VmaDefragmentationMove move = {
13348  srcOrigBlockIndex, dstOrigBlockIndex,
13349  srcAllocOffset, dstAllocOffset,
13350  srcAllocSize };
13351  moves.push_back(move);
13352  }
13353  }
13354  // Different block
13355  else
13356  {
13357  // MOVE OPTION 2: Move the allocation to a different block.
13358 
13359  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
13360  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
13361 
13362  VmaSuballocation suballoc = *srcSuballocIt;
13363  suballoc.offset = dstAllocOffset;
13364  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
13365  dstOffset = dstAllocOffset + srcAllocSize;
13366  m_BytesMoved += srcAllocSize;
13367  ++m_AllocationsMoved;
13368 
13369  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
13370  ++nextSuballocIt;
13371  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
13372  srcSuballocIt = nextSuballocIt;
13373 
13374  pDstMetadata->m_Suballocations.push_back(suballoc);
13375 
13376  VmaDefragmentationMove move = {
13377  srcOrigBlockIndex, dstOrigBlockIndex,
13378  srcAllocOffset, dstAllocOffset,
13379  srcAllocSize };
13380  moves.push_back(move);
13381  }
13382  }
13383  }
13384  }
13385 
13386  m_BlockInfos.clear();
13387 
13388  PostprocessMetadata();
13389 
13390  return VK_SUCCESS;
13391 }
13392 
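// The compaction loop above slides each suballocation to the lowest offset
// that fits, either at the current destination cursor or into a gap remembered
// by the free-space database. The fit test at the cursor, as a minimal sketch
// reusing VmaAlignUp from earlier in this file (ExampleFitsAt is hypothetical):
static bool ExampleFitsAt(
    VkDeviceSize dstCursor,
    VkDeviceSize allocAlignment,
    VkDeviceSize allocSize,
    VkDeviceSize dstBlockSize,
    VkDeviceSize* pDstAllocOffset)
{
    const VkDeviceSize dstAllocOffset = VmaAlignUp(dstCursor, allocAlignment);
    if(dstAllocOffset + allocSize > dstBlockSize)
    {
        return false; // Doesn't fit - caller advances to the next block.
    }
    *pDstAllocOffset = dstAllocOffset;
    return true;
}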
13393 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
13394 {
13395  const size_t blockCount = m_pBlockVector->GetBlockCount();
13396  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13397  {
13398  VmaBlockMetadata_Generic* const pMetadata =
13399  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13400  pMetadata->m_FreeCount = 0;
13401  pMetadata->m_SumFreeSize = pMetadata->GetSize();
13402  pMetadata->m_FreeSuballocationsBySize.clear();
13403  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13404  it != pMetadata->m_Suballocations.end(); )
13405  {
13406  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
13407  {
13408  VmaSuballocationList::iterator nextIt = it;
13409  ++nextIt;
13410  pMetadata->m_Suballocations.erase(it);
13411  it = nextIt;
13412  }
13413  else
13414  {
13415  ++it;
13416  }
13417  }
13418  }
13419 }
13420 
13421 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
13422 {
13423  const size_t blockCount = m_pBlockVector->GetBlockCount();
13424  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13425  {
13426  VmaBlockMetadata_Generic* const pMetadata =
13427  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
13428  const VkDeviceSize blockSize = pMetadata->GetSize();
13429 
13430  // No allocations in this block - entire area is free.
13431  if(pMetadata->m_Suballocations.empty())
13432  {
13433  pMetadata->m_FreeCount = 1;
13434  //pMetadata->m_SumFreeSize is already set to blockSize.
13435  VmaSuballocation suballoc = {
13436  0, // offset
13437  blockSize, // size
13438  VMA_NULL, // hAllocation
13439  VMA_SUBALLOCATION_TYPE_FREE };
13440  pMetadata->m_Suballocations.push_back(suballoc);
13441  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
13442  }
13443  // There are some allocations in this block.
13444  else
13445  {
13446  VkDeviceSize offset = 0;
13447  VmaSuballocationList::iterator it;
13448  for(it = pMetadata->m_Suballocations.begin();
13449  it != pMetadata->m_Suballocations.end();
13450  ++it)
13451  {
13452  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
13453  VMA_ASSERT(it->offset >= offset);
13454 
13455  // Need to insert preceding free space.
13456  if(it->offset > offset)
13457  {
13458  ++pMetadata->m_FreeCount;
13459  const VkDeviceSize freeSize = it->offset - offset;
13460  VmaSuballocation suballoc = {
13461  offset, // offset
13462  freeSize, // size
13463  VMA_NULL, // hAllocation
13464  VMA_SUBALLOCATION_TYPE_FREE };
13465  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13466  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13467  {
13468  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
13469  }
13470  }
13471 
13472  pMetadata->m_SumFreeSize -= it->size;
13473  offset = it->offset + it->size;
13474  }
13475 
13476  // Need to insert trailing free space.
13477  if(offset < blockSize)
13478  {
13479  ++pMetadata->m_FreeCount;
13480  const VkDeviceSize freeSize = blockSize - offset;
13481  VmaSuballocation suballoc = {
13482  offset, // offset
13483  freeSize, // size
13484  VMA_NULL, // hAllocation
13485  VMA_SUBALLOCATION_TYPE_FREE };
13486  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
13487  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
13488  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
13489  {
13490  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
13491  }
13492  }
13493 
13494  VMA_SORT(
13495  pMetadata->m_FreeSuballocationsBySize.begin(),
13496  pMetadata->m_FreeSuballocationsBySize.end(),
13497  VmaSuballocationItemSizeLess());
13498  }
13499 
13500  VMA_HEAVY_ASSERT(pMetadata->Validate());
13501  }
13502 }
13503 
13504 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
13505 {
13506  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
13507  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
13508  // Advance to the first suballocation at or past suballoc.offset,
13509  // so the list stays sorted by offset.
13510  while(it != pMetadata->m_Suballocations.end() &&
13511  it->offset < suballoc.offset)
13512  {
13513  ++it;
13514  }
13515  pMetadata->m_Suballocations.insert(it, suballoc);
13516 }
13517 
13518 ////////////////////////////////////////////////////////////////////////////////
13519 // VmaBlockVectorDefragmentationContext
13520 
13521 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
13522  VmaAllocator hAllocator,
13523  VmaPool hCustomPool,
13524  VmaBlockVector* pBlockVector,
13525  uint32_t currFrameIndex) :
13526  res(VK_SUCCESS),
13527  mutexLocked(false),
13528  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
13529  m_hAllocator(hAllocator),
13530  m_hCustomPool(hCustomPool),
13531  m_pBlockVector(pBlockVector),
13532  m_CurrFrameIndex(currFrameIndex),
13533  m_pAlgorithm(VMA_NULL),
13534  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
13535  m_AllAllocations(false)
13536 {
13537 }
13538 
13539 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
13540 {
13541  vma_delete(m_hAllocator, m_pAlgorithm);
13542 }
13543 
13544 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13545 {
13546  AllocInfo info = { hAlloc, pChanged };
13547  m_Allocations.push_back(info);
13548 }
13549 
13550 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13551 {
13552  const bool allAllocations = m_AllAllocations ||
13553  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13554 
13555  /********************************
13556  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13557  ********************************/
13558 
13559  /*
13560  Fast algorithm is supported only when certain criteria are met:
13561  - VMA_DEBUG_MARGIN is 0.
13562  - All allocations in this block vector are moveable.
13563  - There is no possibility of image/buffer granularity conflict.
13564  */
13565  if(VMA_DEBUG_MARGIN == 0 &&
13566  allAllocations &&
13567  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13568  {
13569  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13570  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13571  }
13572  else
13573  {
13574  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13575  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13576  }
13577 
13578  if(allAllocations)
13579  {
13580  m_pAlgorithm->AddAll();
13581  }
13582  else
13583  {
13584  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13585  {
13586  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13587  }
13588  }
13589 }
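
// A minimal sketch of steering the algorithm choice above from user code (not part
// of the library source; `allocator` and `myPool` are assumed handles). Passing a
// whole pool via pPools makes this context take the AddAll() path, so the fast
// algorithm becomes eligible as long as VMA_DEBUG_MARGIN == 0 and the pool has no
// buffer/image granularity conflicts:
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.poolCount = 1;
//   defragInfo.pPools = &myPool;
//   defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
//   vmaDefragmentationEnd(allocator, defragCtx);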
13590 
13591 ////////////////////////////////////////////////////////////////////////////////
13592 // VmaDefragmentationContext
13593 
13594 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
13595  VmaAllocator hAllocator,
13596  uint32_t currFrameIndex,
13597  uint32_t flags,
13598  VmaDefragmentationStats* pStats) :
13599  m_hAllocator(hAllocator),
13600  m_CurrFrameIndex(currFrameIndex),
13601  m_Flags(flags),
13602  m_pStats(pStats),
13603  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
13604 {
13605  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
13606 }
13607 
13608 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13609 {
13610  for(size_t i = m_CustomPoolContexts.size(); i--; )
13611  {
13612  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
13613  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13614  vma_delete(m_hAllocator, pBlockVectorCtx);
13615  }
13616  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
13617  {
13618  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
13619  if(pBlockVectorCtx)
13620  {
13621  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
13622  vma_delete(m_hAllocator, pBlockVectorCtx);
13623  }
13624  }
13625 }
13626 
13627 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13628 {
13629  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13630  {
13631  VmaPool pool = pPools[poolIndex];
13632  VMA_ASSERT(pool);
13633  // Pools with algorithm other than default are not defragmented.
13634  if(pool->m_BlockVector.GetAlgorithm() == 0)
13635  {
13636  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13637 
13638  for(size_t i = m_CustomPoolContexts.size(); i--; )
13639  {
13640  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13641  {
13642  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13643  break;
13644  }
13645  }
13646 
13647  if(!pBlockVectorDefragCtx)
13648  {
13649  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13650  m_hAllocator,
13651  pool,
13652  &pool->m_BlockVector,
13653  m_CurrFrameIndex);
13654  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13655  }
13656 
13657  pBlockVectorDefragCtx->AddAll();
13658  }
13659  }
13660 }
13661 
13662 void VmaDefragmentationContext_T::AddAllocations(
13663  uint32_t allocationCount,
13664  VmaAllocation* pAllocations,
13665  VkBool32* pAllocationsChanged)
13666 {
13667  // Dispatch pAllocations among defragmentators. Create them when necessary.
13668  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13669  {
13670  const VmaAllocation hAlloc = pAllocations[allocIndex];
13671  VMA_ASSERT(hAlloc);
13672  // DedicatedAlloc cannot be defragmented.
13673  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
13674  // Lost allocation cannot be defragmented.
13675  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
13676  {
13677  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13678 
13679  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
13680  // This allocation belongs to custom pool.
13681  if(hAllocPool != VK_NULL_HANDLE)
13682  {
13683  // Pools with algorithm other than default are not defragmented.
13684  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
13685  {
13686  for(size_t i = m_CustomPoolContexts.size(); i--; )
13687  {
13688  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
13689  {
13690  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13691  break;
13692  }
13693  }
13694  if(!pBlockVectorDefragCtx)
13695  {
13696  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13697  m_hAllocator,
13698  hAllocPool,
13699  &hAllocPool->m_BlockVector,
13700  m_CurrFrameIndex);
13701  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13702  }
13703  }
13704  }
13705  // This allocation belongs to default pool.
13706  else
13707  {
13708  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
13709  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
13710  if(!pBlockVectorDefragCtx)
13711  {
13712  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13713  m_hAllocator,
13714  VMA_NULL, // hCustomPool
13715  m_hAllocator->m_pBlockVectors[memTypeIndex],
13716  m_CurrFrameIndex);
13717  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
13718  }
13719  }
13720 
13721  if(pBlockVectorDefragCtx)
13722  {
13723  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
13724  &pAllocationsChanged[allocIndex] : VMA_NULL;
13725  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
13726  }
13727  }
13728  }
13729 }
13730 
13731 VkResult VmaDefragmentationContext_T::Defragment(
13732  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
13733  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
13734  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
13735 {
13736  if(pStats)
13737  {
13738  memset(pStats, 0, sizeof(VmaDefragmentationStats));
13739  }
13740 
13741  if(commandBuffer == VK_NULL_HANDLE)
13742  {
13743  maxGpuBytesToMove = 0;
13744  maxGpuAllocationsToMove = 0;
13745  }
13746 
13747  VkResult res = VK_SUCCESS;
13748 
13749  // Process default pools.
13750  for(uint32_t memTypeIndex = 0;
13751  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
13752  ++memTypeIndex)
13753  {
13754  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
13755  if(pBlockVectorCtx)
13756  {
13757  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
13758  pBlockVectorCtx->GetBlockVector()->Defragment(
13759  pBlockVectorCtx,
13760  pStats,
13761  maxCpuBytesToMove, maxCpuAllocationsToMove,
13762  maxGpuBytesToMove, maxGpuAllocationsToMove,
13763  commandBuffer);
13764  if(pBlockVectorCtx->res != VK_SUCCESS)
13765  {
13766  res = pBlockVectorCtx->res;
13767  }
13768  }
13769  }
13770 
13771  // Process custom pools.
13772  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
13773  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
13774  ++customCtxIndex)
13775  {
13776  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
13777  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
13778  pBlockVectorCtx->GetBlockVector()->Defragment(
13779  pBlockVectorCtx,
13780  pStats,
13781  maxCpuBytesToMove, maxCpuAllocationsToMove,
13782  maxGpuBytesToMove, maxGpuAllocationsToMove,
13783  commandBuffer);
13784  if(pBlockVectorCtx->res != VK_SUCCESS)
13785  {
13786  res = pBlockVectorCtx->res;
13787  }
13788  }
13789 
13790  return res;
13791 }
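
// A minimal end-to-end sketch of driving this function through the public API (not
// part of the library source; `allocator`, `allocs`, `allocCount`, `changed`,
// `cmdBuf`, and `queue` are assumed to exist). GPU-side moves are recorded into the
// command buffer and counted against the GPU limits; as the check above shows,
// passing VK_NULL_HANDLE zeroes those limits and makes the pass CPU-only:
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = (uint32_t)allocCount;
//   info.pAllocations = allocs;              // VmaAllocation array to consider
//   info.pAllocationsChanged = changed;      // optional per-allocation VkBool32 output
//   info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   info.maxCpuAllocationsToMove = UINT32_MAX;
//   info.maxGpuBytesToMove = VK_WHOLE_SIZE;
//   info.maxGpuAllocationsToMove = UINT32_MAX;
//   info.commandBuffer = cmdBuf;             // VK_NULL_HANDLE disables GPU moves
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   vmaDefragmentationBegin(allocator, &info, nullptr, &defragCtx);
//   // Submit cmdBuf to `queue` and wait for it to complete, then:
//   vmaDefragmentationEnd(allocator, defragCtx);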
13792 
13793 ////////////////////////////////////////////////////////////////////////////////
13794 // VmaRecorder
13795 
13796 #if VMA_RECORDING_ENABLED
13797 
13798 VmaRecorder::VmaRecorder() :
13799  m_UseMutex(true),
13800  m_Flags(0),
13801  m_File(VMA_NULL),
13802  m_Freq(INT64_MAX),
13803  m_StartCounter(INT64_MAX)
13804 {
13805 }
13806 
13807 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
13808 {
13809  m_UseMutex = useMutex;
13810  m_Flags = settings.flags;
13811 
13812  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
13813  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
13814 
13815  // Open file for writing.
13816  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
13817  if(err != 0)
13818  {
13819  return VK_ERROR_INITIALIZATION_FAILED;
13820  }
13821 
13822  // Write header.
13823  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
13824  fprintf(m_File, "%s\n", "1,6");
13825 
13826  return VK_SUCCESS;
13827 }
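
// Recording must be enabled both at compile time and at allocator creation. A short
// setup sketch (the capture file name is an arbitrary example, not mandated by VMA):
//
//   // Before including the implementation: #define VMA_RECORDING_ENABLED 1
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // see Flush() below
//   recordSettings.pFilePath = "vma_capture.csv";
//   allocatorCreateInfo.pRecordSettings = &recordSettings;  // VmaAllocatorCreateInfo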
13828 
13829 VmaRecorder::~VmaRecorder()
13830 {
13831  if(m_File != VMA_NULL)
13832  {
13833  fclose(m_File);
13834  }
13835 }
13836 
13837 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13838 {
13839  CallParams callParams;
13840  GetBasicParams(callParams);
13841 
13842  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13843  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13844  Flush();
13845 }
13846 
13847 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13848 {
13849  CallParams callParams;
13850  GetBasicParams(callParams);
13851 
13852  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13853  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13854  Flush();
13855 }
13856 
13857 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
13858 {
13859  CallParams callParams;
13860  GetBasicParams(callParams);
13861 
13862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13863  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
13864  createInfo.memoryTypeIndex,
13865  createInfo.flags,
13866  createInfo.blockSize,
13867  (uint64_t)createInfo.minBlockCount,
13868  (uint64_t)createInfo.maxBlockCount,
13869  createInfo.frameInUseCount,
13870  pool);
13871  Flush();
13872 }
13873 
13874 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13875 {
13876  CallParams callParams;
13877  GetBasicParams(callParams);
13878 
13879  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13880  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13881  pool);
13882  Flush();
13883 }
13884 
13885 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
13886  const VkMemoryRequirements& vkMemReq,
13887  const VmaAllocationCreateInfo& createInfo,
13888  VmaAllocation allocation)
13889 {
13890  CallParams callParams;
13891  GetBasicParams(callParams);
13892 
13893  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13894  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13895  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13896  vkMemReq.size,
13897  vkMemReq.alignment,
13898  vkMemReq.memoryTypeBits,
13899  createInfo.flags,
13900  createInfo.usage,
13901  createInfo.requiredFlags,
13902  createInfo.preferredFlags,
13903  createInfo.memoryTypeBits,
13904  createInfo.pool,
13905  allocation,
13906  userDataStr.GetString());
13907  Flush();
13908 }
13909 
13910 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
13911  const VkMemoryRequirements& vkMemReq,
13912  const VmaAllocationCreateInfo& createInfo,
13913  uint64_t allocationCount,
13914  const VmaAllocation* pAllocations)
13915 {
13916  CallParams callParams;
13917  GetBasicParams(callParams);
13918 
13919  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13920  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13921  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
13922  vkMemReq.size,
13923  vkMemReq.alignment,
13924  vkMemReq.memoryTypeBits,
13925  createInfo.flags,
13926  createInfo.usage,
13927  createInfo.requiredFlags,
13928  createInfo.preferredFlags,
13929  createInfo.memoryTypeBits,
13930  createInfo.pool);
13931  PrintPointerList(allocationCount, pAllocations);
13932  fprintf(m_File, ",%s\n", userDataStr.GetString());
13933  Flush();
13934 }
13935 
13936 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
13937  const VkMemoryRequirements& vkMemReq,
13938  bool requiresDedicatedAllocation,
13939  bool prefersDedicatedAllocation,
13940  const VmaAllocationCreateInfo& createInfo,
13941  VmaAllocation allocation)
13942 {
13943  CallParams callParams;
13944  GetBasicParams(callParams);
13945 
13946  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13947  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13948  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13949  vkMemReq.size,
13950  vkMemReq.alignment,
13951  vkMemReq.memoryTypeBits,
13952  requiresDedicatedAllocation ? 1 : 0,
13953  prefersDedicatedAllocation ? 1 : 0,
13954  createInfo.flags,
13955  createInfo.usage,
13956  createInfo.requiredFlags,
13957  createInfo.preferredFlags,
13958  createInfo.memoryTypeBits,
13959  createInfo.pool,
13960  allocation,
13961  userDataStr.GetString());
13962  Flush();
13963 }
13964 
13965 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
13966  const VkMemoryRequirements& vkMemReq,
13967  bool requiresDedicatedAllocation,
13968  bool prefersDedicatedAllocation,
13969  const VmaAllocationCreateInfo& createInfo,
13970  VmaAllocation allocation)
13971 {
13972  CallParams callParams;
13973  GetBasicParams(callParams);
13974 
13975  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13976  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
13977  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
13978  vkMemReq.size,
13979  vkMemReq.alignment,
13980  vkMemReq.memoryTypeBits,
13981  requiresDedicatedAllocation ? 1 : 0,
13982  prefersDedicatedAllocation ? 1 : 0,
13983  createInfo.flags,
13984  createInfo.usage,
13985  createInfo.requiredFlags,
13986  createInfo.preferredFlags,
13987  createInfo.memoryTypeBits,
13988  createInfo.pool,
13989  allocation,
13990  userDataStr.GetString());
13991  Flush();
13992 }
13993 
13994 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13995  VmaAllocation allocation)
13996 {
13997  CallParams callParams;
13998  GetBasicParams(callParams);
13999 
14000  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14001  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14002  allocation);
14003  Flush();
14004 }
14005 
14006 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
14007  uint64_t allocationCount,
14008  const VmaAllocation* pAllocations)
14009 {
14010  CallParams callParams;
14011  GetBasicParams(callParams);
14012 
14013  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14014  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
14015  PrintPointerList(allocationCount, pAllocations);
14016  fprintf(m_File, "\n");
14017  Flush();
14018 }
14019 
14020 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
14021  VmaAllocation allocation,
14022  const void* pUserData)
14023 {
14024  CallParams callParams;
14025  GetBasicParams(callParams);
14026 
14027  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14028  UserDataString userDataStr(
14029  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
14030  pUserData);
14031  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14032  allocation,
14033  userDataStr.GetString());
14034  Flush();
14035 }
14036 
14037 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
14038  VmaAllocation allocation)
14039 {
14040  CallParams callParams;
14041  GetBasicParams(callParams);
14042 
14043  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14044  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14045  allocation);
14046  Flush();
14047 }
14048 
14049 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
14050  VmaAllocation allocation)
14051 {
14052  CallParams callParams;
14053  GetBasicParams(callParams);
14054 
14055  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14056  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14057  allocation);
14058  Flush();
14059 }
14060 
14061 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
14062  VmaAllocation allocation)
14063 {
14064  CallParams callParams;
14065  GetBasicParams(callParams);
14066 
14067  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14068  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
14069  allocation);
14070  Flush();
14071 }
14072 
14073 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
14074  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14075 {
14076  CallParams callParams;
14077  GetBasicParams(callParams);
14078 
14079  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14080  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14081  allocation,
14082  offset,
14083  size);
14084  Flush();
14085 }
14086 
14087 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
14088  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14089 {
14090  CallParams callParams;
14091  GetBasicParams(callParams);
14092 
14093  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14094  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
14095  allocation,
14096  offset,
14097  size);
14098  Flush();
14099 }
14100 
14101 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
14102  const VkBufferCreateInfo& bufCreateInfo,
14103  const VmaAllocationCreateInfo& allocCreateInfo,
14104  VmaAllocation allocation)
14105 {
14106  CallParams callParams;
14107  GetBasicParams(callParams);
14108 
14109  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14110  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14111  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14112  bufCreateInfo.flags,
14113  bufCreateInfo.size,
14114  bufCreateInfo.usage,
14115  bufCreateInfo.sharingMode,
14116  allocCreateInfo.flags,
14117  allocCreateInfo.usage,
14118  allocCreateInfo.requiredFlags,
14119  allocCreateInfo.preferredFlags,
14120  allocCreateInfo.memoryTypeBits,
14121  allocCreateInfo.pool,
14122  allocation,
14123  userDataStr.GetString());
14124  Flush();
14125 }
14126 
14127 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
14128  const VkImageCreateInfo& imageCreateInfo,
14129  const VmaAllocationCreateInfo& allocCreateInfo,
14130  VmaAllocation allocation)
14131 {
14132  CallParams callParams;
14133  GetBasicParams(callParams);
14134 
14135  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14136  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
14137  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
14138  imageCreateInfo.flags,
14139  imageCreateInfo.imageType,
14140  imageCreateInfo.format,
14141  imageCreateInfo.extent.width,
14142  imageCreateInfo.extent.height,
14143  imageCreateInfo.extent.depth,
14144  imageCreateInfo.mipLevels,
14145  imageCreateInfo.arrayLayers,
14146  imageCreateInfo.samples,
14147  imageCreateInfo.tiling,
14148  imageCreateInfo.usage,
14149  imageCreateInfo.sharingMode,
14150  imageCreateInfo.initialLayout,
14151  allocCreateInfo.flags,
14152  allocCreateInfo.usage,
14153  allocCreateInfo.requiredFlags,
14154  allocCreateInfo.preferredFlags,
14155  allocCreateInfo.memoryTypeBits,
14156  allocCreateInfo.pool,
14157  allocation,
14158  userDataStr.GetString());
14159  Flush();
14160 }
14161 
14162 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
14163  VmaAllocation allocation)
14164 {
14165  CallParams callParams;
14166  GetBasicParams(callParams);
14167 
14168  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14169  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
14170  allocation);
14171  Flush();
14172 }
14173 
14174 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
14175  VmaAllocation allocation)
14176 {
14177  CallParams callParams;
14178  GetBasicParams(callParams);
14179 
14180  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14181  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
14182  allocation);
14183  Flush();
14184 }
14185 
14186 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
14187  VmaAllocation allocation)
14188 {
14189  CallParams callParams;
14190  GetBasicParams(callParams);
14191 
14192  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14193  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
14194  allocation);
14195  Flush();
14196 }
14197 
14198 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
14199  VmaAllocation allocation)
14200 {
14201  CallParams callParams;
14202  GetBasicParams(callParams);
14203 
14204  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14205  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
14206  allocation);
14207  Flush();
14208 }
14209 
14210 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
14211  VmaPool pool)
14212 {
14213  CallParams callParams;
14214  GetBasicParams(callParams);
14215 
14216  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14217  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
14218  pool);
14219  Flush();
14220 }
14221 
14222 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
14223  const VmaDefragmentationInfo2& info,
14224  VmaDefragmentationContext ctx)
14225 {
14226  CallParams callParams;
14227  GetBasicParams(callParams);
14228 
14229  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14230  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
14231  info.flags);
14232  PrintPointerList(info.allocationCount, info.pAllocations);
14233  fprintf(m_File, ",");
14234  PrintPointerList(info.poolCount, info.pPools);
14235  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14236  info.maxCpuBytesToMove,
14237  info.maxCpuAllocationsToMove,
14238  info.maxGpuBytesToMove,
14239  info.maxGpuAllocationsToMove,
14240  info.commandBuffer,
14241  ctx);
14242  Flush();
14243 }
14244 
14245 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14246  VmaDefragmentationContext ctx)
14247 {
14248  CallParams callParams;
14249  GetBasicParams(callParams);
14250 
14251  VmaMutexLock lock(m_FileMutex, m_UseMutex);
14252  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
14253  ctx);
14254  Flush();
14255 }
14256 
14257 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
14258 {
14259  if(pUserData != VMA_NULL)
14260  {
14261  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
14262  {
14263  m_Str = (const char*)pUserData;
14264  }
14265  else
14266  {
14267  sprintf_s(m_PtrStr, "%p", pUserData);
14268  m_Str = m_PtrStr;
14269  }
14270  }
14271  else
14272  {
14273  m_Str = "";
14274  }
14275 }
14276 
14277 void VmaRecorder::WriteConfiguration(
14278  const VkPhysicalDeviceProperties& devProps,
14279  const VkPhysicalDeviceMemoryProperties& memProps,
14280  bool dedicatedAllocationExtensionEnabled,
14281  bool bindMemory2ExtensionEnabled)
14282 {
14283  fprintf(m_File, "Config,Begin\n");
14284 
14285  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
14286  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
14287  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
14288  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
14289  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
14290  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
14291 
14292  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
14293  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
14294  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
14295 
14296  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
14297  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
14298  {
14299  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
14300  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
14301  }
14302  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
14303  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
14304  {
14305  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
14306  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
14307  }
14308 
14309  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
14310  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
14311 
14312  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
14313  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
14314  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
14315  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
14316  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
14317  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
14318  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
14319  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
14320  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14321 
14322  fprintf(m_File, "Config,End\n");
14323 }
14324 
14325 void VmaRecorder::GetBasicParams(CallParams& outParams)
14326 {
14327  outParams.threadId = GetCurrentThreadId();
14328 
14329  LARGE_INTEGER counter;
14330  QueryPerformanceCounter(&counter);
14331  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
14332 }
14333 
14334 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
14335 {
14336  if(count)
14337  {
14338  fprintf(m_File, "%p", pItems[0]);
14339  for(uint64_t i = 1; i < count; ++i)
14340  {
14341  fprintf(m_File, " %p", pItems[i]);
14342  }
14343  }
14344 }
14345 
14346 void VmaRecorder::Flush()
14347 {
14348  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
14349  {
14350  fflush(m_File);
14351  }
14352 }
14353 
14354 #endif // #if VMA_RECORDING_ENABLED
14355 
14356 ////////////////////////////////////////////////////////////////////////////////
14357 // VmaAllocationObjectAllocator
14358 
14359 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
14360  m_Allocator(pAllocationCallbacks, 1024)
14361 {
14362 }
14363 
14364 VmaAllocation VmaAllocationObjectAllocator::Allocate()
14365 {
14366  VmaMutexLock mutexLock(m_Mutex);
14367  return m_Allocator.Alloc();
14368 }
14369 
14370 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
14371 {
14372  VmaMutexLock mutexLock(m_Mutex);
14373  m_Allocator.Free(hAlloc);
14374 }
14375 
14376 ////////////////////////////////////////////////////////////////////////////////
14377 // VmaAllocator_T
14378 
14379 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
14380  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
14381  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
14382  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
14383  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
14384  m_hDevice(pCreateInfo->device),
14385  m_hInstance(pCreateInfo->instance),
14386  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
14387  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
14388  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
14389  m_AllocationObjectAllocator(&m_AllocationCallbacks),
14390  m_HeapSizeLimitMask(0),
14391  m_PreferredLargeHeapBlockSize(0),
14392  m_PhysicalDevice(pCreateInfo->physicalDevice),
14393  m_CurrentFrameIndex(0),
14394  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
14395  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
14396  m_NextPoolId(0)
14397 #if VMA_RECORDING_ENABLED
14398  ,m_pRecorder(VMA_NULL)
14399 #endif
14400 {
14401  if(VMA_DEBUG_DETECT_CORRUPTION)
14402  {
14403  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14404  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14405  }
14406 
14407  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
14408 
14409 #if !(VMA_DEDICATED_ALLOCATION)
14410  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14411  {
14412  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14413  }
14414 #endif
14415 #if !(VMA_BIND_MEMORY2)
14416  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14417  {
14418  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14419  }
14420 #endif
14421 #if !(VMA_MEMORY_BUDGET)
14422  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14423  {
14424  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14425  }
14426 #endif
14427 
14428  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
14429  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14430  memset(&m_MemProps, 0, sizeof(m_MemProps));
14431 
14432  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14433  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
14434  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14435 
14436  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14437  {
14438  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14439  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14440  }
14441 
14442  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14443 
14444  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14445  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14446 
14447  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
14448  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14449  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14450  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14451 
14452  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14453  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14454 
14455  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14456  {
14457  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14458  {
14459  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14460  if(limit != VK_WHOLE_SIZE)
14461  {
14462  m_HeapSizeLimitMask |= 1u << heapIndex;
14463  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14464  {
14465  m_MemProps.memoryHeaps[heapIndex].size = limit;
14466  }
14467  }
14468  }
14469  }
14470 
14471  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14472  {
14473  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14474 
14475  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14476  this,
14477  VK_NULL_HANDLE, // hParentPool
14478  memTypeIndex,
14479  preferredBlockSize,
14480  0,
14481  SIZE_MAX,
14482  GetBufferImageGranularity(),
14483  pCreateInfo->frameInUseCount,
14484  false, // isCustomPool
14485  false, // explicitBlockSize
14486  false); // linearAlgorithm
14487  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14488  // because minBlockCount is 0.
14489  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
14490 
14491  }
14492 }
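
// A sketch of exercising the pHeapSizeLimit feature handled in the constructor above
// (not part of the library source; `physicalDevice`, `device`, and `instance` are
// assumed to be valid handles):
//
//   VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
//   for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//       heapLimits[i] = VK_WHOLE_SIZE;            // VK_WHOLE_SIZE means "no limit"
//   heapLimits[0] = 256ull * 1024 * 1024;         // cap heap 0 at 256 MiB
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.instance = instance;
//   allocatorInfo.pHeapSizeLimit = heapLimits;
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);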
14493 
14494 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
14495 {
14496  VkResult res = VK_SUCCESS;
14497 
14498  if(pCreateInfo->pRecordSettings != VMA_NULL &&
14499  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
14500  {
14501 #if VMA_RECORDING_ENABLED
14502  m_pRecorder = vma_new(this, VmaRecorder)();
14503  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
14504  if(res != VK_SUCCESS)
14505  {
14506  return res;
14507  }
14508  m_pRecorder->WriteConfiguration(
14509  m_PhysicalDeviceProperties,
14510  m_MemProps,
14511  m_UseKhrDedicatedAllocation,
14512  m_UseKhrBindMemory2);
14513  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
14514 #else
14515  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
14516  return VK_ERROR_FEATURE_NOT_PRESENT;
14517 #endif
14518  }
14519 
14520 #if VMA_MEMORY_BUDGET
14521  if(m_UseExtMemoryBudget)
14522  {
14523  UpdateVulkanBudget();
14524  }
14525 #endif // #if VMA_MEMORY_BUDGET
14526 
14527  return res;
14528 }
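
// Once UpdateVulkanBudget() has run, per-heap usage and budget can be polled through
// the public API. A brief sketch (`allocator` and `heapCount` are assumed to exist;
// pBudget must point to one VmaBudget per memory heap):
//
//   VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
//   vmaGetBudget(allocator, budgets);
//   for(uint32_t h = 0; h < heapCount; ++h)
//   {
//       printf("heap %u: %llu used of %llu budget\n", h,
//           (unsigned long long)budgets[h].usage,
//           (unsigned long long)budgets[h].budget);
//   }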
14529 
14530 VmaAllocator_T::~VmaAllocator_T()
14531 {
14532 #if VMA_RECORDING_ENABLED
14533  if(m_pRecorder != VMA_NULL)
14534  {
14535  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
14536  vma_delete(this, m_pRecorder);
14537  }
14538 #endif
14539 
14540  VMA_ASSERT(m_Pools.empty());
14541 
14542  for(size_t i = GetMemoryTypeCount(); i--; )
14543  {
14544  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
14545  {
14546  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
14547  }
14548 
14549  vma_delete(this, m_pDedicatedAllocations[i]);
14550  vma_delete(this, m_pBlockVectors[i]);
14551  }
14552 }
14553 
14554 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14555 {
14556 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14557  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
14558  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
14559  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
14560  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
14561  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
14562  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
14563  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
14564  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
14565  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
14566  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
14567  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
14568  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
14569  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
14570  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
14571  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
14572  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
14573  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
14574 #if VMA_DEDICATED_ALLOCATION
14575  if(m_UseKhrDedicatedAllocation)
14576  {
14577  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
14578  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
14579  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
14580  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
14581  }
14582 #endif // #if VMA_DEDICATED_ALLOCATION
14583 #if VMA_BIND_MEMORY2
14584  if(m_UseKhrBindMemory2)
14585  {
14586  m_VulkanFunctions.vkBindBufferMemory2KHR =
14587  (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR");
14588  m_VulkanFunctions.vkBindImageMemory2KHR =
14589  (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR");
14590  }
14591 #endif // #if VMA_BIND_MEMORY2
14592 #if VMA_MEMORY_BUDGET
14593  if(m_UseExtMemoryBudget)
14594  {
14595  VMA_ASSERT(m_hInstance != VK_NULL_HANDLE);
14596  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR =
14597  (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR");
14598  }
14599 #endif // #if VMA_MEMORY_BUDGET
14600 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
14601 
14602 #define VMA_COPY_IF_NOT_NULL(funcName) \
14603  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
14604 
14605  if(pVulkanFunctions != VMA_NULL)
14606  {
14607  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
14608  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
14609  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
14610  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
14611  VMA_COPY_IF_NOT_NULL(vkMapMemory);
14612  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
14613  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
14614  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
14615  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
14616  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
14617  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
14618  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
14619  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
14620  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
14621  VMA_COPY_IF_NOT_NULL(vkCreateImage);
14622  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
14623  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
14624 #if VMA_DEDICATED_ALLOCATION
14625  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
14626  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
14627 #endif
14628 #if VMA_BIND_MEMORY2
14629  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
14630  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
14631 #endif
14632 #if VMA_MEMORY_BUDGET
14633  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
14634 #endif
14635  }
14636 
14637 #undef VMA_COPY_IF_NOT_NULL
14638 
14639  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
14640  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
14641  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
14642  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
14643  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
14644  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
14645  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
14646  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
14647  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
14648  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
14649  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
14650  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
14651  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
14652  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
14653  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
14654  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
14655  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
14656  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
14657  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
14658 #if VMA_DEDICATED_ALLOCATION
14659  if(m_UseKhrDedicatedAllocation)
14660  {
14661  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
14662  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
14663  }
14664 #endif
14665 #if VMA_BIND_MEMORY2
14666  if(m_UseKhrBindMemory2)
14667  {
14668  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
14669  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
14670  }
14671 #endif
14672 #if VMA_MEMORY_BUDGET
14673  if(m_UseExtMemoryBudget)
14674  {
14675  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
14676  }
14677 #endif
14678 }
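
// When VMA_STATIC_VULKAN_FUNCTIONS is 0, every pointer checked above must come in
// through VmaAllocatorCreateInfo::pVulkanFunctions. A partial sketch assuming the
// functions are statically linked (a dynamic loader such as volk works the same way):
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//   vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
//   vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
//   vulkanFunctions.vkFreeMemory = vkFreeMemory;
//   // ...assign every remaining member the same way...
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;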
14679 
14680 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14681 {
14682  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14683  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14684  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14685  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14686 }
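
// Worked example of the block-size heuristic above, using the default macro values
// (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
// a 256 MiB heap counts as small, so its preferred block size is 256 MiB / 8 = 32 MiB;
// an 8 GiB heap is large, so blocks stay at the preferred 256 MiB. Both results are
// already multiples of 32 bytes, so the final VmaAlignUp leaves them unchanged.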
14687 
14688 VkResult VmaAllocator_T::AllocateMemoryOfType(
14689  VkDeviceSize size,
14690  VkDeviceSize alignment,
14691  bool dedicatedAllocation,
14692  VkBuffer dedicatedBuffer,
14693  VkImage dedicatedImage,
14694  const VmaAllocationCreateInfo& createInfo,
14695  uint32_t memTypeIndex,
14696  VmaSuballocationType suballocType,
14697  size_t allocationCount,
14698  VmaAllocation* pAllocations)
14699 {
14700  VMA_ASSERT(pAllocations != VMA_NULL);
14701  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14702 
14703  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14704 
14705  // If memory type is not HOST_VISIBLE, disable MAPPED.
14706  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14707  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14708  {
14709  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14710  }
14711 
14712  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14713  VMA_ASSERT(blockVector);
14714 
14715  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14716  bool preferDedicatedMemory =
14717  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14718  dedicatedAllocation ||
14719  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
14720  size > preferredBlockSize / 2;
14721 
14722  if(preferDedicatedMemory &&
14723  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14724  finalCreateInfo.pool == VK_NULL_HANDLE)
14725  {
14726  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14727  }
14728 
14729  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14730  {
14731  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14732  {
14733  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14734  }
14735  else
14736  {
14737  return AllocateDedicatedMemory(
14738  size,
14739  suballocType,
14740  memTypeIndex,
14741  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14742  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14743  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14744  finalCreateInfo.pUserData,
14745  dedicatedBuffer,
14746  dedicatedImage,
14747  allocationCount,
14748  pAllocations);
14749  }
14750  }
14751  else
14752  {
14753  VkResult res = blockVector->Allocate(
14754  m_CurrentFrameIndex.load(),
14755  size,
14756  alignment,
14757  finalCreateInfo,
14758  suballocType,
14759  allocationCount,
14760  pAllocations);
14761  if(res == VK_SUCCESS)
14762  {
14763  return res;
14764  }
14765 
14766  // Block allocation failed - try dedicated memory as a fallback.
14767  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14768  {
14769  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14770  }
14771  else
14772  {
14773  res = AllocateDedicatedMemory(
14774  size,
14775  suballocType,
14776  memTypeIndex,
14777  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
14778  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14779  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14780  finalCreateInfo.pUserData,
14781  dedicatedBuffer,
14782  dedicatedImage,
14783  allocationCount,
14784  pAllocations);
14785  if(res == VK_SUCCESS)
14786  {
14787  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
14788  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14789  return VK_SUCCESS;
14790  }
14791  else
14792  {
14793  // Everything failed: Return error code.
14794  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14795  return res;
14796  }
14797  }
14798  }
14799 }
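
// The dedicated-vs-block decision above can also be forced from user code. A hedged
// sketch of the two opposite requests (buffer/image creation details omitted):
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//   // Force a dedicated VkDeviceMemory block, skipping suballocation:
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
//   // ...or forbid any new vkAllocateMemory call, suballocating from existing
//   // blocks only (fails with VK_ERROR_OUT_OF_DEVICE_MEMORY if nothing fits):
//   // allocCreateInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;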
14800 
14801 VkResult VmaAllocator_T::AllocateDedicatedMemory(
14802  VkDeviceSize size,
14803  VmaSuballocationType suballocType,
14804  uint32_t memTypeIndex,
14805  bool withinBudget,
14806  bool map,
14807  bool isUserDataString,
14808  void* pUserData,
14809  VkBuffer dedicatedBuffer,
14810  VkImage dedicatedImage,
14811  size_t allocationCount,
14812  VmaAllocation* pAllocations)
14813 {
14814  VMA_ASSERT(allocationCount > 0 && pAllocations);
14815 
14816  if(withinBudget)
14817  {
14818  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14819  VmaBudget heapBudget = {};
14820  GetBudget(&heapBudget, heapIndex, 1);
14821  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
14822  {
14823  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14824  }
14825  }
14826 
14827  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
14828  allocInfo.memoryTypeIndex = memTypeIndex;
14829  allocInfo.allocationSize = size;
14830 
14831 #if VMA_DEDICATED_ALLOCATION
14832  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
14833  if(m_UseKhrDedicatedAllocation)
14834  {
14835  if(dedicatedBuffer != VK_NULL_HANDLE)
14836  {
14837  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14838  dedicatedAllocInfo.buffer = dedicatedBuffer;
14839  allocInfo.pNext = &dedicatedAllocInfo;
14840  }
14841  else if(dedicatedImage != VK_NULL_HANDLE)
14842  {
14843  dedicatedAllocInfo.image = dedicatedImage;
14844  allocInfo.pNext = &dedicatedAllocInfo;
14845  }
14846  }
14847 #endif // #if VMA_DEDICATED_ALLOCATION
14848 
14849  size_t allocIndex;
14850  VkResult res = VK_SUCCESS;
14851  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14852  {
14853  res = AllocateDedicatedMemoryPage(
14854  size,
14855  suballocType,
14856  memTypeIndex,
14857  allocInfo,
14858  map,
14859  isUserDataString,
14860  pUserData,
14861  pAllocations + allocIndex);
14862  if(res != VK_SUCCESS)
14863  {
14864  break;
14865  }
14866  }
14867 
14868  if(res == VK_SUCCESS)
14869  {
14870  // Register them in m_pDedicatedAllocations.
14871  {
14872  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14873  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
14874  VMA_ASSERT(pDedicatedAllocations);
14875  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14876  {
14877  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
14878  }
14879  }
14880 
14881  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14882  }
14883  else
14884  {
14885  // Free all already created allocations.
14886  while(allocIndex--)
14887  {
14888  VmaAllocation currAlloc = pAllocations[allocIndex];
14889  VkDeviceMemory hMemory = currAlloc->GetMemory();
14890 
14891  /*
14892  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
14893  before vkFreeMemory.
14894 
14895  if(currAlloc->GetMappedData() != VMA_NULL)
14896  {
14897  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14898  }
14899  */
14900 
14901  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14902  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14903  m_Budget.m_AllocationBytes[heapIndex] -= currAlloc->GetSize();
14904  ++m_Budget.m_OperationsSinceBudgetFetch;
14905  currAlloc->SetUserData(this, VMA_NULL);
14906  currAlloc->Dtor();
14907  m_AllocationObjectAllocator.Free(currAlloc);
14908  }
14909 
14910  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14911  }
14912 
14913  return res;
14914 }
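
// The withinBudget parameter above corresponds to a public allocation flag. A short
// sketch of a budget-checked dedicated allocation (accurate budget numbers require
// the VK_EXT_memory_budget extension; otherwise VMA estimates from heap sizes):
//
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
//       VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
//   // The allocation fails with VK_ERROR_OUT_OF_DEVICE_MEMORY rather than
//   // exceeding the current heap budget.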
14915 
14916 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14917  VkDeviceSize size,
14918  VmaSuballocationType suballocType,
14919  uint32_t memTypeIndex,
14920  const VkMemoryAllocateInfo& allocInfo,
14921  bool map,
14922  bool isUserDataString,
14923  void* pUserData,
14924  VmaAllocation* pAllocation)
14925 {
14926  VkDeviceMemory hMemory = VK_NULL_HANDLE;
14927  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14928  if(res < 0)
14929  {
14930  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14931  return res;
14932  }
14933 
14934  void* pMappedData = VMA_NULL;
14935  if(map)
14936  {
14937  res = (*m_VulkanFunctions.vkMapMemory)(
14938  m_hDevice,
14939  hMemory,
14940  0,
14941  VK_WHOLE_SIZE,
14942  0,
14943  &pMappedData);
14944  if(res < 0)
14945  {
14946  VMA_DEBUG_LOG(" vkMapMemory FAILED");
14947  FreeVulkanMemory(memTypeIndex, size, hMemory);
14948  return res;
14949  }
14950  }
14951 
14952  *pAllocation = m_AllocationObjectAllocator.Allocate();
14953  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
14954  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
14955  (*pAllocation)->SetUserData(this, pUserData);
14956  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14957  m_Budget.m_AllocationBytes[heapIndex] += size;
14958  ++m_Budget.m_OperationsSinceBudgetFetch;
14959  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14960  {
14961  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14962  }
14963 
14964  return VK_SUCCESS;
14965 }
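
// The `map` path above is what VMA_ALLOCATION_CREATE_MAPPED_BIT requests: memory that
// stays mapped for the allocation's whole lifetime. A usage sketch (`allocator`,
// `bufCreateInfo`, `srcData`, and `srcSize` are assumed to exist):
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
//   VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo;
//   vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
//   memcpy(allocInfo.pMappedData, srcData, srcSize); // no vmaMapMemory call needed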
14966 
14967 void VmaAllocator_T::GetBufferMemoryRequirements(
14968  VkBuffer hBuffer,
14969  VkMemoryRequirements& memReq,
14970  bool& requiresDedicatedAllocation,
14971  bool& prefersDedicatedAllocation) const
14972 {
14973 #if VMA_DEDICATED_ALLOCATION
14974  if(m_UseKhrDedicatedAllocation)
14975  {
14976  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
14977  memReqInfo.buffer = hBuffer;
14978 
14979  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
14980 
14981  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
14982  memReq2.pNext = &memDedicatedReq;
14983 
14984  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14985 
14986  memReq = memReq2.memoryRequirements;
14987  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14988  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14989  }
14990  else
14991 #endif // #if VMA_DEDICATED_ALLOCATION
14992  {
14993  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14994  requiresDedicatedAllocation = false;
14995  prefersDedicatedAllocation = false;
14996  }
14997 }
14998 
14999 void VmaAllocator_T::GetImageMemoryRequirements(
15000  VkImage hImage,
15001  VkMemoryRequirements& memReq,
15002  bool& requiresDedicatedAllocation,
15003  bool& prefersDedicatedAllocation) const
15004 {
15005 #if VMA_DEDICATED_ALLOCATION
15006  if(m_UseKhrDedicatedAllocation)
15007  {
15008  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
15009  memReqInfo.image = hImage;
15010 
15011  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
15012 
15013  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
15014  memReq2.pNext = &memDedicatedReq;
15015 
15016  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
15017 
15018  memReq = memReq2.memoryRequirements;
15019  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
15020  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
15021  }
15022  else
15023 #endif // #if VMA_DEDICATED_ALLOCATION
15024  {
15025  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
15026  requiresDedicatedAllocation = false;
15027  prefersDedicatedAllocation = false;
15028  }
15029 }
15030 
15031 VkResult VmaAllocator_T::AllocateMemory(
15032  const VkMemoryRequirements& vkMemReq,
15033  bool requiresDedicatedAllocation,
15034  bool prefersDedicatedAllocation,
15035  VkBuffer dedicatedBuffer,
15036  VkImage dedicatedImage,
15037  const VmaAllocationCreateInfo& createInfo,
15038  VmaSuballocationType suballocType,
15039  size_t allocationCount,
15040  VmaAllocation* pAllocations)
15041 {
15042  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
15043 
15044  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
15045 
15046  if(vkMemReq.size == 0)
15047  {
15048  return VK_ERROR_VALIDATION_FAILED_EXT;
15049  }
15050  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
15051  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15052  {
15053  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
15054  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15055  }
15056  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15057  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
15058  {
15059  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
15060  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15061  }
15062  if(requiresDedicatedAllocation)
15063  {
15064  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
15065  {
15066  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
15067  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15068  }
15069  if(createInfo.pool != VK_NULL_HANDLE)
15070  {
15071  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
15072  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15073  }
15074  }
15075  if((createInfo.pool != VK_NULL_HANDLE) &&
15076  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
15077  {
15078  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
15079  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15080  }
15081 
15082  if(createInfo.pool != VK_NULL_HANDLE)
15083  {
15084  const VkDeviceSize alignmentForPool = VMA_MAX(
15085  vkMemReq.alignment,
15086  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
15087 
15088  VmaAllocationCreateInfo createInfoForPool = createInfo;
15089  // If memory type is not HOST_VISIBLE, disable MAPPED.
15090  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
15091  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15092  {
15093  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
15094  }
15095 
15096  return createInfo.pool->m_BlockVector.Allocate(
15097  m_CurrentFrameIndex.load(),
15098  vkMemReq.size,
15099  alignmentForPool,
15100  createInfoForPool,
15101  suballocType,
15102  allocationCount,
15103  pAllocations);
15104  }
15105  else
15106  {
15107  // Bit mask of Vulkan memory types acceptable for this allocation.
15108  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15109  uint32_t memTypeIndex = UINT32_MAX;
15110  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15111  if(res == VK_SUCCESS)
15112  {
15113  VkDeviceSize alignmentForMemType = VMA_MAX(
15114  vkMemReq.alignment,
15115  GetMemoryTypeMinAlignment(memTypeIndex));
15116 
15117  res = AllocateMemoryOfType(
15118  vkMemReq.size,
15119  alignmentForMemType,
15120  requiresDedicatedAllocation || prefersDedicatedAllocation,
15121  dedicatedBuffer,
15122  dedicatedImage,
15123  createInfo,
15124  memTypeIndex,
15125  suballocType,
15126  allocationCount,
15127  pAllocations);
15128  // Succeeded on first try.
15129  if(res == VK_SUCCESS)
15130  {
15131  return res;
15132  }
15133  // Allocation from this memory type failed. Try other compatible memory types.
15134  else
15135  {
15136  for(;;)
15137  {
15138  // Remove old memTypeIndex from list of possibilities.
15139  memoryTypeBits &= ~(1u << memTypeIndex);
15140  // Find alternative memTypeIndex.
15141  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
15142  if(res == VK_SUCCESS)
15143  {
15144  alignmentForMemType = VMA_MAX(
15145  vkMemReq.alignment,
15146  GetMemoryTypeMinAlignment(memTypeIndex));
15147 
15148  res = AllocateMemoryOfType(
15149  vkMemReq.size,
15150  alignmentForMemType,
15151  requiresDedicatedAllocation || prefersDedicatedAllocation,
15152  dedicatedBuffer,
15153  dedicatedImage,
15154  createInfo,
15155  memTypeIndex,
15156  suballocType,
15157  allocationCount,
15158  pAllocations);
15159  // Allocation from this alternative memory type succeeded.
15160  if(res == VK_SUCCESS)
15161  {
15162  return res;
15163  }
15164  // else: Allocation from this memory type failed. Try next one - next loop iteration.
15165  }
15166  // No other matching memory type index could be found.
15167  else
15168  {
15169  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15170  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15171  }
15172  }
15173  }
15174  }
15175  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15176  else
15177  return res;
15178  }
15179 }
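/*
Editorial note: the fallback loop above retries allocation after removing each
failed memory type from the candidate mask. A minimal standalone sketch of the
same pattern (FindIndex and TryAllocate are hypothetical placeholders, not VMA
functions):

VkResult AllocateWithFallback(uint32_t memoryTypeBits)
{
    uint32_t memTypeIndex = UINT32_MAX;
    while(FindIndex(memoryTypeBits, &memTypeIndex) == VK_SUCCESS)
    {
        if(TryAllocate(memTypeIndex) == VK_SUCCESS)
        {
            return VK_SUCCESS;
        }
        memoryTypeBits &= ~(1u << memTypeIndex); // exclude the failed type, retry
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
*/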
15180 
15181 void VmaAllocator_T::FreeMemory(
15182  size_t allocationCount,
15183  const VmaAllocation* pAllocations)
15184 {
15185  VMA_ASSERT(pAllocations);
15186 
15187  for(size_t allocIndex = allocationCount; allocIndex--; )
15188  {
15189  VmaAllocation allocation = pAllocations[allocIndex];
15190 
15191  if(allocation != VK_NULL_HANDLE)
15192  {
15193  if(TouchAllocation(allocation))
15194  {
15195  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15196  {
15197  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15198  }
15199 
15200  switch(allocation->GetType())
15201  {
15202  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15203  {
15204  VmaBlockVector* pBlockVector = VMA_NULL;
15205  VmaPool hPool = allocation->GetBlock()->GetParentPool();
15206  if(hPool != VK_NULL_HANDLE)
15207  {
15208  pBlockVector = &hPool->m_BlockVector;
15209  }
15210  else
15211  {
15212  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15213  pBlockVector = m_pBlockVectors[memTypeIndex];
15214  }
15215  pBlockVector->Free(allocation);
15216  }
15217  break;
15218  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15219  FreeDedicatedMemory(allocation);
15220  break;
15221  default:
15222  VMA_ASSERT(0);
15223  }
15224  }
15225 
15226  m_Budget.m_AllocationBytes[MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex())] -= allocation->GetSize();
15227  ++m_Budget.m_OperationsSinceBudgetFetch;
15228  allocation->SetUserData(this, VMA_NULL);
15229  allocation->Dtor();
15230  m_AllocationObjectAllocator.Free(allocation);
15231  }
15232  }
15233 }
15234 
15235 VkResult VmaAllocator_T::ResizeAllocation(
15236  const VmaAllocation alloc,
15237  VkDeviceSize newSize)
15238 {
15239  // This function is deprecated and so it does nothing. It's left for backward compatibility.
15240  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
15241  {
15242  return VK_ERROR_VALIDATION_FAILED_EXT;
15243  }
15244  if(newSize == alloc->GetSize())
15245  {
15246  return VK_SUCCESS;
15247  }
15248  return VK_ERROR_OUT_OF_POOL_MEMORY;
15249 }
15250 
15251 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
15252 {
15253  // Initialize.
15254  InitStatInfo(pStats->total);
15255  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15256  InitStatInfo(pStats->memoryType[i]);
15257  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15258  InitStatInfo(pStats->memoryHeap[i]);
15259 
15260  // Process default pools.
15261  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15262  {
15263  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15264  VMA_ASSERT(pBlockVector);
15265  pBlockVector->AddStats(pStats);
15266  }
15267 
15268  // Process custom pools.
15269  {
15270  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15271  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15272  {
15273  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
15274  }
15275  }
15276 
15277  // Process dedicated allocations.
15278  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15279  {
15280  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
15281  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15282  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15283  VMA_ASSERT(pDedicatedAllocVector);
15284  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
15285  {
15286  VmaStatInfo allocationStatInfo;
15287  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
15288  VmaAddStatInfo(pStats->total, allocationStatInfo);
15289  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
15290  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
15291  }
15292  }
15293 
15294  // Postprocess.
15295  VmaPostprocessCalcStatInfo(pStats->total);
15296  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
15297  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
15298  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
15299  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
15300 }
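/*
Usage sketch (editorial, assuming a valid VmaAllocator named allocator):
statistics are normally queried through the public wrapper vmaCalculateStats()
defined further below.

VmaStats stats;
vmaCalculateStats(allocator, &stats);
// stats.total aggregates everything; per-type and per-heap breakdowns are in
// stats.memoryType[] and stats.memoryHeap[].
printf("Used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);
*/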
15301 
15302 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
15303 {
15304 #if VMA_MEMORY_BUDGET
15305  if(m_UseExtMemoryBudget)
15306  {
15307  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
15308  {
15309  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
15310  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15311  {
15312  const uint32_t heapIndex = firstHeap + i;
15313 
15314  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15315  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15316 
15317  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
15318  {
15319  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
15320  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
15321  }
15322  else
15323  {
15324  outBudget->usage = 0;
15325  }
15326 
15327  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
15328  outBudget->budget = VMA_MIN(
15329  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
15330  }
15331  }
15332  else
15333  {
15334  UpdateVulkanBudget(); // Outside of mutex lock
15335  GetBudget(outBudget, firstHeap, heapCount); // Recursion
15336  }
15337  }
15338  else
15339 #endif
15340  {
15341  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
15342  {
15343  const uint32_t heapIndex = firstHeap + i;
15344 
15345  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
15346  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
15347 
15348  outBudget->usage = outBudget->blockBytes;
15349  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
15350  }
15351  }
15352 }
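/*
Usage sketch (editorial): an application can consult the budget before making a
large allocation. heapIndex and requestedSize are assumed inputs.

VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(allocator, budgets);
if(budgets[heapIndex].usage + requestedSize <= budgets[heapIndex].budget)
{
    // Likely to fit within the OS-reported budget for this heap.
}
*/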
15353 
15354 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
15355 
15356 VkResult VmaAllocator_T::DefragmentationBegin(
15357  const VmaDefragmentationInfo2& info,
15358  VmaDefragmentationStats* pStats,
15359  VmaDefragmentationContext* pContext)
15360 {
15361  if(info.pAllocationsChanged != VMA_NULL)
15362  {
15363  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
15364  }
15365 
15366  *pContext = vma_new(this, VmaDefragmentationContext_T)(
15367  this, m_CurrentFrameIndex.load(), info.flags, pStats);
15368 
15369  (*pContext)->AddPools(info.poolCount, info.pPools);
15370  (*pContext)->AddAllocations(
15371  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
15372 
15373  VkResult res = (*pContext)->Defragment(
15374  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
15375  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
15376  info.commandBuffer, pStats);
15377 
15378  if(res != VK_NOT_READY)
15379  {
15380  vma_delete(this, *pContext);
15381  *pContext = VMA_NULL;
15382  }
15383 
15384  return res;
15385 }
15386 
15387 VkResult VmaAllocator_T::DefragmentationEnd(
15388  VmaDefragmentationContext context)
15389 {
15390  vma_delete(this, context);
15391  return VK_SUCCESS;
15392 }
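/*
Usage sketch (editorial) of the begin/end pair for CPU-side defragmentation.
allocs and allocsChanged are assumed arrays of length allocationCount.

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocationCount;
defragInfo.pAllocations = allocs;
defragInfo.pAllocationsChanged = allocsChanged; // optional
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;   // no limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// Without a commandBuffer, the work completes on the CPU, so res is not VK_NOT_READY.
vmaDefragmentationEnd(allocator, defragCtx);
*/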
15393 
15394 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15395 {
15396  if(hAllocation->CanBecomeLost())
15397  {
15398  /*
15399  Warning: This is a carefully designed algorithm.
15400  Do not modify unless you really know what you're doing :)
15401  */
15402  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15403  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15404  for(;;)
15405  {
15406  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15407  {
15408  pAllocationInfo->memoryType = UINT32_MAX;
15409  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
15410  pAllocationInfo->offset = 0;
15411  pAllocationInfo->size = hAllocation->GetSize();
15412  pAllocationInfo->pMappedData = VMA_NULL;
15413  pAllocationInfo->pUserData = hAllocation->GetUserData();
15414  return;
15415  }
15416  else if(localLastUseFrameIndex == localCurrFrameIndex)
15417  {
15418  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15419  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15420  pAllocationInfo->offset = hAllocation->GetOffset();
15421  pAllocationInfo->size = hAllocation->GetSize();
15422  pAllocationInfo->pMappedData = VMA_NULL;
15423  pAllocationInfo->pUserData = hAllocation->GetUserData();
15424  return;
15425  }
15426  else // Last use time earlier than current time.
15427  {
15428  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15429  {
15430  localLastUseFrameIndex = localCurrFrameIndex;
15431  }
15432  }
15433  }
15434  }
15435  else
15436  {
15437 #if VMA_STATS_STRING_ENABLED
15438  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15439  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15440  for(;;)
15441  {
15442  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15443  if(localLastUseFrameIndex == localCurrFrameIndex)
15444  {
15445  break;
15446  }
15447  else // Last use time earlier than current time.
15448  {
15449  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15450  {
15451  localLastUseFrameIndex = localCurrFrameIndex;
15452  }
15453  }
15454  }
15455 #endif
15456 
15457  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15458  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15459  pAllocationInfo->offset = hAllocation->GetOffset();
15460  pAllocationInfo->size = hAllocation->GetSize();
15461  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15462  pAllocationInfo->pUserData = hAllocation->GetUserData();
15463  }
15464 }
15465 
15466 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
15467 {
15468  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
15469  if(hAllocation->CanBecomeLost())
15470  {
15471  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15472  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15473  for(;;)
15474  {
15475  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
15476  {
15477  return false;
15478  }
15479  else if(localLastUseFrameIndex == localCurrFrameIndex)
15480  {
15481  return true;
15482  }
15483  else // Last use time earlier than current time.
15484  {
15485  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15486  {
15487  localLastUseFrameIndex = localCurrFrameIndex;
15488  }
15489  }
15490  }
15491  }
15492  else
15493  {
15494 #if VMA_STATS_STRING_ENABLED
15495  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
15496  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
15497  for(;;)
15498  {
15499  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
15500  if(localLastUseFrameIndex == localCurrFrameIndex)
15501  {
15502  break;
15503  }
15504  else // Last use time earlier than current time.
15505  {
15506  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
15507  {
15508  localLastUseFrameIndex = localCurrFrameIndex;
15509  }
15510  }
15511  }
15512 #endif
15513 
15514  return true;
15515  }
15516 }
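/*
Usage sketch (editorial): for allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, a frame typically touches each
allocation before use and re-creates the resource if it has been lost.

if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation is lost: destroy the old buffer/image and allocate anew.
}
*/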
15517 
15518 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15519 {
15520  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15521 
15522  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15523 
15524  if(newCreateInfo.maxBlockCount == 0)
15525  {
15526  newCreateInfo.maxBlockCount = SIZE_MAX;
15527  }
15528  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15529  {
15530  return VK_ERROR_INITIALIZATION_FAILED;
15531  }
15532 
15533  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15534 
15535  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15536 
15537  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15538  if(res != VK_SUCCESS)
15539  {
15540  vma_delete(this, *pPool);
15541  *pPool = VMA_NULL;
15542  return res;
15543  }
15544 
15545  // Add to m_Pools.
15546  {
15547  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15548  (*pPool)->SetId(m_NextPoolId++);
15549  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
15550  }
15551 
15552  return VK_SUCCESS;
15553 }
15554 
15555 void VmaAllocator_T::DestroyPool(VmaPool pool)
15556 {
15557  // Remove from m_Pools.
15558  {
15559  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15560  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
15561  VMA_ASSERT(success && "Pool not found in Allocator.");
15562  }
15563 
15564  vma_delete(this, pool);
15565 }
15566 
15567 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
15568 {
15569  pool->m_BlockVector.GetPoolStats(pPoolStats);
15570 }
15571 
15572 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15573 {
15574  m_CurrentFrameIndex.store(frameIndex);
15575 
15576 #if VMA_MEMORY_BUDGET
15577  if(m_UseExtMemoryBudget)
15578  {
15579  UpdateVulkanBudget();
15580  }
15581 #endif // #if VMA_MEMORY_BUDGET
15582 }
15583 
15584 void VmaAllocator_T::MakePoolAllocationsLost(
15585  VmaPool hPool,
15586  size_t* pLostAllocationCount)
15587 {
15588  hPool->m_BlockVector.MakePoolAllocationsLost(
15589  m_CurrentFrameIndex.load(),
15590  pLostAllocationCount);
15591 }
15592 
15593 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15594 {
15595  return hPool->m_BlockVector.CheckCorruption();
15596 }
15597 
15598 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15599 {
15600  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
15601 
15602  // Process default pools.
15603  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15604  {
15605  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
15606  {
15607  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15608  VMA_ASSERT(pBlockVector);
15609  VkResult localRes = pBlockVector->CheckCorruption();
15610  switch(localRes)
15611  {
15612  case VK_ERROR_FEATURE_NOT_PRESENT:
15613  break;
15614  case VK_SUCCESS:
15615  finalRes = VK_SUCCESS;
15616  break;
15617  default:
15618  return localRes;
15619  }
15620  }
15621  }
15622 
15623  // Process custom pools.
15624  {
15625  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15626  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
15627  {
15628  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15629  {
15630  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
15631  switch(localRes)
15632  {
15633  case VK_ERROR_FEATURE_NOT_PRESENT:
15634  break;
15635  case VK_SUCCESS:
15636  finalRes = VK_SUCCESS;
15637  break;
15638  default:
15639  return localRes;
15640  }
15641  }
15642  }
15643  }
15644 
15645  return finalRes;
15646 }
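/*
Usage sketch (editorial): corruption detection requires margins and validation
to be compiled in before this header is included, e.g.:

#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1
#include "vk_mem_alloc.h"

Then at runtime:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // check all memory types
// VK_ERROR_FEATURE_NOT_PRESENT means no checked memory type supports validation.
*/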
15647 
15648 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
15649 {
15650  *pAllocation = m_AllocationObjectAllocator.Allocate();
15651  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
15652  (*pAllocation)->InitLost();
15653 }
15654 
15655 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15656 {
15657  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15658 
15659  // HeapSizeLimit is in effect for this heap.
15660  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15661  {
15662  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15663  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
15664  for(;;)
15665  {
15666  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15667  if(blockBytesAfterAllocation > heapSize)
15668  {
15669  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
15670  }
15671  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
15672  {
15673  break;
15674  }
15675  }
15676  }
15677  else
15678  {
15679  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15680  }
15681 
15682  // VULKAN CALL vkAllocateMemory.
15683  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15684 
15685  if(res == VK_SUCCESS)
15686  {
15687  ++m_Budget.m_OperationsSinceBudgetFetch;
15688 
15689  // Informative callback.
15690  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15691  {
15692  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
15693  }
15694  }
15695  else
15696  {
15697  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15698  }
15699 
15700  return res;
15701 }
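/*
Usage sketch (editorial): the informative callbacks invoked above and in
FreeVulkanMemory() are registered via VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
MyAllocateCallback is a hypothetical name.

static void VKAPI_PTR MyAllocateCallback(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    // e.g. log or count every vkAllocateMemory made by the allocator
}

VmaDeviceMemoryCallbacks memoryCallbacks = {};
memoryCallbacks.pfnAllocate = MyAllocateCallback;
// memoryCallbacks.pfnFree = ...analogous callback for vkFreeMemory...
allocatorCreateInfo.pDeviceMemoryCallbacks = &memoryCallbacks;
*/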
15702 
15703 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15704 {
15705  // Informative callback.
15706  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15707  {
15708  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
15709  }
15710 
15711  // VULKAN CALL vkFreeMemory.
15712  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15713 
15714  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15715  m_Budget.m_BlockBytes[heapIndex] -= size;
15716  ++m_Budget.m_OperationsSinceBudgetFetch;
15717 }
15718 
15719 VkResult VmaAllocator_T::BindVulkanBuffer(
15720  VkDeviceMemory memory,
15721  VkDeviceSize memoryOffset,
15722  VkBuffer buffer,
15723  const void* pNext)
15724 {
15725  if(pNext != VMA_NULL)
15726  {
15727 #if VMA_BIND_MEMORY2
15728  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15729  {
15730  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
15731  bindBufferMemoryInfo.pNext = pNext;
15732  bindBufferMemoryInfo.buffer = buffer;
15733  bindBufferMemoryInfo.memory = memory;
15734  bindBufferMemoryInfo.memoryOffset = memoryOffset;
15735  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15736  }
15737  else
15738 #endif // #if VMA_BIND_MEMORY2
15739  {
15740  return VK_ERROR_EXTENSION_NOT_PRESENT;
15741  }
15742  }
15743  else
15744  {
15745  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15746  }
15747 }
15748 
15749 VkResult VmaAllocator_T::BindVulkanImage(
15750  VkDeviceMemory memory,
15751  VkDeviceSize memoryOffset,
15752  VkImage image,
15753  const void* pNext)
15754 {
15755  if(pNext != VMA_NULL)
15756  {
15757 #if VMA_BIND_MEMORY2
15758  if(m_UseKhrBindMemory2 && m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15759  {
15760  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
15761  bindImageMemoryInfo.pNext = pNext;
15762  bindImageMemoryInfo.image = image;
15763  bindImageMemoryInfo.memory = memory;
15764  bindImageMemoryInfo.memoryOffset = memoryOffset;
15765  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
15766  }
15767  else
15768 #endif // #if VMA_BIND_MEMORY2
15769  {
15770  return VK_ERROR_EXTENSION_NOT_PRESENT;
15771  }
15772  }
15773  else
15774  {
15775  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15776  }
15777 }
15778 
15779 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15780 {
15781  if(hAllocation->CanBecomeLost())
15782  {
15783  return VK_ERROR_MEMORY_MAP_FAILED;
15784  }
15785 
15786  switch(hAllocation->GetType())
15787  {
15788  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15789  {
15790  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15791  char *pBytes = VMA_NULL;
15792  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15793  if(res == VK_SUCCESS)
15794  {
15795  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15796  hAllocation->BlockAllocMap();
15797  }
15798  return res;
15799  }
15800  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15801  return hAllocation->DedicatedAllocMap(this, ppData);
15802  default:
15803  VMA_ASSERT(0);
15804  return VK_ERROR_MEMORY_MAP_FAILED;
15805  }
15806 }
15807 
15808 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15809 {
15810  switch(hAllocation->GetType())
15811  {
15812  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15813  {
15814  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15815  hAllocation->BlockAllocUnmap();
15816  pBlock->Unmap(this, 1);
15817  }
15818  break;
15819  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15820  hAllocation->DedicatedAllocUnmap(this);
15821  break;
15822  default:
15823  VMA_ASSERT(0);
15824  }
15825 }
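/*
Usage sketch (editorial): Map()/Unmap() are reference-counted per block, so a
typical host write through the public API looks like this (alloc is assumed to
live in HOST_VISIBLE memory):

void* mapped = VMA_NULL;
if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, alloc);
}
// Alternatively, VMA_ALLOCATION_CREATE_MAPPED_BIT keeps the allocation
// persistently mapped and exposes the pointer as VmaAllocationInfo::pMappedData.
*/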
15826 
15827 VkResult VmaAllocator_T::BindBufferMemory(
15828  VmaAllocation hAllocation,
15829  VkDeviceSize allocationLocalOffset,
15830  VkBuffer hBuffer,
15831  const void* pNext)
15832 {
15833  VkResult res = VK_SUCCESS;
15834  switch(hAllocation->GetType())
15835  {
15836  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15837  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
15838  break;
15839  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15840  {
15841  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15842  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
15843  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
15844  break;
15845  }
15846  default:
15847  VMA_ASSERT(0);
15848  }
15849  return res;
15850 }
15851 
15852 VkResult VmaAllocator_T::BindImageMemory(
15853  VmaAllocation hAllocation,
15854  VkDeviceSize allocationLocalOffset,
15855  VkImage hImage,
15856  const void* pNext)
15857 {
15858  VkResult res = VK_SUCCESS;
15859  switch(hAllocation->GetType())
15860  {
15861  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15862  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
15863  break;
15864  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15865  {
15866  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15867  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
15868  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
15869  break;
15870  }
15871  default:
15872  VMA_ASSERT(0);
15873  }
15874  return res;
15875 }
15876 
15877 void VmaAllocator_T::FlushOrInvalidateAllocation(
15878  VmaAllocation hAllocation,
15879  VkDeviceSize offset, VkDeviceSize size,
15880  VMA_CACHE_OPERATION op)
15881 {
15882  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
15883  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
15884  {
15885  const VkDeviceSize allocationSize = hAllocation->GetSize();
15886  VMA_ASSERT(offset <= allocationSize);
15887 
15888  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
15889 
15890  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15891  memRange.memory = hAllocation->GetMemory();
15892 
15893  switch(hAllocation->GetType())
15894  {
15895  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15896  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15897  if(size == VK_WHOLE_SIZE)
15898  {
15899  memRange.size = allocationSize - memRange.offset;
15900  }
15901  else
15902  {
15903  VMA_ASSERT(offset + size <= allocationSize);
15904  memRange.size = VMA_MIN(
15905  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15906  allocationSize - memRange.offset);
15907  }
15908  break;
15909 
15910  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15911  {
15912  // 1. Still within this allocation.
15913  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15914  if(size == VK_WHOLE_SIZE)
15915  {
15916  size = allocationSize - offset;
15917  }
15918  else
15919  {
15920  VMA_ASSERT(offset + size <= allocationSize);
15921  }
15922  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15923 
15924  // 2. Adjust to whole block.
15925  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15926  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15927  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15928  memRange.offset += allocationOffset;
15929  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15930 
15931  break;
15932  }
15933 
15934  default:
15935  VMA_ASSERT(0);
15936  }
15937 
15938  switch(op)
15939  {
15940  case VMA_CACHE_FLUSH:
15941  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15942  break;
15943  case VMA_CACHE_INVALIDATE:
15944  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15945  break;
15946  default:
15947  VMA_ASSERT(0);
15948  }
15949  }
15950  // else: Just ignore this call.
15951 }
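/*
Worked example (editorial) of the nonCoherentAtomSize rounding above, assuming
nonCoherentAtomSize = 64, offset = 70, size = 100, allocationSize = 256:
  memRange.offset = VmaAlignDown(70, 64)            = 64
  memRange.size   = VmaAlignUp(100 + (70 - 64), 64) = VmaAlignUp(106, 64) = 128
The flushed/invalidated range [64, 192) fully covers the requested [70, 170).
*/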
15952 
15953 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
15954 {
15955  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15956 
15957  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15958  {
15959  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15960  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15961  VMA_ASSERT(pDedicatedAllocations);
15962  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15963  VMA_ASSERT(success);
15964  }
15965 
15966  VkDeviceMemory hMemory = allocation->GetMemory();
15967 
15968  /*
15969  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
15970  before vkFreeMemory.
15971 
15972  if(allocation->GetMappedData() != VMA_NULL)
15973  {
15974  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15975  }
15976  */
15977 
15978  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15979 
15980  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15981 }
15982 
15983 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15984 {
15985  VkBufferCreateInfo dummyBufCreateInfo;
15986  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15987 
15988  uint32_t memoryTypeBits = 0;
15989 
15990  // Create buffer.
15991  VkBuffer buf = VK_NULL_HANDLE;
15992  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15993  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15994  if(res == VK_SUCCESS)
15995  {
15996  // Query for supported memory types.
15997  VkMemoryRequirements memReq;
15998  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15999  memoryTypeBits = memReq.memoryTypeBits;
16000 
16001  // Destroy buffer.
16002  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
16003  }
16004 
16005  return memoryTypeBits;
16006 }
16007 
16008 #if VMA_MEMORY_BUDGET
16009 
16010 void VmaAllocator_T::UpdateVulkanBudget()
16011 {
16012  VMA_ASSERT(m_UseExtMemoryBudget);
16013 
16014  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
16015 
16016  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
16017  memProps.pNext = &budgetProps;
16018 
16019  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
16020 
16021  {
16022  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
16023 
16024  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16025  {
16026  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
16027  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
16028  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
16029  }
16030  m_Budget.m_OperationsSinceBudgetFetch = 0;
16031  }
16032 }
16033 
16034 #endif // #if VMA_MEMORY_BUDGET
16035 
16036 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
16037 {
16038  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
16039  !hAllocation->CanBecomeLost() &&
16040  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16041  {
16042  void* pData = VMA_NULL;
16043  VkResult res = Map(hAllocation, &pData);
16044  if(res == VK_SUCCESS)
16045  {
16046  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
16047  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
16048  Unmap(hAllocation);
16049  }
16050  else
16051  {
16052  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
16053  }
16054  }
16055 }
16056 
16057 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
16058 {
16059  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
16060  if(memoryTypeBits == UINT32_MAX)
16061  {
16062  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
16063  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
16064  }
16065  return memoryTypeBits;
16066 }
16067 
16068 #if VMA_STATS_STRING_ENABLED
16069 
16070 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
16071 {
16072  bool dedicatedAllocationsStarted = false;
16073  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16074  {
16075  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16076  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16077  VMA_ASSERT(pDedicatedAllocVector);
16078  if(pDedicatedAllocVector->empty() == false)
16079  {
16080  if(dedicatedAllocationsStarted == false)
16081  {
16082  dedicatedAllocationsStarted = true;
16083  json.WriteString("DedicatedAllocations");
16084  json.BeginObject();
16085  }
16086 
16087  json.BeginString("Type ");
16088  json.ContinueString(memTypeIndex);
16089  json.EndString();
16090 
16091  json.BeginArray();
16092 
16093  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
16094  {
16095  json.BeginObject(true);
16096  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
16097  hAlloc->PrintParameters(json);
16098  json.EndObject();
16099  }
16100 
16101  json.EndArray();
16102  }
16103  }
16104  if(dedicatedAllocationsStarted)
16105  {
16106  json.EndObject();
16107  }
16108 
16109  {
16110  bool allocationsStarted = false;
16111  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16112  {
16113  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
16114  {
16115  if(allocationsStarted == false)
16116  {
16117  allocationsStarted = true;
16118  json.WriteString("DefaultPools");
16119  json.BeginObject();
16120  }
16121 
16122  json.BeginString("Type ");
16123  json.ContinueString(memTypeIndex);
16124  json.EndString();
16125 
16126  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
16127  }
16128  }
16129  if(allocationsStarted)
16130  {
16131  json.EndObject();
16132  }
16133  }
16134 
16135  // Custom pools
16136  {
16137  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16138  const size_t poolCount = m_Pools.size();
16139  if(poolCount > 0)
16140  {
16141  json.WriteString("Pools");
16142  json.BeginObject();
16143  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
16144  {
16145  json.BeginString();
16146  json.ContinueString(m_Pools[poolIndex]->GetId());
16147  json.EndString();
16148 
16149  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
16150  }
16151  json.EndObject();
16152  }
16153  }
16154 }
16155 
16156 #endif // #if VMA_STATS_STRING_ENABLED
16157 
16158 ////////////////////////////////////////////////////////////////////////////////
16159 // Public interface
16160 
16161 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
16162  const VmaAllocatorCreateInfo* pCreateInfo,
16163  VmaAllocator* pAllocator)
16164 {
16165  VMA_ASSERT(pCreateInfo && pAllocator);
16166  VMA_DEBUG_LOG("vmaCreateAllocator");
16167  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
16168  return (*pAllocator)->Init(pCreateInfo);
16169 }
16170 
16171 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
16172  VmaAllocator allocator)
16173 {
16174  if(allocator != VK_NULL_HANDLE)
16175  {
16176  VMA_DEBUG_LOG("vmaDestroyAllocator");
16177  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
16178  vma_delete(&allocationCallbacks, allocator);
16179  }
16180 }
16181 
16182 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
16183  VmaAllocator allocator,
16184  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
16185 {
16186  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
16187  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
16188 }
16189 
16190 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
16191  VmaAllocator allocator,
16192  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
16193 {
16194  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
16195  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
16196 }
16197 
16198 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
16199  VmaAllocator allocator,
16200  uint32_t memoryTypeIndex,
16201  VkMemoryPropertyFlags* pFlags)
16202 {
16203  VMA_ASSERT(allocator && pFlags);
16204  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
16205  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
16206 }
16207 
16208 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
16209  VmaAllocator allocator,
16210  uint32_t frameIndex)
16211 {
16212  VMA_ASSERT(allocator);
16213  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
16214 
16215  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16216 
16217  allocator->SetCurrentFrameIndex(frameIndex);
16218 }
16219 
16220 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
16221  VmaAllocator allocator,
16222  VmaStats* pStats)
16223 {
16224  VMA_ASSERT(allocator && pStats);
16225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16226  allocator->CalculateStats(pStats);
16227 }
16228 
16229 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
16230  VmaAllocator allocator,
16231  VmaBudget* pBudget)
16232 {
16233  VMA_ASSERT(allocator && pBudget);
16234  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16235  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
16236 }
16237 
16238 #if VMA_STATS_STRING_ENABLED
16239 
16240 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
16241  VmaAllocator allocator,
16242  char** ppStatsString,
16243  VkBool32 detailedMap)
16244 {
16245  VMA_ASSERT(allocator && ppStatsString);
16246  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16247 
16248  VmaStringBuilder sb(allocator);
16249  {
16250  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
16251  json.BeginObject();
16252 
16253  VmaStats stats;
16254  allocator->CalculateStats(&stats);
16255 
16256  json.WriteString("Total");
16257  VmaPrintStatInfo(json, stats.total);
16258 
16259  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
16260  {
16261  json.BeginString("Heap ");
16262  json.ContinueString(heapIndex);
16263  json.EndString();
16264  json.BeginObject();
16265 
16266  json.WriteString("Size");
16267  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
16268 
16269  json.WriteString("Flags");
16270  json.BeginArray(true);
16271  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
16272  {
16273  json.WriteString("DEVICE_LOCAL");
16274  }
16275  json.EndArray();
16276 
16277  if(stats.memoryHeap[heapIndex].blockCount > 0)
16278  {
16279  json.WriteString("Stats");
16280  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
16281  }
16282 
16283  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
16284  {
16285  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
16286  {
16287  json.BeginString("Type ");
16288  json.ContinueString(typeIndex);
16289  json.EndString();
16290 
16291  json.BeginObject();
16292 
16293  json.WriteString("Flags");
16294  json.BeginArray(true);
16295  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
16296  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
16297  {
16298  json.WriteString("DEVICE_LOCAL");
16299  }
16300  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
16301  {
16302  json.WriteString("HOST_VISIBLE");
16303  }
16304  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
16305  {
16306  json.WriteString("HOST_COHERENT");
16307  }
16308  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
16309  {
16310  json.WriteString("HOST_CACHED");
16311  }
16312  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
16313  {
16314  json.WriteString("LAZILY_ALLOCATED");
16315  }
16316  json.EndArray();
16317 
16318  if(stats.memoryType[typeIndex].blockCount > 0)
16319  {
16320  json.WriteString("Stats");
16321  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
16322  }
16323 
16324  json.EndObject();
16325  }
16326  }
16327 
16328  json.EndObject();
16329  }
16330  if(detailedMap == VK_TRUE)
16331  {
16332  allocator->PrintDetailedMap(json);
16333  }
16334 
16335  json.EndObject();
16336  }
16337 
16338  const size_t len = sb.GetLength();
16339  char* const pChars = vma_new_array(allocator, char, len + 1);
16340  if(len > 0)
16341  {
16342  memcpy(pChars, sb.GetData(), len);
16343  }
16344  pChars[len] = '\0';
16345  *ppStatsString = pChars;
16346 }
16347 
16348 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
16349  VmaAllocator allocator,
16350  char* pStatsString)
16351 {
16352  if(pStatsString != VMA_NULL)
16353  {
16354  VMA_ASSERT(allocator);
16355  size_t len = strlen(pStatsString);
16356  vma_delete_array(allocator, pStatsString, len + 1);
16357  }
16358 }
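/*
Usage sketch (editorial): a stats string built by vmaBuildStatsString() must be
released with the matching free function.

char* statsJson = VMA_NULL;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE includes the detailed map
// ...write statsJson to a file or log for offline inspection...
vmaFreeStatsString(allocator, statsJson);
*/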
16359 
16360 #endif // #if VMA_STATS_STRING_ENABLED
16361 
16362 /*
16363 This function is not protected by any mutex because it just reads immutable data.
16364 */
16365 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
16366  VmaAllocator allocator,
16367  uint32_t memoryTypeBits,
16368  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16369  uint32_t* pMemoryTypeIndex)
16370 {
16371  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16372  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16373  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16374 
16375  if(pAllocationCreateInfo->memoryTypeBits != 0)
16376  {
16377  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
16378  }
16379 
16380  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
16381  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
16382 
16383  // Convert usage to requiredFlags and preferredFlags.
16384  switch(pAllocationCreateInfo->usage)
16385  {
16386  case VMA_MEMORY_USAGE_UNKNOWN:
16387  break;
16388  case VMA_MEMORY_USAGE_GPU_ONLY:
16389  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16390  {
16391  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16392  }
16393  break;
16394  case VMA_MEMORY_USAGE_CPU_ONLY:
16395  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
16396  break;
16397  case VMA_MEMORY_USAGE_CPU_TO_GPU:
16398  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16399  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16400  {
16401  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
16402  }
16403  break;
16404  case VMA_MEMORY_USAGE_GPU_TO_CPU:
16405  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
16406  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
16407  break;
16408  default:
16409  break;
16410  }
16411 
16412  *pMemoryTypeIndex = UINT32_MAX;
16413  uint32_t minCost = UINT32_MAX;
16414  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
16415  memTypeIndex < allocator->GetMemoryTypeCount();
16416  ++memTypeIndex, memTypeBit <<= 1)
16417  {
16418  // This memory type is acceptable according to memoryTypeBits bitmask.
16419  if((memTypeBit & memoryTypeBits) != 0)
16420  {
16421  const VkMemoryPropertyFlags currFlags =
16422  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
16423  // This memory type contains requiredFlags.
16424  if((requiredFlags & ~currFlags) == 0)
16425  {
16426  // Calculate cost as number of bits from preferredFlags not present in this memory type.
16427  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
16428  // Remember memory type with lowest cost.
16429  if(currCost < minCost)
16430  {
16431  *pMemoryTypeIndex = memTypeIndex;
16432  if(currCost == 0)
16433  {
16434  return VK_SUCCESS;
16435  }
16436  minCost = currCost;
16437  }
16438  }
16439  }
16440  }
16441  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
16442 }
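/*
Usage sketch (editorial): picking a memory type for a staging buffer.

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT

uint32_t memTypeIndex;
if(vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS)
{
    // memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
}
*/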
16443 
16444 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
16445  VmaAllocator allocator,
16446  const VkBufferCreateInfo* pBufferCreateInfo,
16447  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16448  uint32_t* pMemoryTypeIndex)
16449 {
16450  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16451  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
16452  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16453  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16454 
16455  const VkDevice hDev = allocator->m_hDevice;
16456  VkBuffer hBuffer = VK_NULL_HANDLE;
16457  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
16458  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
16459  if(res == VK_SUCCESS)
16460  {
16461  VkMemoryRequirements memReq = {};
16462  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
16463  hDev, hBuffer, &memReq);
16464 
16465  res = vmaFindMemoryTypeIndex(
16466  allocator,
16467  memReq.memoryTypeBits,
16468  pAllocationCreateInfo,
16469  pMemoryTypeIndex);
16470 
16471  allocator->GetVulkanFunctions().vkDestroyBuffer(
16472  hDev, hBuffer, allocator->GetAllocationCallbacks());
16473  }
16474  return res;
16475 }
16476 
16477 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
16478  VmaAllocator allocator,
16479  const VkImageCreateInfo* pImageCreateInfo,
16480  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16481  uint32_t* pMemoryTypeIndex)
16482 {
16483  VMA_ASSERT(allocator != VK_NULL_HANDLE);
16484  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
16485  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
16486  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
16487 
16488  const VkDevice hDev = allocator->m_hDevice;
16489  VkImage hImage = VK_NULL_HANDLE;
16490  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
16491  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
16492  if(res == VK_SUCCESS)
16493  {
16494  VkMemoryRequirements memReq = {};
16495  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
16496  hDev, hImage, &memReq);
16497 
16498  res = vmaFindMemoryTypeIndex(
16499  allocator,
16500  memReq.memoryTypeBits,
16501  pAllocationCreateInfo,
16502  pMemoryTypeIndex);
16503 
16504  allocator->GetVulkanFunctions().vkDestroyImage(
16505  hDev, hImage, allocator->GetAllocationCallbacks());
16506  }
16507  return res;
16508 }
16509 
16510 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
16511  VmaAllocator allocator,
16512  const VmaPoolCreateInfo* pCreateInfo,
16513  VmaPool* pPool)
16514 {
16515  VMA_ASSERT(allocator && pCreateInfo && pPool);
16516 
16517  VMA_DEBUG_LOG("vmaCreatePool");
16518 
16519  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16520 
16521  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
16522 
16523 #if VMA_RECORDING_ENABLED
16524  if(allocator->GetRecorder() != VMA_NULL)
16525  {
16526  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
16527  }
16528 #endif
16529 
16530  return res;
16531 }
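/*
Usage sketch (editorial): creating a custom pool and directing allocations to it.

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndexForBufferInfo()
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // optional: fixed 64 MiB blocks

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool; // usage/flags-based type selection is bypassed for pools
*/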
16532 
16533 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
16534  VmaAllocator allocator,
16535  VmaPool pool)
16536 {
16537  VMA_ASSERT(allocator);
16538 
16539  if(pool == VK_NULL_HANDLE)
16540  {
16541  return;
16542  }
16543 
16544  VMA_DEBUG_LOG("vmaDestroyPool");
16545 
16546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16547 
16548 #if VMA_RECORDING_ENABLED
16549  if(allocator->GetRecorder() != VMA_NULL)
16550  {
16551  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
16552  }
16553 #endif
16554 
16555  allocator->DestroyPool(pool);
16556 }
16557 
16558 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
16559  VmaAllocator allocator,
16560  VmaPool pool,
16561  VmaPoolStats* pPoolStats)
16562 {
16563  VMA_ASSERT(allocator && pool && pPoolStats);
16564 
16565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16566 
16567  allocator->GetPoolStats(pool, pPoolStats);
16568 }
16569 
16570 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
16571  VmaAllocator allocator,
16572  VmaPool pool,
16573  size_t* pLostAllocationCount)
16574 {
16575  VMA_ASSERT(allocator && pool);
16576 
16577  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16578 
16579 #if VMA_RECORDING_ENABLED
16580  if(allocator->GetRecorder() != VMA_NULL)
16581  {
16582  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
16583  }
16584 #endif
16585 
16586  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
16587 }
16588 
16589 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
16590 {
16591  VMA_ASSERT(allocator && pool);
16592 
16593  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16594 
16595  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
16596 
16597  return allocator->CheckPoolCorruption(pool);
16598 }
16599 
16600 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
16601  VmaAllocator allocator,
16602  const VkMemoryRequirements* pVkMemoryRequirements,
16603  const VmaAllocationCreateInfo* pCreateInfo,
16604  VmaAllocation* pAllocation,
16605  VmaAllocationInfo* pAllocationInfo)
16606 {
16607  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
16608 
16609  VMA_DEBUG_LOG("vmaAllocateMemory");
16610 
16611  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16612 
16613  VkResult result = allocator->AllocateMemory(
16614  *pVkMemoryRequirements,
16615  false, // requiresDedicatedAllocation
16616  false, // prefersDedicatedAllocation
16617  VK_NULL_HANDLE, // dedicatedBuffer
16618  VK_NULL_HANDLE, // dedicatedImage
16619  *pCreateInfo,
16620  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16621  1, // allocationCount
16622  pAllocation);
16623 
16624 #if VMA_RECORDING_ENABLED
16625  if(allocator->GetRecorder() != VMA_NULL)
16626  {
16627  allocator->GetRecorder()->RecordAllocateMemory(
16628  allocator->GetCurrentFrameIndex(),
16629  *pVkMemoryRequirements,
16630  *pCreateInfo,
16631  *pAllocation);
16632  }
16633 #endif
16634 
16635  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16636  {
16637  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16638  }
16639 
16640  return result;
16641 }
16642 
16643 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
16644  VmaAllocator allocator,
16645  const VkMemoryRequirements* pVkMemoryRequirements,
16646  const VmaAllocationCreateInfo* pCreateInfo,
16647  size_t allocationCount,
16648  VmaAllocation* pAllocations,
16649  VmaAllocationInfo* pAllocationInfo)
16650 {
16651  if(allocationCount == 0)
16652  {
16653  return VK_SUCCESS;
16654  }
16655 
16656  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
16657 
16658  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
16659 
16660  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16661 
16662  VkResult result = allocator->AllocateMemory(
16663  *pVkMemoryRequirements,
16664  false, // requiresDedicatedAllocation
16665  false, // prefersDedicatedAllocation
16666  VK_NULL_HANDLE, // dedicatedBuffer
16667  VK_NULL_HANDLE, // dedicatedImage
16668  *pCreateInfo,
16669  VMA_SUBALLOCATION_TYPE_UNKNOWN,
16670  allocationCount,
16671  pAllocations);
16672 
16673 #if VMA_RECORDING_ENABLED
16674  if(allocator->GetRecorder() != VMA_NULL)
16675  {
16676  allocator->GetRecorder()->RecordAllocateMemoryPages(
16677  allocator->GetCurrentFrameIndex(),
16678  *pVkMemoryRequirements,
16679  *pCreateInfo,
16680  (uint64_t)allocationCount,
16681  pAllocations);
16682  }
16683 #endif
16684 
16685  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
16686  {
16687  for(size_t i = 0; i < allocationCount; ++i)
16688  {
16689  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
16690  }
16691  }
16692 
16693  return result;
16694 }
16695 
16696 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
16697  VmaAllocator allocator,
16698  VkBuffer buffer,
16699  const VmaAllocationCreateInfo* pCreateInfo,
16700  VmaAllocation* pAllocation,
16701  VmaAllocationInfo* pAllocationInfo)
16702 {
16703  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16704 
16705  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
16706 
16707  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16708 
16709  VkMemoryRequirements vkMemReq = {};
16710  bool requiresDedicatedAllocation = false;
16711  bool prefersDedicatedAllocation = false;
16712  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
16713  requiresDedicatedAllocation,
16714  prefersDedicatedAllocation);
16715 
16716  VkResult result = allocator->AllocateMemory(
16717  vkMemReq,
16718  requiresDedicatedAllocation,
16719  prefersDedicatedAllocation,
16720  buffer, // dedicatedBuffer
16721  VK_NULL_HANDLE, // dedicatedImage
16722  *pCreateInfo,
16723  VMA_SUBALLOCATION_TYPE_BUFFER,
16724  1, // allocationCount
16725  pAllocation);
16726 
16727 #if VMA_RECORDING_ENABLED
16728  if(allocator->GetRecorder() != VMA_NULL)
16729  {
16730  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
16731  allocator->GetCurrentFrameIndex(),
16732  vkMemReq,
16733  requiresDedicatedAllocation,
16734  prefersDedicatedAllocation,
16735  *pCreateInfo,
16736  *pAllocation);
16737  }
16738 #endif
16739 
16740  if(pAllocationInfo && result == VK_SUCCESS)
16741  {
16742  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16743  }
16744 
16745  return result;
16746 }
16747 
16748 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
16749  VmaAllocator allocator,
16750  VkImage image,
16751  const VmaAllocationCreateInfo* pCreateInfo,
16752  VmaAllocation* pAllocation,
16753  VmaAllocationInfo* pAllocationInfo)
16754 {
16755  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
16756 
16757  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
16758 
16759  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16760 
16761  VkMemoryRequirements vkMemReq = {};
16762  bool requiresDedicatedAllocation = false;
16763  bool prefersDedicatedAllocation = false;
16764  allocator->GetImageMemoryRequirements(image, vkMemReq,
16765  requiresDedicatedAllocation, prefersDedicatedAllocation);
16766 
16767  VkResult result = allocator->AllocateMemory(
16768  vkMemReq,
16769  requiresDedicatedAllocation,
16770  prefersDedicatedAllocation,
16771  VK_NULL_HANDLE, // dedicatedBuffer
16772  image, // dedicatedImage
16773  *pCreateInfo,
16774  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
16775  1, // allocationCount
16776  pAllocation);
16777 
16778 #if VMA_RECORDING_ENABLED
16779  if(allocator->GetRecorder() != VMA_NULL)
16780  {
16781  allocator->GetRecorder()->RecordAllocateMemoryForImage(
16782  allocator->GetCurrentFrameIndex(),
16783  vkMemReq,
16784  requiresDedicatedAllocation,
16785  prefersDedicatedAllocation,
16786  *pCreateInfo,
16787  *pAllocation);
16788  }
16789 #endif
16790 
16791  if(pAllocationInfo && result == VK_SUCCESS)
16792  {
16793  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16794  }
16795 
16796  return result;
16797 }
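/*
Usage sketch (editorial): allocating and binding memory for an image created
directly with Vulkan; vmaCreateImage() combines these steps into one call.

VkImage image;
vkCreateImage(device, &imageCreateInfo, NULL, &image);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc;
vmaAllocateMemoryForImage(allocator, image, &allocCreateInfo, &alloc, VMA_NULL);
vmaBindImageMemory(allocator, alloc, image);
*/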
16798 
16799 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
16800  VmaAllocator allocator,
16801  VmaAllocation allocation)
16802 {
16803  VMA_ASSERT(allocator);
16804 
16805  if(allocation == VK_NULL_HANDLE)
16806  {
16807  return;
16808  }
16809 
16810  VMA_DEBUG_LOG("vmaFreeMemory");
16811 
16812  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16813 
16814 #if VMA_RECORDING_ENABLED
16815  if(allocator->GetRecorder() != VMA_NULL)
16816  {
16817  allocator->GetRecorder()->RecordFreeMemory(
16818  allocator->GetCurrentFrameIndex(),
16819  allocation);
16820  }
16821 #endif
16822 
16823  allocator->FreeMemory(
16824  1, // allocationCount
16825  &allocation);
16826 }
16827 
16828 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
16829  VmaAllocator allocator,
16830  size_t allocationCount,
16831  VmaAllocation* pAllocations)
16832 {
16833  if(allocationCount == 0)
16834  {
16835  return;
16836  }
16837 
16838  VMA_ASSERT(allocator);
16839 
16840  VMA_DEBUG_LOG("vmaFreeMemoryPages");
16841 
16842  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16843 
16844 #if VMA_RECORDING_ENABLED
16845  if(allocator->GetRecorder() != VMA_NULL)
16846  {
16847  allocator->GetRecorder()->RecordFreeMemoryPages(
16848  allocator->GetCurrentFrameIndex(),
16849  (uint64_t)allocationCount,
16850  pAllocations);
16851  }
16852 #endif
16853 
16854  allocator->FreeMemory(allocationCount, pAllocations);
16855 }
16856 
16857 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
16858  VmaAllocator allocator,
16859  VmaAllocation allocation,
16860  VkDeviceSize newSize)
16861 {
16862  VMA_ASSERT(allocator && allocation);
16863 
16864  VMA_DEBUG_LOG("vmaResizeAllocation");
16865 
16866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16867 
16868  return allocator->ResizeAllocation(allocation, newSize);
16869 }
16870 
16871 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
16872  VmaAllocator allocator,
16873  VmaAllocation allocation,
16874  VmaAllocationInfo* pAllocationInfo)
16875 {
16876  VMA_ASSERT(allocator && allocation && pAllocationInfo);
16877 
16878  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16879 
16880 #if VMA_RECORDING_ENABLED
16881  if(allocator->GetRecorder() != VMA_NULL)
16882  {
16883  allocator->GetRecorder()->RecordGetAllocationInfo(
16884  allocator->GetCurrentFrameIndex(),
16885  allocation);
16886  }
16887 #endif
16888 
16889  allocator->GetAllocationInfo(allocation, pAllocationInfo);
16890 }
16891 
16892 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
16893  VmaAllocator allocator,
16894  VmaAllocation allocation)
16895 {
16896  VMA_ASSERT(allocator && allocation);
16897 
16898  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16899 
16900 #if VMA_RECORDING_ENABLED
16901  if(allocator->GetRecorder() != VMA_NULL)
16902  {
16903  allocator->GetRecorder()->RecordTouchAllocation(
16904  allocator->GetCurrentFrameIndex(),
16905  allocation);
16906  }
16907 #endif
16908 
16909  return allocator->TouchAllocation(allocation);
16910 }
16911 
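This call is only meaningful for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. A sketch of the typical per-frame check (names assumed):

    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // The allocation became lost: free it and recreate the resource.
    }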
16912 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
16913  VmaAllocator allocator,
16914  VmaAllocation allocation,
16915  void* pUserData)
16916 {
16917  VMA_ASSERT(allocator && allocation);
16918 
16919  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16920 
16921  allocation->SetUserData(allocator, pUserData);
16922 
16923 #if VMA_RECORDING_ENABLED
16924  if(allocator->GetRecorder() != VMA_NULL)
16925  {
16926  allocator->GetRecorder()->RecordSetAllocationUserData(
16927  allocator->GetCurrentFrameIndex(),
16928  allocation,
16929  pUserData);
16930  }
16931 #endif
16932 }
16933 
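A sketch of attaching an application-side pointer to an allocation (not part of the header); `MyResourceInfo` is a hypothetical application type:

    struct MyResourceInfo { uint32_t id; };   // hypothetical
    static MyResourceInfo res42 = { 42 };
    vmaSetAllocationUserData(allocator, allocation, &res42);

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    // allocInfo.pUserData now points to res42.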
16934 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
16935  VmaAllocator allocator,
16936  VmaAllocation* pAllocation)
16937 {
16938  VMA_ASSERT(allocator && pAllocation);
16939 
16940  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16941 
16942  allocator->CreateLostAllocation(pAllocation);
16943 
16944 #if VMA_RECORDING_ENABLED
16945  if(allocator->GetRecorder() != VMA_NULL)
16946  {
16947  allocator->GetRecorder()->RecordCreateLostAllocation(
16948  allocator->GetCurrentFrameIndex(),
16949  *pAllocation);
16950  }
16951 #endif
16952 }
16953 
16954 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
16955  VmaAllocator allocator,
16956  VmaAllocation allocation,
16957  void** ppData)
16958 {
16959  VMA_ASSERT(allocator && allocation && ppData);
16960 
16961  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16962 
16963  VkResult res = allocator->Map(allocation, ppData);
16964 
16965 #if VMA_RECORDING_ENABLED
16966  if(allocator->GetRecorder() != VMA_NULL)
16967  {
16968  allocator->GetRecorder()->RecordMapMemory(
16969  allocator->GetCurrentFrameIndex(),
16970  allocation);
16971  }
16972 #endif
16973 
16974  return res;
16975 }
16976 
16977 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
16978  VmaAllocator allocator,
16979  VmaAllocation allocation)
16980 {
16981  VMA_ASSERT(allocator && allocation);
16982 
16983  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16984 
16985 #if VMA_RECORDING_ENABLED
16986  if(allocator->GetRecorder() != VMA_NULL)
16987  {
16988  allocator->GetRecorder()->RecordUnmapMemory(
16989  allocator->GetCurrentFrameIndex(),
16990  allocation);
16991  }
16992 #endif
16993 
16994  allocator->Unmap(allocation);
16995 }
16996 
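A map/write/unmap sketch (not part of the header). Mapping is reference-counted internally, so nested vmaMapMemory()/vmaUnmapMemory() pairs are valid. `srcData`/`srcSize` are assumed CPU-side data:

    void* pData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);   // srcData/srcSize: hypothetical
        vmaUnmapMemory(allocator, allocation);
    }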
16997 VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
16998 {
16999  VMA_ASSERT(allocator && allocation);
17000 
17001  VMA_DEBUG_LOG("vmaFlushAllocation");
17002 
17003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17004 
17005  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
17006 
17007 #if VMA_RECORDING_ENABLED
17008  if(allocator->GetRecorder() != VMA_NULL)
17009  {
17010  allocator->GetRecorder()->RecordFlushAllocation(
17011  allocator->GetCurrentFrameIndex(),
17012  allocation, offset, size);
17013  }
17014 #endif
17015 }
17016 
17017 VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
17018 {
17019  VMA_ASSERT(allocator && allocation);
17020 
17021  VMA_DEBUG_LOG("vmaInvalidateAllocation");
17022 
17023  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17024 
17025  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
17026 
17027 #if VMA_RECORDING_ENABLED
17028  if(allocator->GetRecorder() != VMA_NULL)
17029  {
17030  allocator->GetRecorder()->RecordInvalidateAllocation(
17031  allocator->GetCurrentFrameIndex(),
17032  allocation, offset, size);
17033  }
17034 #endif
17035 }
17036 
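Flush and invalidate are needed only for HOST_VISIBLE memory types that lack HOST_COHERENT. A sketch assuming a persistently mapped allocation (VMA_ALLOCATION_CREATE_MAPPED_BIT) whose `allocInfo` was queried earlier; `offset`, `size`, `srcData` are illustrative:

    // CPU writes, then make them visible to the device:
    memcpy((char*)allocInfo.pMappedData + offset, srcData, size);
    vmaFlushAllocation(allocator, allocation, offset, size);

    // Before the CPU reads data the device wrote:
    vmaInvalidateAllocation(allocator, allocation, offset, size);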
17037 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
17038 {
17039  VMA_ASSERT(allocator);
17040 
17041  VMA_DEBUG_LOG("vmaCheckCorruption");
17042 
17043  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17044 
17045  return allocator->CheckCorruption(memoryTypeBits);
17046 }
17047 
17048 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
17049  VmaAllocator allocator,
17050  VmaAllocation* pAllocations,
17051  size_t allocationCount,
17052  VkBool32* pAllocationsChanged,
17053  const VmaDefragmentationInfo *pDefragmentationInfo,
17054  VmaDefragmentationStats* pDefragmentationStats)
17055 {
17056  // Deprecated interface, reimplemented using new one.
17057 
17058  VmaDefragmentationInfo2 info2 = {};
17059  info2.allocationCount = (uint32_t)allocationCount;
17060  info2.pAllocations = pAllocations;
17061  info2.pAllocationsChanged = pAllocationsChanged;
17062  if(pDefragmentationInfo != VMA_NULL)
17063  {
17064  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
17065  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
17066  }
17067  else
17068  {
17069  info2.maxCpuAllocationsToMove = UINT32_MAX;
17070  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
17071  }
17072  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
17073 
17074  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
17075  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
17076  if(res == VK_NOT_READY)
17077  {
17078  res = vmaDefragmentationEnd(allocator, ctx);
17079  }
17080  return res;
17081 }
17082 
17083 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
17084  VmaAllocator allocator,
17085  const VmaDefragmentationInfo2* pInfo,
17086  VmaDefragmentationStats* pStats,
17087  VmaDefragmentationContext *pContext)
17088 {
17089  VMA_ASSERT(allocator && pInfo && pContext);
17090 
17091  // Degenerate case: Nothing to defragment.
17092  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
17093  {
17094  return VK_SUCCESS;
17095  }
17096 
17097  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
17098  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
17099  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
17100  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
17101 
17102  VMA_DEBUG_LOG("vmaDefragmentationBegin");
17103 
17104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17105 
17106  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
17107 
17108 #if VMA_RECORDING_ENABLED
17109  if(allocator->GetRecorder() != VMA_NULL)
17110  {
17111  allocator->GetRecorder()->RecordDefragmentationBegin(
17112  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
17113  }
17114 #endif
17115 
17116  return res;
17117 }
17118 
17119 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
17120  VmaAllocator allocator,
17121  VmaDefragmentationContext context)
17122 {
17123  VMA_ASSERT(allocator);
17124 
17125  VMA_DEBUG_LOG("vmaDefragmentationEnd");
17126 
17127  if(context != VK_NULL_HANDLE)
17128  {
17129  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17130 
17131 #if VMA_RECORDING_ENABLED
17132  if(allocator->GetRecorder() != VMA_NULL)
17133  {
17134  allocator->GetRecorder()->RecordDefragmentationEnd(
17135  allocator->GetCurrentFrameIndex(), context);
17136  }
17137 #endif
17138 
17139  return allocator->DefragmentationEnd(context);
17140  }
17141  else
17142  {
17143  return VK_SUCCESS;
17144  }
17145 }
17146 
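A CPU-side defragmentation sketch mirroring the deprecated vmaDefragment() wrapper above (not part of the header). `allocs`/`allocCount` are an assumed array of VmaAllocation handles:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;   // hypothetical
    defragInfo.pAllocations = allocs;                    // hypothetical
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, defragCtx);
    }
    // Buffers/images bound to moved allocations must be destroyed, recreated and rebound.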
17147 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
17148  VmaAllocator allocator,
17149  VmaAllocation allocation,
17150  VkBuffer buffer)
17151 {
17152  VMA_ASSERT(allocator && allocation && buffer);
17153 
17154  VMA_DEBUG_LOG("vmaBindBufferMemory");
17155 
17156  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17157 
17158  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
17159 }
17160 
17161 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
17162  VmaAllocator allocator,
17163  VmaAllocation allocation,
17164  VkDeviceSize allocationLocalOffset,
17165  VkBuffer buffer,
17166  const void* pNext)
17167 {
17168  VMA_ASSERT(allocator && allocation && buffer);
17169 
17170  VMA_DEBUG_LOG("vmaBindBufferMemory2");
17171 
17172  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17173 
17174  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
17175 }
17176 
17177 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
17178  VmaAllocator allocator,
17179  VmaAllocation allocation,
17180  VkImage image)
17181 {
17182  VMA_ASSERT(allocator && allocation && image);
17183 
17184  VMA_DEBUG_LOG("vmaBindImageMemory");
17185 
17186  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17187 
17188  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
17189 }
17190 
17191 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
17192  VmaAllocator allocator,
17193  VmaAllocation allocation,
17194  VkDeviceSize allocationLocalOffset,
17195  VkImage image,
17196  const void* pNext)
17197 {
17198  VMA_ASSERT(allocator && allocation && image);
17199 
17200  VMA_DEBUG_LOG("vmaBindImageMemory2");
17201 
17202  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17203 
17204  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
17205 }
17206 
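The ..2 variants behave like vmaBindBufferMemory()/vmaBindImageMemory() but accept a local offset and a pNext chain; passing a non-null pNext requires VK_KHR_bind_memory2 (VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT). A minimal sketch with assumed handles:

    VkResult res = vmaBindImageMemory2(allocator, allocation, 0, image, nullptr);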
17207 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
17208  VmaAllocator allocator,
17209  const VkBufferCreateInfo* pBufferCreateInfo,
17210  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17211  VkBuffer* pBuffer,
17212  VmaAllocation* pAllocation,
17213  VmaAllocationInfo* pAllocationInfo)
17214 {
17215  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
17216 
17217  if(pBufferCreateInfo->size == 0)
17218  {
17219  return VK_ERROR_VALIDATION_FAILED_EXT;
17220  }
17221 
17222  VMA_DEBUG_LOG("vmaCreateBuffer");
17223 
17224  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17225 
17226  *pBuffer = VK_NULL_HANDLE;
17227  *pAllocation = VK_NULL_HANDLE;
17228 
17229  // 1. Create VkBuffer.
17230  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17231  allocator->m_hDevice,
17232  pBufferCreateInfo,
17233  allocator->GetAllocationCallbacks(),
17234  pBuffer);
17235  if(res >= 0)
17236  {
17237  // 2. vkGetBufferMemoryRequirements.
17238  VkMemoryRequirements vkMemReq = {};
17239  bool requiresDedicatedAllocation = false;
17240  bool prefersDedicatedAllocation = false;
17241  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17242  requiresDedicatedAllocation, prefersDedicatedAllocation);
17243 
17244  // Make sure alignment requirements for specific buffer usages reported
17245  // in Physical Device Properties are included in alignment reported by memory requirements.
17246  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
17247  {
17248  VMA_ASSERT(vkMemReq.alignment %
17249  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
17250  }
17251  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
17252  {
17253  VMA_ASSERT(vkMemReq.alignment %
17254  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
17255  }
17256  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
17257  {
17258  VMA_ASSERT(vkMemReq.alignment %
17259  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
17260  }
17261 
17262  // 3. Allocate memory using allocator.
17263  res = allocator->AllocateMemory(
17264  vkMemReq,
17265  requiresDedicatedAllocation,
17266  prefersDedicatedAllocation,
17267  *pBuffer, // dedicatedBuffer
17268  VK_NULL_HANDLE, // dedicatedImage
17269  *pAllocationCreateInfo,
17270  VMA_SUBALLOCATION_TYPE_BUFFER,
17271  1, // allocationCount
17272  pAllocation);
17273 
17274 #if VMA_RECORDING_ENABLED
17275  if(allocator->GetRecorder() != VMA_NULL)
17276  {
17277  allocator->GetRecorder()->RecordCreateBuffer(
17278  allocator->GetCurrentFrameIndex(),
17279  *pBufferCreateInfo,
17280  *pAllocationCreateInfo,
17281  *pAllocation);
17282  }
17283 #endif
17284 
17285  if(res >= 0)
17286  {
17287  // 4. Bind buffer with memory.
17288  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17289  {
17290  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17291  }
17292  if(res >= 0)
17293  {
17294  // All steps succeeded.
17295  #if VMA_STATS_STRING_ENABLED
17296  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17297  #endif
17298  if(pAllocationInfo != VMA_NULL)
17299  {
17300  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17301  }
17302 
17303  return VK_SUCCESS;
17304  }
17305  allocator->FreeMemory(
17306  1, // allocationCount
17307  pAllocation);
17308  *pAllocation = VK_NULL_HANDLE;
17309  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17310  *pBuffer = VK_NULL_HANDLE;
17311  return res;
17312  }
17313  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17314  *pBuffer = VK_NULL_HANDLE;
17315  return res;
17316  }
17317  return res;
17318 }
17319 
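The canonical create/destroy sketch for a buffer (not part of the header); a valid `allocator` is assumed, other names are illustrative:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buf, alloc);   // destroys the buffer and frees its memory in one call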
17320 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
17321  VmaAllocator allocator,
17322  VkBuffer buffer,
17323  VmaAllocation allocation)
17324 {
17325  VMA_ASSERT(allocator);
17326 
17327  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17328  {
17329  return;
17330  }
17331 
17332  VMA_DEBUG_LOG("vmaDestroyBuffer");
17333 
17334  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17335 
17336 #if VMA_RECORDING_ENABLED
17337  if(allocator->GetRecorder() != VMA_NULL)
17338  {
17339  allocator->GetRecorder()->RecordDestroyBuffer(
17340  allocator->GetCurrentFrameIndex(),
17341  allocation);
17342  }
17343 #endif
17344 
17345  if(buffer != VK_NULL_HANDLE)
17346  {
17347  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17348  }
17349 
17350  if(allocation != VK_NULL_HANDLE)
17351  {
17352  allocator->FreeMemory(
17353  1, // allocationCount
17354  &allocation);
17355  }
17356 }
17357 
17358 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
17359  VmaAllocator allocator,
17360  const VkImageCreateInfo* pImageCreateInfo,
17361  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17362  VkImage* pImage,
17363  VmaAllocation* pAllocation,
17364  VmaAllocationInfo* pAllocationInfo)
17365 {
17366  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17367 
17368  if(pImageCreateInfo->extent.width == 0 ||
17369  pImageCreateInfo->extent.height == 0 ||
17370  pImageCreateInfo->extent.depth == 0 ||
17371  pImageCreateInfo->mipLevels == 0 ||
17372  pImageCreateInfo->arrayLayers == 0)
17373  {
17374  return VK_ERROR_VALIDATION_FAILED_EXT;
17375  }
17376 
17377  VMA_DEBUG_LOG("vmaCreateImage");
17378 
17379  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17380 
17381  *pImage = VK_NULL_HANDLE;
17382  *pAllocation = VK_NULL_HANDLE;
17383 
17384  // 1. Create VkImage.
17385  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17386  allocator->m_hDevice,
17387  pImageCreateInfo,
17388  allocator->GetAllocationCallbacks(),
17389  pImage);
17390  if(res >= 0)
17391  {
17392  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17393  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17394  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17395 
17396  // 2. Allocate memory using allocator.
17397  VkMemoryRequirements vkMemReq = {};
17398  bool requiresDedicatedAllocation = false;
17399  bool prefersDedicatedAllocation = false;
17400  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17401  requiresDedicatedAllocation, prefersDedicatedAllocation);
17402 
17403  res = allocator->AllocateMemory(
17404  vkMemReq,
17405  requiresDedicatedAllocation,
17406  prefersDedicatedAllocation,
17407  VK_NULL_HANDLE, // dedicatedBuffer
17408  *pImage, // dedicatedImage
17409  *pAllocationCreateInfo,
17410  suballocType,
17411  1, // allocationCount
17412  pAllocation);
17413 
17414 #if VMA_RECORDING_ENABLED
17415  if(allocator->GetRecorder() != VMA_NULL)
17416  {
17417  allocator->GetRecorder()->RecordCreateImage(
17418  allocator->GetCurrentFrameIndex(),
17419  *pImageCreateInfo,
17420  *pAllocationCreateInfo,
17421  *pAllocation);
17422  }
17423 #endif
17424 
17425  if(res >= 0)
17426  {
17427  // 3. Bind image with memory.
17428  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17429  {
17430  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17431  }
17432  if(res >= 0)
17433  {
17434  // All steps succeeded.
17435  #if VMA_STATS_STRING_ENABLED
17436  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17437  #endif
17438  if(pAllocationInfo != VMA_NULL)
17439  {
17440  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17441  }
17442 
17443  return VK_SUCCESS;
17444  }
17445  allocator->FreeMemory(
17446  1, // allocationCount
17447  pAllocation);
17448  *pAllocation = VK_NULL_HANDLE;
17449  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17450  *pImage = VK_NULL_HANDLE;
17451  return res;
17452  }
17453  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17454  *pImage = VK_NULL_HANDLE;
17455  return res;
17456  }
17457  return res;
17458 }
17459 
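The matching sketch for an image (not part of the header), again with assumed handles and illustrative parameters:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
    // ... use the image ...
    vmaDestroyImage(allocator, img, alloc);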
17460 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
17461  VmaAllocator allocator,
17462  VkImage image,
17463  VmaAllocation allocation)
17464 {
17465  VMA_ASSERT(allocator);
17466 
17467  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17468  {
17469  return;
17470  }
17471 
17472  VMA_DEBUG_LOG("vmaDestroyImage");
17473 
17474  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17475 
17476 #if VMA_RECORDING_ENABLED
17477  if(allocator->GetRecorder() != VMA_NULL)
17478  {
17479  allocator->GetRecorder()->RecordDestroyImage(
17480  allocator->GetCurrentFrameIndex(),
17481  allocation);
17482  }
17483 #endif
17484 
17485  if(image != VK_NULL_HANDLE)
17486  {
17487  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17488  }
17489  if(allocation != VK_NULL_HANDLE)
17490  {
17491  allocator->FreeMemory(
17492  1, // allocationCount
17493  &allocation);
17494  }
17495 }
17496 
17497 #endif // #ifdef VMA_IMPLEMENTATION