Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1460 /*
1461 Define this macro to 0/1 to disable/enable support for recording functionality,
1462 available through VmaAllocatorCreateInfo::pRecordSettings.
1463 */
1464 #ifndef VMA_RECORDING_ENABLED
1465  #ifdef _WIN32
1466  #define VMA_RECORDING_ENABLED 1
1467  #else
1468  #define VMA_RECORDING_ENABLED 0
1469  #endif
1470 #endif
1471 
1472 #ifndef NOMINMAX
1473  #define NOMINMAX // For windows.h
1474 #endif
1475 
1476 #include <vulkan/vulkan.h>
1477 
1478 #if VMA_RECORDING_ENABLED
1479  #include <windows.h>
1480 #endif
1481 
1482 #if !defined(VMA_DEDICATED_ALLOCATION)
1483  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1484  #define VMA_DEDICATED_ALLOCATION 1
1485  #else
1486  #define VMA_DEDICATED_ALLOCATION 0
1487  #endif
1488 #endif
1489 
1499 VK_DEFINE_HANDLE(VmaAllocator)
1500 
1501 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1503  VmaAllocator allocator,
1504  uint32_t memoryType,
1505  VkDeviceMemory memory,
1506  VkDeviceSize size);
1508 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1509  VmaAllocator allocator,
1510  uint32_t memoryType,
1511  VkDeviceMemory memory,
1512  VkDeviceSize size);
1513 
typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

1560 typedef VkFlags VmaAllocatorCreateFlags;
1561 
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

1600 typedef VkFlags VmaRecordFlags;
1601 
typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

1697 VkResult vmaCreateAllocator(
1698  const VmaAllocatorCreateInfo* pCreateInfo,
1699  VmaAllocator* pAllocator);
1700 
1702 void vmaDestroyAllocator(
1703  VmaAllocator allocator);
1704 
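/*
Example (illustrative sketch, not part of the original header): typical
creation and destruction of an allocator, assuming a valid VkPhysicalDevice
and VkDevice already exist.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers and images through the allocator ...
    vmaDestroyAllocator(allocator); // only after all allocations are freed
*/
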
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1712 
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1720 
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);
1731 
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);
1743 
typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;
1761 
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;
1769 
1771 void vmaCalculateStats(
1772  VmaAllocator allocator,
1773  VmaStats* pStats);
1774 
1775 #define VMA_STATS_STRING_ENABLED 1
1776 
1777 #if VMA_STATS_STRING_ENABLED
1778 
1780 
1782 void vmaBuildStatsString(
1783  VmaAllocator allocator,
1784  char** ppStatsString,
1785  VkBool32 detailedMap);
1786 
1787 void vmaFreeStatsString(
1788  VmaAllocator allocator,
1789  char* pStatsString);
1790 
1791 #endif // #if VMA_STATS_STRING_ENABLED
1792 
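/*
Example (illustrative sketch): dumping allocator statistics as a JSON string,
assuming an existing VmaAllocator.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // ... write statsString to a log file ...
    vmaFreeStatsString(allocator, statsString); // must be released this way
*/
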
1801 VK_DEFINE_HANDLE(VmaPool)
1802 
typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;
1854 
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

2018 VkResult vmaFindMemoryTypeIndex(
2019  VmaAllocator allocator,
2020  uint32_t memoryTypeBits,
2021  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2022  uint32_t* pMemoryTypeIndex);
2023 
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);
2041 
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);
2059 
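/*
Example (illustrative sketch): querying a suitable memory type index for a
uniform buffer, e.g. before creating a custom pool for it.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufInfo, &allocInfo, &memTypeIndex);
*/
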
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;
2118 
typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;
2192 
2199 VkResult vmaCreatePool(
2200  VmaAllocator allocator,
2201  const VmaPoolCreateInfo* pCreateInfo,
2202  VmaPool* pPool);
2203 
2206 void vmaDestroyPool(
2207  VmaAllocator allocator,
2208  VmaPool pool);
2209 
2216 void vmaGetPoolStats(
2217  VmaAllocator allocator,
2218  VmaPool pool,
2219  VmaPoolStats* pPoolStats);
2220 
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);
2231 
2246 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2247 
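/*
Example (illustrative sketch): creating and destroying a custom pool, assuming
memTypeIndex was obtained e.g. from vmaFindMemoryTypeIndex above.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolInfo.maxBlockCount = 2;                // at most 256 MiB total

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool); // only after its allocations are freed
*/
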
2272 VK_DEFINE_HANDLE(VmaAllocation)
2273 
2274 
typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

2327 VkResult vmaAllocateMemory(
2328  VmaAllocator allocator,
2329  const VkMemoryRequirements* pVkMemoryRequirements,
2330  const VmaAllocationCreateInfo* pCreateInfo,
2331  VmaAllocation* pAllocation,
2332  VmaAllocationInfo* pAllocationInfo);
2333 
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
2346 
2348 VkResult vmaAllocateMemoryForImage(
2349  VmaAllocator allocator,
2350  VkImage image,
2351  const VmaAllocationCreateInfo* pCreateInfo,
2352  VmaAllocation* pAllocation,
2353  VmaAllocationInfo* pAllocationInfo);
2354 
2356 void vmaFreeMemory(
2357  VmaAllocator allocator,
2358  VmaAllocation allocation);
2359 
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);
2380 
2395 VkBool32 vmaTouchAllocation(
2396  VmaAllocator allocator,
2397  VmaAllocation allocation);
2398 
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);
2416 
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);
2430 
2465 VkResult vmaMapMemory(
2466  VmaAllocator allocator,
2467  VmaAllocation allocation,
2468  void** ppData);
2469 
2474 void vmaUnmapMemory(
2475  VmaAllocator allocator,
2476  VmaAllocation allocation);
2477 
2490 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2491 
2504 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2505 
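/*
Example (illustrative sketch): uploading data through a mapped pointer.
The explicit flush matters only for memory types that are not HOST_COHERENT.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    }
*/
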
2522 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2523 
2530 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2531 
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;

typedef VkFlags VmaDefragmentationFlags;
2559 
typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    VmaAllocation* pAllocations;
    VkBool32* pAllocationsChanged;
    uint32_t poolCount;
    VmaPool* pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

2661 VkResult vmaDefragmentationBegin(
2662  VmaAllocator allocator,
2663  const VmaDefragmentationInfo2* pInfo,
2664  VmaDefragmentationStats* pStats,
2665  VmaDefragmentationContext *pContext);
2666 
2672 VkResult vmaDefragmentationEnd(
2673  VmaAllocator allocator,
2674  VmaDefragmentationContext context);
2675 
2716 VkResult vmaDefragment(
2717  VmaAllocator allocator,
2718  VmaAllocation* pAllocations,
2719  size_t allocationCount,
2720  VkBool32* pAllocationsChanged,
2721  const VmaDefragmentationInfo *pDefragmentationInfo,
2722  VmaDefragmentationStats* pDefragmentationStats);
2723 
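/*
Example (illustrative sketch): the begin/end defragmentation flow over a set of
existing allocations; a GPU-side pass would additionally set commandBuffer.
Here `allocations` stands for any contiguous array of VmaAllocation handles.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must then be recreated and rebound.
*/
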
2736 VkResult vmaBindBufferMemory(
2737  VmaAllocator allocator,
2738  VmaAllocation allocation,
2739  VkBuffer buffer);
2740 
2753 VkResult vmaBindImageMemory(
2754  VmaAllocator allocator,
2755  VmaAllocation allocation,
2756  VkImage image);
2757 
2784 VkResult vmaCreateBuffer(
2785  VmaAllocator allocator,
2786  const VkBufferCreateInfo* pBufferCreateInfo,
2787  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2788  VkBuffer* pBuffer,
2789  VmaAllocation* pAllocation,
2790  VmaAllocationInfo* pAllocationInfo);
2791 
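/*
Example (illustrative sketch): the most common usage pattern, creating a buffer
together with its memory in a single call.

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... later:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
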
2803 void vmaDestroyBuffer(
2804  VmaAllocator allocator,
2805  VkBuffer buffer,
2806  VmaAllocation allocation);
2807 
2809 VkResult vmaCreateImage(
2810  VmaAllocator allocator,
2811  const VkImageCreateInfo* pImageCreateInfo,
2812  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2813  VkImage* pImage,
2814  VmaAllocation* pAllocation,
2815  VmaAllocationInfo* pAllocationInfo);
2816 
2828 void vmaDestroyImage(
2829  VmaAllocator allocator,
2830  VkImage image,
2831  VmaAllocation allocation);
2832 
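/*
Example (illustrative sketch): creating a sampled 2D image in device-local
memory, then destroying the image and its allocation in one call.

    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgInfo.imageType = VK_IMAGE_TYPE_2D;
    imgInfo.extent = { 1024, 1024, 1 };
    imgInfo.mipLevels = 1;
    imgInfo.arrayLayers = 1;
    imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imgInfo, &allocInfo, &image, &allocation, nullptr);
    // ... later:
    vmaDestroyImage(allocator, image, allocation);
*/
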
2833 #ifdef __cplusplus
2834 }
2835 #endif
2836 
2837 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2838 
2839 // For Visual Studio IntelliSense.
2840 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2841 #define VMA_IMPLEMENTATION
2842 #endif
2843 
2844 #ifdef VMA_IMPLEMENTATION
2845 #undef VMA_IMPLEMENTATION
2846 
2847 #include <cstdint>
2848 #include <cstdlib>
#include <cstring>
#include <cstdio> // for snprintf, used by the stats-string helpers below
2850 
2851 /*******************************************************************************
2852 CONFIGURATION SECTION
2853 
Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default for your environment.
*/
2857 
2858 /*
2859 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2860 internally, like:
2861 
2862  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2863 
Define to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
2866 */
2867 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2868 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2869 #endif
2870 
2871 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2872 //#define VMA_USE_STL_CONTAINERS 1
2873 
/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own
implementation of the containers.
*/
2880 #if VMA_USE_STL_CONTAINERS
2881  #define VMA_USE_STL_VECTOR 1
2882  #define VMA_USE_STL_UNORDERED_MAP 1
2883  #define VMA_USE_STL_LIST 1
2884 #endif
2885 
2886 #if VMA_USE_STL_VECTOR
2887  #include <vector>
2888 #endif
2889 
2890 #if VMA_USE_STL_UNORDERED_MAP
2891  #include <unordered_map>
2892 #endif
2893 
2894 #if VMA_USE_STL_LIST
2895  #include <list>
2896 #endif
2897 
/*
The following headers are used only in this CONFIGURATION section, so feel free
to remove them if they are not needed.
*/
2902 #include <cassert> // for assert
2903 #include <algorithm> // for min, max
2904 #include <mutex> // for std::mutex
2905 #include <atomic> // for std::atomic
2906 
2907 #ifndef VMA_NULL
2908  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2909  #define VMA_NULL nullptr
2910 #endif
2911 
2912 #if defined(__APPLE__) || defined(__ANDROID__)
2913 #include <cstdlib>
2914 void *aligned_alloc(size_t alignment, size_t size)
2915 {
2916  // alignment must be >= sizeof(void*)
2917  if(alignment < sizeof(void*))
2918  {
2919  alignment = sizeof(void*);
2920  }
2921 
2922  void *pointer;
2923  if(posix_memalign(&pointer, alignment, size) == 0)
2924  return pointer;
2925  return VMA_NULL;
2926 }
2927 #endif
2928 
// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:
2931 
2932 //#include <malloc.h>
2933 
2934 // Normal assert to check for programmer's errors, especially in Debug configuration.
2935 #ifndef VMA_ASSERT
2936  #ifdef _DEBUG
2937  #define VMA_ASSERT(expr) assert(expr)
2938  #else
2939  #define VMA_ASSERT(expr)
2940  #endif
2941 #endif
2942 
// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can make the program slow.
2945 #ifndef VMA_HEAVY_ASSERT
2946  #ifdef _DEBUG
2947  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2948  #else
2949  #define VMA_HEAVY_ASSERT(expr)
2950  #endif
2951 #endif
2952 
2953 #ifndef VMA_ALIGN_OF
2954  #define VMA_ALIGN_OF(type) (__alignof(type))
2955 #endif
2956 
2957 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2958  #if defined(_WIN32)
2959  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2960  #else
2961  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2962  #endif
2963 #endif
2964 
2965 #ifndef VMA_SYSTEM_FREE
2966  #if defined(_WIN32)
2967  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2968  #else
2969  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2970  #endif
2971 #endif
2972 
2973 #ifndef VMA_MIN
2974  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2975 #endif
2976 
2977 #ifndef VMA_MAX
2978  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2979 #endif
2980 
2981 #ifndef VMA_SWAP
2982  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2983 #endif
2984 
2985 #ifndef VMA_SORT
2986  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2987 #endif
2988 
2989 #ifndef VMA_DEBUG_LOG
2990  #define VMA_DEBUG_LOG(format, ...)
2991  /*
2992  #define VMA_DEBUG_LOG(format, ...) do { \
2993  printf(format, __VA_ARGS__); \
2994  printf("\n"); \
2995  } while(false)
2996  */
2997 #endif
2998 
2999 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3000 #if VMA_STATS_STRING_ENABLED
3001  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3002  {
3003  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3004  }
3005  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3006  {
3007  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3008  }
3009  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3010  {
3011  snprintf(outStr, strLen, "%p", ptr);
3012  }
3013 #endif
3014 
3015 #ifndef VMA_MUTEX
3016  class VmaMutex
3017  {
3018  public:
3019  VmaMutex() { }
3020  ~VmaMutex() { }
3021  void Lock() { m_Mutex.lock(); }
3022  void Unlock() { m_Mutex.unlock(); }
3023  private:
3024  std::mutex m_Mutex;
3025  };
3026  #define VMA_MUTEX VmaMutex
3027 #endif
3028 
3029 /*
3030 If providing your own implementation, you need to implement a subset of std::atomic:
3031 
3032 - Constructor(uint32_t desired)
3033 - uint32_t load() const
3034 - void store(uint32_t desired)
3035 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3036 */
3037 #ifndef VMA_ATOMIC_UINT32
3038  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3039 #endif
3040 
3041 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    /*
    Every allocation will have its own dedicated memory block.
    Define to 1 for debugging purposes only.
    */
3046  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3047 #endif
3048 
3049 #ifndef VMA_DEBUG_ALIGNMENT
    /*
    Minimum alignment of all allocations, in bytes.
    Set to more than 1 for debugging purposes only. Must be a power of two.
    */
3054  #define VMA_DEBUG_ALIGNMENT (1)
3055 #endif
3056 
3057 #ifndef VMA_DEBUG_MARGIN
    /*
    Minimum margin before and after every allocation, in bytes.
    Set nonzero for debugging purposes only.
    */
3062  #define VMA_DEBUG_MARGIN (0)
3063 #endif
3064 
3065 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    /*
    Define to 1 to fill newly created and freed allocations with a bit pattern
    (see VMA_ALLOCATION_FILL_PATTERN_* below). For debugging purposes only.
    */
3070  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3071 #endif
3072 
3073 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    /*
    Define to 1 to write a magic value to the margin before and after every
    allocation and validate it, so that memory corruptions (out-of-bounds
    writes) are detected.
    */
3079  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3080 #endif
3081 
3082 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    /*
    Set to 1 to enable a single mutex protecting all entry calls to the
    library. Can be useful for debugging multithreading issues.
    */
3087  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3088 #endif
3089 
3090 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /*
    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    Set to more than 1 for debugging purposes only. Must be a power of two.
    */
3095  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3096 #endif
3097 
3098 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3101 #endif
3102 
3103 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3106 #endif
3107 
3108 #ifndef VMA_CLASS_NO_COPY
3109  #define VMA_CLASS_NO_COPY(className) \
3110  private: \
3111  className(const className&) = delete; \
3112  className& operator=(const className&) = delete;
3113 #endif
3114 
3115 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3116 
3117 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3118 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3119 
3120 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3121 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3122 
3123 /*******************************************************************************
3124 END OF CONFIGURATION
3125 */
3126 
3127 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3128  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3129 
3130 // Returns number of bits set to 1 in (v).
3131 static inline uint32_t VmaCountBitsSet(uint32_t v)
3132 {
3133  uint32_t c = v - ((v >> 1) & 0x55555555);
3134  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
3135  c = ((c >> 4) + c) & 0x0F0F0F0F;
3136  c = ((c >> 8) + c) & 0x00FF00FF;
3137  c = ((c >> 16) + c) & 0x0000FFFF;
3138  return c;
3139 }
3140 
// Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
3142 // Use types like uint32_t, uint64_t as T.
3143 template <typename T>
3144 static inline T VmaAlignUp(T val, T align)
3145 {
3146  return (val + align - 1) / align * align;
3147 }
// Aligns the given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
3149 // Use types like uint32_t, uint64_t as T.
3150 template <typename T>
3151 static inline T VmaAlignDown(T val, T align)
3152 {
3153  return val / align * align;
3154 }
3155 
3156 // Division with mathematical rounding to nearest number.
3157 template <typename T>
3158 static inline T VmaRoundDiv(T x, T y)
3159 {
3160  return (x + (y / (T)2)) / y;
3161 }
3162 
/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 it returns true.
*/
3168 template <typename T>
3169 inline bool VmaIsPow2(T x)
3170 {
3171  return (x & (x-1)) == 0;
3172 }
3173 
// Returns the smallest power of 2 greater than or equal to v.
3175 static inline uint32_t VmaNextPow2(uint32_t v)
3176 {
3177  v--;
3178  v |= v >> 1;
3179  v |= v >> 2;
3180  v |= v >> 4;
3181  v |= v >> 8;
3182  v |= v >> 16;
3183  v++;
3184  return v;
3185 }
3186 static inline uint64_t VmaNextPow2(uint64_t v)
3187 {
3188  v--;
3189  v |= v >> 1;
3190  v |= v >> 2;
3191  v |= v >> 4;
3192  v |= v >> 8;
3193  v |= v >> 16;
3194  v |= v >> 32;
3195  v++;
3196  return v;
3197 }
3198 
// Returns the largest power of 2 less than or equal to v.
3200 static inline uint32_t VmaPrevPow2(uint32_t v)
3201 {
3202  v |= v >> 1;
3203  v |= v >> 2;
3204  v |= v >> 4;
3205  v |= v >> 8;
3206  v |= v >> 16;
3207  v = v ^ (v >> 1);
3208  return v;
3209 }
3210 static inline uint64_t VmaPrevPow2(uint64_t v)
3211 {
3212  v |= v >> 1;
3213  v |= v >> 2;
3214  v |= v >> 4;
3215  v |= v >> 8;
3216  v |= v >> 16;
3217  v |= v >> 32;
3218  v = v ^ (v >> 1);
3219  return v;
3220 }
3221 
3222 static inline bool VmaStrIsEmpty(const char* pStr)
3223 {
3224  return pStr == VMA_NULL || *pStr == '\0';
3225 }
3226 
3227 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3228 {
3229  switch(algorithm)
3230  {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
3235  case 0:
3236  return "Default";
3237  default:
3238  VMA_ASSERT(0);
3239  return "";
3240  }
3241 }
3242 
3243 #ifndef VMA_SORT
3244 
3245 template<typename Iterator, typename Compare>
3246 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3247 {
3248  Iterator centerValue = end; --centerValue;
3249  Iterator insertIndex = beg;
3250  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3251  {
3252  if(cmp(*memTypeIndex, *centerValue))
3253  {
3254  if(insertIndex != memTypeIndex)
3255  {
3256  VMA_SWAP(*memTypeIndex, *insertIndex);
3257  }
3258  ++insertIndex;
3259  }
3260  }
3261  if(insertIndex != centerValue)
3262  {
3263  VMA_SWAP(*insertIndex, *centerValue);
3264  }
3265  return insertIndex;
3266 }
3267 
3268 template<typename Iterator, typename Compare>
3269 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3270 {
3271  if(beg < end)
3272  {
3273  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3274  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3275  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3276  }
3277 }
3278 
3279 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3280 
3281 #endif // #ifndef VMA_SORT
3282 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

The algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
3290 static inline bool VmaBlocksOnSamePage(
3291  VkDeviceSize resourceAOffset,
3292  VkDeviceSize resourceASize,
3293  VkDeviceSize resourceBOffset,
3294  VkDeviceSize pageSize)
3295 {
3296  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3297  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3298  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3299  VkDeviceSize resourceBStart = resourceBOffset;
3300  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3301  return resourceAEndPage == resourceBStartPage;
3302 }
3303 
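/*
Worked example: with pageSize = 1024 (bufferImageGranularity is always a power
of two), resource A at offset 0 with size 512 ends on page 0; resource B
starting at offset 600 also begins on page 0, so the function returns true
(a potential granularity conflict). If B instead started at offset 1024, its
first page would be page 1 and the function would return false.
*/
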
3304 enum VmaSuballocationType
3305 {
3306  VMA_SUBALLOCATION_TYPE_FREE = 0,
3307  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3308  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3309  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3310  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3311  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3312  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3313 };
3314 
3315 /*
3316 Returns true if given suballocation types could conflict and must respect
3317 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3318 or linear image and another one is optimal image. If type is unknown, behave
3319 conservatively.
3320 */
3321 static inline bool VmaIsBufferImageGranularityConflict(
3322  VmaSuballocationType suballocType1,
3323  VmaSuballocationType suballocType2)
3324 {
3325  if(suballocType1 > suballocType2)
3326  {
3327  VMA_SWAP(suballocType1, suballocType2);
3328  }
3329 
3330  switch(suballocType1)
3331  {
3332  case VMA_SUBALLOCATION_TYPE_FREE:
3333  return false;
3334  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3335  return true;
3336  case VMA_SUBALLOCATION_TYPE_BUFFER:
3337  return
3338  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3339  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3340  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3341  return
3342  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3343  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3344  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3345  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3346  return
3347  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3348  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3349  return false;
3350  default:
3351  VMA_ASSERT(0);
3352  return true;
3353  }
3354 }
3355 
3356 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3357 {
3358  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3359  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3360  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3361  {
3362  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3363  }
3364 }
3365 
3366 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3367 {
3368  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3369  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3370  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3371  {
3372  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3373  {
3374  return false;
3375  }
3376  }
3377  return true;
3378 }
3379 
3380 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3381 struct VmaMutexLock
3382 {
3383  VMA_CLASS_NO_COPY(VmaMutexLock)
3384 public:
3385  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3386  m_pMutex(useMutex ? &mutex : VMA_NULL)
3387  {
3388  if(m_pMutex)
3389  {
3390  m_pMutex->Lock();
3391  }
3392  }
3393 
3394  ~VmaMutexLock()
3395  {
3396  if(m_pMutex)
3397  {
3398  m_pMutex->Unlock();
3399  }
3400  }
3401 
3402 private:
3403  VMA_MUTEX* m_pMutex;
3404 };
3405 
3406 #if VMA_DEBUG_GLOBAL_MUTEX
3407  static VMA_MUTEX gDebugGlobalMutex;
3408  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3409 #else
3410  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3411 #endif
3412 
3413 // Minimum size of a free suballocation to register it in the free suballocation collection.
3414 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3415 
/*
Performs a binary search and returns the iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value is the found element, if it is present in the collection,
or the place where a new element with value (key) should be inserted.
*/
3425 template <typename CmpLess, typename IterT, typename KeyT>
3426 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3427 {
3428  size_t down = 0, up = (end - beg);
3429  while(down < up)
3430  {
3431  const size_t mid = (down + up) / 2;
3432  if(cmp(*(beg+mid), key))
3433  {
3434  down = mid + 1;
3435  }
3436  else
3437  {
3438  up = mid;
3439  }
3440  }
3441  return beg + down;
3442 }
3443 
3445 // Memory allocation
3446 
3447 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3448 {
3449  if((pAllocationCallbacks != VMA_NULL) &&
3450  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3451  {
3452  return (*pAllocationCallbacks->pfnAllocation)(
3453  pAllocationCallbacks->pUserData,
3454  size,
3455  alignment,
3456  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3457  }
3458  else
3459  {
3460  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3461  }
3462 }
3463 
3464 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3465 {
3466  if((pAllocationCallbacks != VMA_NULL) &&
3467  (pAllocationCallbacks->pfnFree != VMA_NULL))
3468  {
3469  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3470  }
3471  else
3472  {
3473  VMA_SYSTEM_FREE(ptr);
3474  }
3475 }
3476 
3477 template<typename T>
3478 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3479 {
3480  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3481 }
3482 
3483 template<typename T>
3484 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3485 {
3486  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3487 }
3488 
3489 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3490 
3491 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3492 
3493 template<typename T>
3494 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3495 {
3496  ptr->~T();
3497  VmaFree(pAllocationCallbacks, ptr);
3498 }
3499 
3500 template<typename T>
3501 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3502 {
3503  if(ptr != VMA_NULL)
3504  {
3505  for(size_t i = count; i--; )
3506  {
3507  ptr[i].~T();
3508  }
3509  VmaFree(pAllocationCallbacks, ptr);
3510  }
3511 }
3512 
3513 // STL-compatible allocator.
3514 template<typename T>
3515 class VmaStlAllocator
3516 {
3517 public:
3518  const VkAllocationCallbacks* const m_pCallbacks;
3519  typedef T value_type;
3520 
3521  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3522  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3523 
3524  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3525  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3526 
3527  template<typename U>
3528  bool operator==(const VmaStlAllocator<U>& rhs) const
3529  {
3530  return m_pCallbacks == rhs.m_pCallbacks;
3531  }
3532  template<typename U>
3533  bool operator!=(const VmaStlAllocator<U>& rhs) const
3534  {
3535  return m_pCallbacks != rhs.m_pCallbacks;
3536  }
3537 
3538  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3539 };
3540 
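/*
Usage sketch: VmaStlAllocator routes container allocations through the
user-provided VkAllocationCallbacks (or the system heap if none were given),
e.g. with the VmaVector defined below:

    VmaStlAllocator<int> alloc(pAllocationCallbacks);
    VmaVector< int, VmaStlAllocator<int> > v(alloc);
    v.push_back(42);
*/
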
3541 #if VMA_USE_STL_VECTOR
3542 
3543 #define VmaVector std::vector
3544 
3545 template<typename T, typename allocatorT>
3546 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3547 {
3548  vec.insert(vec.begin() + index, item);
3549 }
3550 
3551 template<typename T, typename allocatorT>
3552 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3553 {
3554  vec.erase(vec.begin() + index);
3555 }
3556 
3557 #else // #if VMA_USE_STL_VECTOR
3558 
/* Class with an interface compatible with a subset of std::vector.
T must be POD, because constructors and destructors are not called and memcpy
is used to move these objects. */
3562 template<typename T, typename AllocatorT>
3563 class VmaVector
3564 {
3565 public:
3566  typedef T value_type;
3567 
3568  VmaVector(const AllocatorT& allocator) :
3569  m_Allocator(allocator),
3570  m_pArray(VMA_NULL),
3571  m_Count(0),
3572  m_Capacity(0)
3573  {
3574  }
3575 
3576  VmaVector(size_t count, const AllocatorT& allocator) :
3577  m_Allocator(allocator),
3578  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3579  m_Count(count),
3580  m_Capacity(count)
3581  {
3582  }
3583 
3584  VmaVector(const VmaVector<T, AllocatorT>& src) :
3585  m_Allocator(src.m_Allocator),
3586  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3587  m_Count(src.m_Count),
3588  m_Capacity(src.m_Count)
3589  {
3590  if(m_Count != 0)
3591  {
3592  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3593  }
3594  }
3595 
3596  ~VmaVector()
3597  {
3598  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3599  }
3600 
3601  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3602  {
3603  if(&rhs != this)
3604  {
3605  resize(rhs.m_Count);
3606  if(m_Count != 0)
3607  {
3608  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3609  }
3610  }
3611  return *this;
3612  }
3613 
3614  bool empty() const { return m_Count == 0; }
3615  size_t size() const { return m_Count; }
3616  T* data() { return m_pArray; }
3617  const T* data() const { return m_pArray; }
3618 
3619  T& operator[](size_t index)
3620  {
3621  VMA_HEAVY_ASSERT(index < m_Count);
3622  return m_pArray[index];
3623  }
3624  const T& operator[](size_t index) const
3625  {
3626  VMA_HEAVY_ASSERT(index < m_Count);
3627  return m_pArray[index];
3628  }
3629 
3630  T& front()
3631  {
3632  VMA_HEAVY_ASSERT(m_Count > 0);
3633  return m_pArray[0];
3634  }
3635  const T& front() const
3636  {
3637  VMA_HEAVY_ASSERT(m_Count > 0);
3638  return m_pArray[0];
3639  }
3640  T& back()
3641  {
3642  VMA_HEAVY_ASSERT(m_Count > 0);
3643  return m_pArray[m_Count - 1];
3644  }
3645  const T& back() const
3646  {
3647  VMA_HEAVY_ASSERT(m_Count > 0);
3648  return m_pArray[m_Count - 1];
3649  }
3650 
3651  void reserve(size_t newCapacity, bool freeMemory = false)
3652  {
3653  newCapacity = VMA_MAX(newCapacity, m_Count);
3654 
3655  if((newCapacity < m_Capacity) && !freeMemory)
3656  {
3657  newCapacity = m_Capacity;
3658  }
3659 
3660  if(newCapacity != m_Capacity)
3661  {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3663  if(m_Count != 0)
3664  {
3665  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3666  }
3667  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3668  m_Capacity = newCapacity;
3669  m_pArray = newArray;
3670  }
3671  }
3672 
3673  void resize(size_t newCount, bool freeMemory = false)
3674  {
3675  size_t newCapacity = m_Capacity;
3676  if(newCount > m_Capacity)
3677  {
3678  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3679  }
3680  else if(freeMemory)
3681  {
3682  newCapacity = newCount;
3683  }
3684 
3685  if(newCapacity != m_Capacity)
3686  {
3687  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3688  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3689  if(elementsToCopy != 0)
3690  {
3691  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3692  }
3693  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3694  m_Capacity = newCapacity;
3695  m_pArray = newArray;
3696  }
3697 
3698  m_Count = newCount;
3699  }
3700 
3701  void clear(bool freeMemory = false)
3702  {
3703  resize(0, freeMemory);
3704  }
3705 
3706  void insert(size_t index, const T& src)
3707  {
3708  VMA_HEAVY_ASSERT(index <= m_Count);
3709  const size_t oldCount = size();
3710  resize(oldCount + 1);
3711  if(index < oldCount)
3712  {
3713  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3714  }
3715  m_pArray[index] = src;
3716  }
3717 
3718  void remove(size_t index)
3719  {
3720  VMA_HEAVY_ASSERT(index < m_Count);
3721  const size_t oldCount = size();
3722  if(index < oldCount - 1)
3723  {
3724  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3725  }
3726  resize(oldCount - 1);
3727  }
3728 
3729  void push_back(const T& src)
3730  {
3731  const size_t newIndex = size();
3732  resize(newIndex + 1);
3733  m_pArray[newIndex] = src;
3734  }
3735 
3736  void pop_back()
3737  {
3738  VMA_HEAVY_ASSERT(m_Count > 0);
3739  resize(size() - 1);
3740  }
3741 
3742  void push_front(const T& src)
3743  {
3744  insert(0, src);
3745  }
3746 
3747  void pop_front()
3748  {
3749  VMA_HEAVY_ASSERT(m_Count > 0);
3750  remove(0);
3751  }
3752 
3753  typedef T* iterator;
3754 
3755  iterator begin() { return m_pArray; }
3756  iterator end() { return m_pArray + m_Count; }
3757 
3758 private:
3759  AllocatorT m_Allocator;
3760  T* m_pArray;
3761  size_t m_Count;
3762  size_t m_Capacity;
3763 };
3764 
3765 template<typename T, typename allocatorT>
3766 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3767 {
3768  vec.insert(index, item);
3769 }
3770 
3771 template<typename T, typename allocatorT>
3772 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3773 {
3774  vec.remove(index);
3775 }
3776 
3777 #endif // #if VMA_USE_STL_VECTOR
3778 
3779 template<typename CmpLess, typename VectorT>
3780 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3781 {
3782  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3783  vector.data(),
3784  vector.data() + vector.size(),
3785  value,
3786  CmpLess()) - vector.data();
3787  VmaVectorInsert(vector, indexToInsert, value);
3788  return indexToInsert;
3789 }
3790 
3791 template<typename CmpLess, typename VectorT>
3792 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3793 {
3794  CmpLess comparator;
3795  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3796  vector.begin(),
3797  vector.end(),
3798  value,
3799  comparator);
3800  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3801  {
3802  size_t indexToRemove = it - vector.begin();
3803  VmaVectorRemove(vector, indexToRemove);
3804  return true;
3805  }
3806  return false;
3807 }
3808 
3809 template<typename CmpLess, typename IterT, typename KeyT>
3810 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3811 {
3812  CmpLess comparator;
3813  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3814  beg, end, value, comparator);
3815  if(it == end ||
3816  (!comparator(*it, value) && !comparator(value, *it)))
3817  {
3818  return it;
3819  }
3820  return end;
3821 }
3822 
3824 // class VmaPoolAllocator
3825 
/*
Allocator for objects of type T, using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded,
because the allocator can create multiple blocks.
*/
3831 template<typename T>
3832 class VmaPoolAllocator
3833 {
3834  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3835 public:
3836  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3837  ~VmaPoolAllocator();
3838  void Clear();
3839  T* Alloc();
3840  void Free(T* ptr);
3841 
3842 private:
3843  union Item
3844  {
3845  uint32_t NextFreeIndex;
3846  T Value;
3847  };
3848 
3849  struct ItemBlock
3850  {
3851  Item* pItems;
3852  uint32_t FirstFreeIndex;
3853  };
3854 
3855  const VkAllocationCallbacks* m_pAllocationCallbacks;
3856  size_t m_ItemsPerBlock;
3857  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3858 
3859  ItemBlock& CreateNewBlock();
3860 };
3861 
3862 template<typename T>
3863 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3864  m_pAllocationCallbacks(pAllocationCallbacks),
3865  m_ItemsPerBlock(itemsPerBlock),
3866  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3867 {
3868  VMA_ASSERT(itemsPerBlock > 0);
3869 }
3870 
3871 template<typename T>
3872 VmaPoolAllocator<T>::~VmaPoolAllocator()
3873 {
3874  Clear();
3875 }
3876 
3877 template<typename T>
3878 void VmaPoolAllocator<T>::Clear()
3879 {
3880  for(size_t i = m_ItemBlocks.size(); i--; )
3881  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3882  m_ItemBlocks.clear();
3883 }
3884 
3885 template<typename T>
3886 T* VmaPoolAllocator<T>::Alloc()
3887 {
3888  for(size_t i = m_ItemBlocks.size(); i--; )
3889  {
3890  ItemBlock& block = m_ItemBlocks[i];
3891  // This block has some free items: Use first one.
3892  if(block.FirstFreeIndex != UINT32_MAX)
3893  {
3894  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3895  block.FirstFreeIndex = pItem->NextFreeIndex;
3896  return &pItem->Value;
3897  }
3898  }
3899 
3900  // No block has free item: Create new one and use it.
3901  ItemBlock& newBlock = CreateNewBlock();
3902  Item* const pItem = &newBlock.pItems[0];
3903  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3904  return &pItem->Value;
3905 }
3906 
3907 template<typename T>
3908 void VmaPoolAllocator<T>::Free(T* ptr)
3909 {
3910  // Search all memory blocks to find ptr.
3911  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3912  {
3913  ItemBlock& block = m_ItemBlocks[i];
3914 
3915  // Casting to union.
3916  Item* pItemPtr;
3917  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3918 
3919  // Check if pItemPtr is in address range of this block.
3920  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3921  {
3922  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3923  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3924  block.FirstFreeIndex = index;
3925  return;
3926  }
3927  }
3928  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3929 }
3930 
3931 template<typename T>
3932 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3933 {
3934  ItemBlock newBlock = {
3935  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3936 
3937  m_ItemBlocks.push_back(newBlock);
3938 
3939  // Setup singly-linked list of all free items in this block.
3940  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3941  newBlock.pItems[i].NextFreeIndex = i + 1;
3942  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3943  return m_ItemBlocks.back();
3944 }
3945 
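/*
Usage sketch for VmaPoolAllocator: Alloc() returns a free slot from an existing
block (or from a newly created one), and Free() threads the slot back onto that
block's singly-linked free list.

    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 128); // 128 items per block
    uint64_t* a = pool.Alloc(); // note: T's constructor is not called
    pool.Free(a);               // slot becomes the head of its block's free list
*/
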
3947 // class VmaRawList, VmaList
3948 
3949 #if VMA_USE_STL_LIST
3950 
3951 #define VmaList std::list
3952 
3953 #else // #if VMA_USE_STL_LIST
3954 
3955 template<typename T>
3956 struct VmaListItem
3957 {
3958  VmaListItem* pPrev;
3959  VmaListItem* pNext;
3960  T Value;
3961 };
3962 
3963 // Doubly linked list.
3964 template<typename T>
3965 class VmaRawList
3966 {
3967  VMA_CLASS_NO_COPY(VmaRawList)
3968 public:
3969  typedef VmaListItem<T> ItemType;
3970 
3971  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3972  ~VmaRawList();
3973  void Clear();
3974 
3975  size_t GetCount() const { return m_Count; }
3976  bool IsEmpty() const { return m_Count == 0; }
3977 
3978  ItemType* Front() { return m_pFront; }
3979  const ItemType* Front() const { return m_pFront; }
3980  ItemType* Back() { return m_pBack; }
3981  const ItemType* Back() const { return m_pBack; }
3982 
3983  ItemType* PushBack();
3984  ItemType* PushFront();
3985  ItemType* PushBack(const T& value);
3986  ItemType* PushFront(const T& value);
3987  void PopBack();
3988  void PopFront();
3989 
3990  // Item can be null - it means PushBack.
3991  ItemType* InsertBefore(ItemType* pItem);
3992  // Item can be null - it means PushFront.
3993  ItemType* InsertAfter(ItemType* pItem);
3994 
3995  ItemType* InsertBefore(ItemType* pItem, const T& value);
3996  ItemType* InsertAfter(ItemType* pItem, const T& value);
3997 
3998  void Remove(ItemType* pItem);
3999 
4000 private:
4001  const VkAllocationCallbacks* const m_pAllocationCallbacks;
4002  VmaPoolAllocator<ItemType> m_ItemAllocator;
4003  ItemType* m_pFront;
4004  ItemType* m_pBack;
4005  size_t m_Count;
4006 };
4007 
4008 template<typename T>
4009 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
4010  m_pAllocationCallbacks(pAllocationCallbacks),
4011  m_ItemAllocator(pAllocationCallbacks, 128),
4012  m_pFront(VMA_NULL),
4013  m_pBack(VMA_NULL),
4014  m_Count(0)
4015 {
4016 }
4017 
4018 template<typename T>
4019 VmaRawList<T>::~VmaRawList()
4020 {
    // Intentionally not calling Clear, because that would spend time returning
    // every item to m_ItemAllocator as free, right before the allocator itself
    // is destroyed anyway.
4023 }
4024 
4025 template<typename T>
4026 void VmaRawList<T>::Clear()
4027 {
4028  if(IsEmpty() == false)
4029  {
4030  ItemType* pItem = m_pBack;
4031  while(pItem != VMA_NULL)
4032  {
4033  ItemType* const pPrevItem = pItem->pPrev;
4034  m_ItemAllocator.Free(pItem);
4035  pItem = pPrevItem;
4036  }
4037  m_pFront = VMA_NULL;
4038  m_pBack = VMA_NULL;
4039  m_Count = 0;
4040  }
4041 }
4042 
4043 template<typename T>
4044 VmaListItem<T>* VmaRawList<T>::PushBack()
4045 {
4046  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4047  pNewItem->pNext = VMA_NULL;
4048  if(IsEmpty())
4049  {
4050  pNewItem->pPrev = VMA_NULL;
4051  m_pFront = pNewItem;
4052  m_pBack = pNewItem;
4053  m_Count = 1;
4054  }
4055  else
4056  {
4057  pNewItem->pPrev = m_pBack;
4058  m_pBack->pNext = pNewItem;
4059  m_pBack = pNewItem;
4060  ++m_Count;
4061  }
4062  return pNewItem;
4063 }
4064 
4065 template<typename T>
4066 VmaListItem<T>* VmaRawList<T>::PushFront()
4067 {
4068  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4069  pNewItem->pPrev = VMA_NULL;
4070  if(IsEmpty())
4071  {
4072  pNewItem->pNext = VMA_NULL;
4073  m_pFront = pNewItem;
4074  m_pBack = pNewItem;
4075  m_Count = 1;
4076  }
4077  else
4078  {
4079  pNewItem->pNext = m_pFront;
4080  m_pFront->pPrev = pNewItem;
4081  m_pFront = pNewItem;
4082  ++m_Count;
4083  }
4084  return pNewItem;
4085 }
4086 
4087 template<typename T>
4088 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4089 {
4090  ItemType* const pNewItem = PushBack();
4091  pNewItem->Value = value;
4092  return pNewItem;
4093 }
4094 
4095 template<typename T>
4096 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4097 {
4098  ItemType* const pNewItem = PushFront();
4099  pNewItem->Value = value;
4100  return pNewItem;
4101 }
4102 
4103 template<typename T>
4104 void VmaRawList<T>::PopBack()
4105 {
4106  VMA_HEAVY_ASSERT(m_Count > 0);
4107  ItemType* const pBackItem = m_pBack;
4108  ItemType* const pPrevItem = pBackItem->pPrev;
4109  if(pPrevItem != VMA_NULL)
4110  {
4111  pPrevItem->pNext = VMA_NULL;
4112  }
4113  m_pBack = pPrevItem;
4114  m_ItemAllocator.Free(pBackItem);
4115  --m_Count;
4116 }
4117 
4118 template<typename T>
4119 void VmaRawList<T>::PopFront()
4120 {
4121  VMA_HEAVY_ASSERT(m_Count > 0);
4122  ItemType* const pFrontItem = m_pFront;
4123  ItemType* const pNextItem = pFrontItem->pNext;
4124  if(pNextItem != VMA_NULL)
4125  {
4126  pNextItem->pPrev = VMA_NULL;
4127  }
4128  m_pFront = pNextItem;
4129  m_ItemAllocator.Free(pFrontItem);
4130  --m_Count;
4131 }
4132 
4133 template<typename T>
4134 void VmaRawList<T>::Remove(ItemType* pItem)
4135 {
4136  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4137  VMA_HEAVY_ASSERT(m_Count > 0);
4138 
4139  if(pItem->pPrev != VMA_NULL)
4140  {
4141  pItem->pPrev->pNext = pItem->pNext;
4142  }
4143  else
4144  {
4145  VMA_HEAVY_ASSERT(m_pFront == pItem);
4146  m_pFront = pItem->pNext;
4147  }
4148 
4149  if(pItem->pNext != VMA_NULL)
4150  {
4151  pItem->pNext->pPrev = pItem->pPrev;
4152  }
4153  else
4154  {
4155  VMA_HEAVY_ASSERT(m_pBack == pItem);
4156  m_pBack = pItem->pPrev;
4157  }
4158 
4159  m_ItemAllocator.Free(pItem);
4160  --m_Count;
4161 }
4162 
4163 template<typename T>
4164 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4165 {
4166  if(pItem != VMA_NULL)
4167  {
4168  ItemType* const prevItem = pItem->pPrev;
4169  ItemType* const newItem = m_ItemAllocator.Alloc();
4170  newItem->pPrev = prevItem;
4171  newItem->pNext = pItem;
4172  pItem->pPrev = newItem;
4173  if(prevItem != VMA_NULL)
4174  {
4175  prevItem->pNext = newItem;
4176  }
4177  else
4178  {
4179  VMA_HEAVY_ASSERT(m_pFront == pItem);
4180  m_pFront = newItem;
4181  }
4182  ++m_Count;
4183  return newItem;
4184  }
4185  else
4186  return PushBack();
4187 }
4188 
4189 template<typename T>
4190 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4191 {
4192  if(pItem != VMA_NULL)
4193  {
4194  ItemType* const nextItem = pItem->pNext;
4195  ItemType* const newItem = m_ItemAllocator.Alloc();
4196  newItem->pNext = nextItem;
4197  newItem->pPrev = pItem;
4198  pItem->pNext = newItem;
4199  if(nextItem != VMA_NULL)
4200  {
4201  nextItem->pPrev = newItem;
4202  }
4203  else
4204  {
4205  VMA_HEAVY_ASSERT(m_pBack == pItem);
4206  m_pBack = newItem;
4207  }
4208  ++m_Count;
4209  return newItem;
4210  }
4211  else
4212  return PushFront();
4213 }
4214 
4215 template<typename T>
4216 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4217 {
4218  ItemType* const newItem = InsertBefore(pItem);
4219  newItem->Value = value;
4220  return newItem;
4221 }
4222 
4223 template<typename T>
4224 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4225 {
4226  ItemType* const newItem = InsertAfter(pItem);
4227  newItem->Value = value;
4228  return newItem;
4229 }
4230 
4231 template<typename T, typename AllocatorT>
4232 class VmaList
4233 {
4234  VMA_CLASS_NO_COPY(VmaList)
4235 public:
4236  class iterator
4237  {
4238  public:
4239  iterator() :
4240  m_pList(VMA_NULL),
4241  m_pItem(VMA_NULL)
4242  {
4243  }
4244 
4245  T& operator*() const
4246  {
4247  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4248  return m_pItem->Value;
4249  }
4250  T* operator->() const
4251  {
4252  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4253  return &m_pItem->Value;
4254  }
4255 
4256  iterator& operator++()
4257  {
4258  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4259  m_pItem = m_pItem->pNext;
4260  return *this;
4261  }
4262  iterator& operator--()
4263  {
4264  if(m_pItem != VMA_NULL)
4265  {
4266  m_pItem = m_pItem->pPrev;
4267  }
4268  else
4269  {
4270  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4271  m_pItem = m_pList->Back();
4272  }
4273  return *this;
4274  }
4275 
4276  iterator operator++(int)
4277  {
4278  iterator result = *this;
4279  ++*this;
4280  return result;
4281  }
4282  iterator operator--(int)
4283  {
4284  iterator result = *this;
4285  --*this;
4286  return result;
4287  }
4288 
4289  bool operator==(const iterator& rhs) const
4290  {
4291  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4292  return m_pItem == rhs.m_pItem;
4293  }
4294  bool operator!=(const iterator& rhs) const
4295  {
4296  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4297  return m_pItem != rhs.m_pItem;
4298  }
4299 
4300  private:
4301  VmaRawList<T>* m_pList;
4302  VmaListItem<T>* m_pItem;
4303 
4304  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4305  m_pList(pList),
4306  m_pItem(pItem)
4307  {
4308  }
4309 
4310  friend class VmaList<T, AllocatorT>;
4311  };
4312 
4313  class const_iterator
4314  {
4315  public:
4316  const_iterator() :
4317  m_pList(VMA_NULL),
4318  m_pItem(VMA_NULL)
4319  {
4320  }
4321 
4322  const_iterator(const iterator& src) :
4323  m_pList(src.m_pList),
4324  m_pItem(src.m_pItem)
4325  {
4326  }
4327 
4328  const T& operator*() const
4329  {
4330  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4331  return m_pItem->Value;
4332  }
4333  const T* operator->() const
4334  {
4335  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4336  return &m_pItem->Value;
4337  }
4338 
4339  const_iterator& operator++()
4340  {
4341  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4342  m_pItem = m_pItem->pNext;
4343  return *this;
4344  }
4345  const_iterator& operator--()
4346  {
4347  if(m_pItem != VMA_NULL)
4348  {
4349  m_pItem = m_pItem->pPrev;
4350  }
4351  else
4352  {
4353  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4354  m_pItem = m_pList->Back();
4355  }
4356  return *this;
4357  }
4358 
4359  const_iterator operator++(int)
4360  {
4361  const_iterator result = *this;
4362  ++*this;
4363  return result;
4364  }
4365  const_iterator operator--(int)
4366  {
4367  const_iterator result = *this;
4368  --*this;
4369  return result;
4370  }
4371 
4372  bool operator==(const const_iterator& rhs) const
4373  {
4374  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4375  return m_pItem == rhs.m_pItem;
4376  }
4377  bool operator!=(const const_iterator& rhs) const
4378  {
4379  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4380  return m_pItem != rhs.m_pItem;
4381  }
4382 
4383  private:
4384  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4385  m_pList(pList),
4386  m_pItem(pItem)
4387  {
4388  }
4389 
4390  const VmaRawList<T>* m_pList;
4391  const VmaListItem<T>* m_pItem;
4392 
4393  friend class VmaList<T, AllocatorT>;
4394  };
4395 
4396  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4397 
4398  bool empty() const { return m_RawList.IsEmpty(); }
4399  size_t size() const { return m_RawList.GetCount(); }
4400 
4401  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4402  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4403 
4404  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4405  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4406 
4407  void clear() { m_RawList.Clear(); }
4408  void push_back(const T& value) { m_RawList.PushBack(value); }
4409  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4410  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4411 
4412 private:
4413  VmaRawList<T> m_RawList;
4414 };
4415 
4416 #endif // #if VMA_USE_STL_LIST
4417 
4419 // class VmaMap
4420 
4421 // Unused in this version.
4422 #if 0
4423 
4424 #if VMA_USE_STL_UNORDERED_MAP
4425 
4426 #define VmaPair std::pair
4427 
4428 #define VMA_MAP_TYPE(KeyT, ValueT) \
4429  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4430 
4431 #else // #if VMA_USE_STL_UNORDERED_MAP
4432 
4433 template<typename T1, typename T2>
4434 struct VmaPair
4435 {
4436  T1 first;
4437  T2 second;
4438 
4439  VmaPair() : first(), second() { }
4440  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4441 };
4442 
/* Class compatible with a subset of the interface of std::unordered_map.
KeyT and ValueT must be POD because they will be stored in VmaVector.
*/
4446 template<typename KeyT, typename ValueT>
4447 class VmaMap
4448 {
4449 public:
4450  typedef VmaPair<KeyT, ValueT> PairType;
4451  typedef PairType* iterator;
4452 
4453  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4454 
4455  iterator begin() { return m_Vector.begin(); }
4456  iterator end() { return m_Vector.end(); }
4457 
4458  void insert(const PairType& pair);
4459  iterator find(const KeyT& key);
4460  void erase(iterator it);
4461 
4462 private:
4463  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4464 };
4465 
4466 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4467 
4468 template<typename FirstT, typename SecondT>
4469 struct VmaPairFirstLess
4470 {
4471  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4472  {
4473  return lhs.first < rhs.first;
4474  }
4475  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4476  {
4477  return lhs.first < rhsFirst;
4478  }
4479 };
4480 
4481 template<typename KeyT, typename ValueT>
4482 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4483 {
4484  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4485  m_Vector.data(),
4486  m_Vector.data() + m_Vector.size(),
4487  pair,
4488  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4489  VmaVectorInsert(m_Vector, indexToInsert, pair);
4490 }
4491 
4492 template<typename KeyT, typename ValueT>
4493 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4494 {
4495  PairType* it = VmaBinaryFindFirstNotLess(
4496  m_Vector.data(),
4497  m_Vector.data() + m_Vector.size(),
4498  key,
4499  VmaPairFirstLess<KeyT, ValueT>());
4500  if((it != m_Vector.end()) && (it->first == key))
4501  {
4502  return it;
4503  }
4504  else
4505  {
4506  return m_Vector.end();
4507  }
4508 }
4509 
4510 template<typename KeyT, typename ValueT>
4511 void VmaMap<KeyT, ValueT>::erase(iterator it)
4512 {
4513  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4514 }
4515 
4516 #endif // #if VMA_USE_STL_UNORDERED_MAP
4517 
4518 #endif // #if 0
4519 
4521 
4522 class VmaDeviceMemoryBlock;
4523 
4524 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4525 
4526 struct VmaAllocation_T
4527 {
4528  VMA_CLASS_NO_COPY(VmaAllocation_T)
4529 private:
4530  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4531 
4532  enum FLAGS
4533  {
4534  FLAG_USER_DATA_STRING = 0x01,
4535  };
4536 
4537 public:
4538  enum ALLOCATION_TYPE
4539  {
4540  ALLOCATION_TYPE_NONE,
4541  ALLOCATION_TYPE_BLOCK,
4542  ALLOCATION_TYPE_DEDICATED,
4543  };
4544 
4545  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4546  m_Alignment(1),
4547  m_Size(0),
4548  m_pUserData(VMA_NULL),
4549  m_LastUseFrameIndex(currentFrameIndex),
4550  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4551  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4552  m_MapCount(0),
4553  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4554  {
4555 #if VMA_STATS_STRING_ENABLED
4556  m_CreationFrameIndex = currentFrameIndex;
4557  m_BufferImageUsage = 0;
4558 #endif
4559  }
4560 
4561  ~VmaAllocation_T()
4562  {
4563  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4564 
4565  // Check that the owned user-data string was freed.
4566  VMA_ASSERT(m_pUserData == VMA_NULL);
4567  }
4568 
4569  void InitBlockAllocation(
4570  VmaPool hPool,
4571  VmaDeviceMemoryBlock* block,
4572  VkDeviceSize offset,
4573  VkDeviceSize alignment,
4574  VkDeviceSize size,
4575  VmaSuballocationType suballocationType,
4576  bool mapped,
4577  bool canBecomeLost)
4578  {
4579  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4580  VMA_ASSERT(block != VMA_NULL);
4581  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4582  m_Alignment = alignment;
4583  m_Size = size;
4584  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4585  m_SuballocationType = (uint8_t)suballocationType;
4586  m_BlockAllocation.m_hPool = hPool;
4587  m_BlockAllocation.m_Block = block;
4588  m_BlockAllocation.m_Offset = offset;
4589  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4590  }
4591 
4592  void InitLost()
4593  {
4594  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4595  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4596  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4597  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4598  m_BlockAllocation.m_Block = VMA_NULL;
4599  m_BlockAllocation.m_Offset = 0;
4600  m_BlockAllocation.m_CanBecomeLost = true;
4601  }
4602 
4603  void ChangeBlockAllocation(
4604  VmaAllocator hAllocator,
4605  VmaDeviceMemoryBlock* block,
4606  VkDeviceSize offset);
4607 
4608  // A non-null pMappedData means the allocation was created with the MAPPED flag.
4609  void InitDedicatedAllocation(
4610  uint32_t memoryTypeIndex,
4611  VkDeviceMemory hMemory,
4612  VmaSuballocationType suballocationType,
4613  void* pMappedData,
4614  VkDeviceSize size)
4615  {
4616  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4617  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4618  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4619  m_Alignment = 0;
4620  m_Size = size;
4621  m_SuballocationType = (uint8_t)suballocationType;
4622  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4623  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4624  m_DedicatedAllocation.m_hMemory = hMemory;
4625  m_DedicatedAllocation.m_pMappedData = pMappedData;
4626  }
4627 
4628  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4629  VkDeviceSize GetAlignment() const { return m_Alignment; }
4630  VkDeviceSize GetSize() const { return m_Size; }
4631  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4632  void* GetUserData() const { return m_pUserData; }
4633  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4634  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4635 
4636  VmaDeviceMemoryBlock* GetBlock() const
4637  {
4638  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4639  return m_BlockAllocation.m_Block;
4640  }
4641  VkDeviceSize GetOffset() const;
4642  VkDeviceMemory GetMemory() const;
4643  uint32_t GetMemoryTypeIndex() const;
4644  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4645  void* GetMappedData() const;
4646  bool CanBecomeLost() const;
4647  VmaPool GetPool() const;
4648 
4649  uint32_t GetLastUseFrameIndex() const
4650  {
4651  return m_LastUseFrameIndex.load();
4652  }
4653  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4654  {
4655  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4656  }
4657  /*
4658  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4659  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4660  - Else, returns false.
4661 
4662  If hAllocation is already lost, assert - you should not call it then.
4663  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4664  */
4665  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
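 // Worked example: with frameInUseCount == 2, an allocation last used in
 // frame 10 can be made lost starting from frame 13, because 10 + 2 < 13.
 // The transition sets m_LastUseFrameIndex to VMA_FRAME_INDEX_LOST via
 // CompareExchangeLastUseFrameIndex, so it is atomic with respect to
 // concurrent frame-index updates.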
4666 
4667  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4668  {
4669  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4670  outInfo.blockCount = 1;
4671  outInfo.allocationCount = 1;
4672  outInfo.unusedRangeCount = 0;
4673  outInfo.usedBytes = m_Size;
4674  outInfo.unusedBytes = 0;
4675  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4676  outInfo.unusedRangeSizeMin = UINT64_MAX;
4677  outInfo.unusedRangeSizeMax = 0;
4678  }
4679 
4680  void BlockAllocMap();
4681  void BlockAllocUnmap();
4682  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4683  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4684 
4685 #if VMA_STATS_STRING_ENABLED
4686  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4687  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4688 
4689  void InitBufferImageUsage(uint32_t bufferImageUsage)
4690  {
4691  VMA_ASSERT(m_BufferImageUsage == 0);
4692  m_BufferImageUsage = bufferImageUsage;
4693  }
4694 
4695  void PrintParameters(class VmaJsonWriter& json) const;
4696 #endif
4697 
4698 private:
4699  VkDeviceSize m_Alignment;
4700  VkDeviceSize m_Size;
4701  void* m_pUserData;
4702  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4703  uint8_t m_Type; // ALLOCATION_TYPE
4704  uint8_t m_SuballocationType; // VmaSuballocationType
4705  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4706  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4707  uint8_t m_MapCount;
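 // Example: m_MapCount == 0x82 means persistently mapped (bit 0x80) plus
 // two outstanding vmaMapMemory() calls (0x82 & 0x7F == 2).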
4708  uint8_t m_Flags; // enum FLAGS
4709 
4710  // Allocation out of VmaDeviceMemoryBlock.
4711  struct BlockAllocation
4712  {
4713  VmaPool m_hPool; // Null if belongs to general memory.
4714  VmaDeviceMemoryBlock* m_Block;
4715  VkDeviceSize m_Offset;
4716  bool m_CanBecomeLost;
4717  };
4718 
4719  // Allocation for an object that has its own private VkDeviceMemory.
4720  struct DedicatedAllocation
4721  {
4722  uint32_t m_MemoryTypeIndex;
4723  VkDeviceMemory m_hMemory;
4724  void* m_pMappedData; // Not null means memory is mapped.
4725  };
4726 
4727  union
4728  {
4729  // Allocation out of VmaDeviceMemoryBlock.
4730  BlockAllocation m_BlockAllocation;
4731  // Allocation for an object that has its own private VkDeviceMemory.
4732  DedicatedAllocation m_DedicatedAllocation;
4733  };
4734 
4735 #if VMA_STATS_STRING_ENABLED
4736  uint32_t m_CreationFrameIndex;
4737  uint32_t m_BufferImageUsage; // 0 if unknown.
4738 #endif
4739 
4740  void FreeUserDataString(VmaAllocator hAllocator);
4741 };
4742 
4743 /*
4744 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
4745 allocation and returned to the user, or free.
4746 */
4747 struct VmaSuballocation
4748 {
4749  VkDeviceSize offset;
4750  VkDeviceSize size;
4751  VmaAllocation hAllocation;
4752  VmaSuballocationType type;
4753 };
4754 
4755 // Comparator for offsets.
4756 struct VmaSuballocationOffsetLess
4757 {
4758  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4759  {
4760  return lhs.offset < rhs.offset;
4761  }
4762 };
4763 struct VmaSuballocationOffsetGreater
4764 {
4765  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4766  {
4767  return lhs.offset > rhs.offset;
4768  }
4769 };
4770 
4771 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4772 
4773 // Cost of making one additional allocation lost, expressed in bytes.
4774 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4775 
4776 /*
4777 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4778 
4779 If canMakeOtherLost was false:
4780 - item points to a FREE suballocation.
4781 - itemsToMakeLostCount is 0.
4782 
4783 If canMakeOtherLost was true:
4784 - item points to first of sequence of suballocations, which are either FREE,
4785  or point to VmaAllocations that can become lost.
4786 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4787  the requested allocation to succeed.
4788 */
4789 struct VmaAllocationRequest
4790 {
4791  VkDeviceSize offset;
4792  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4793  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4794  VmaSuballocationList::iterator item;
4795  size_t itemsToMakeLostCount;
4796  void* customData;
4797 
4798  VkDeviceSize CalcCost() const
4799  {
4800  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4801  }
4802 };
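// Illustrative cost comparison: a request that would consume 64 KiB of items
// to make lost across 2 allocations has
// CalcCost() == 65536 + 2 * VMA_LOST_ALLOCATION_COST == 2162688,
// so it loses to an alternative request that fits entirely in free space
// (sumItemSize == 0, itemsToMakeLostCount == 0, cost 0).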
4803 
4804 /*
4805 Data structure used for bookkeeping of allocations and unused ranges of memory
4806 in a single VkDeviceMemory block.
4807 */
4808 class VmaBlockMetadata
4809 {
4810 public:
4811  VmaBlockMetadata(VmaAllocator hAllocator);
4812  virtual ~VmaBlockMetadata() { }
4813  virtual void Init(VkDeviceSize size) { m_Size = size; }
4814 
4815  // Validates all data structures inside this object. If not valid, returns false.
4816  virtual bool Validate() const = 0;
4817  VkDeviceSize GetSize() const { return m_Size; }
4818  virtual size_t GetAllocationCount() const = 0;
4819  virtual VkDeviceSize GetSumFreeSize() const = 0;
4820  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4821  // Returns true if this block is empty - contains only a single free suballocation.
4822  virtual bool IsEmpty() const = 0;
4823 
4824  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4825  // Shouldn't modify blockCount.
4826  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4827 
4828 #if VMA_STATS_STRING_ENABLED
4829  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4830 #endif
4831 
4832  // Tries to find a place for a suballocation with the given parameters inside this block.
4833  // If it succeeds, fills pAllocationRequest and returns true.
4834  // If it fails, returns false.
4835  virtual bool CreateAllocationRequest(
4836  uint32_t currentFrameIndex,
4837  uint32_t frameInUseCount,
4838  VkDeviceSize bufferImageGranularity,
4839  VkDeviceSize allocSize,
4840  VkDeviceSize allocAlignment,
4841  bool upperAddress,
4842  VmaSuballocationType allocType,
4843  bool canMakeOtherLost,
4844  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4845  VmaAllocationRequest* pAllocationRequest) = 0;
4846 
4847  virtual bool MakeRequestedAllocationsLost(
4848  uint32_t currentFrameIndex,
4849  uint32_t frameInUseCount,
4850  VmaAllocationRequest* pAllocationRequest) = 0;
4851 
4852  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4853 
4854  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4855 
4856  // Makes actual allocation based on request. Request must already be checked and valid.
4857  virtual void Alloc(
4858  const VmaAllocationRequest& request,
4859  VmaSuballocationType type,
4860  VkDeviceSize allocSize,
4861  bool upperAddress,
4862  VmaAllocation hAllocation) = 0;
4863 
4864  // Frees suballocation assigned to given memory region.
4865  virtual void Free(const VmaAllocation allocation) = 0;
4866  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4867 
4868 protected:
4869  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4870 
4871 #if VMA_STATS_STRING_ENABLED
4872  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4873  VkDeviceSize unusedBytes,
4874  size_t allocationCount,
4875  size_t unusedRangeCount) const;
4876  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4877  VkDeviceSize offset,
4878  VmaAllocation hAllocation) const;
4879  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4880  VkDeviceSize offset,
4881  VkDeviceSize size) const;
4882  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4883 #endif
4884 
4885 private:
4886  VkDeviceSize m_Size;
4887  const VkAllocationCallbacks* m_pAllocationCallbacks;
4888 };
4889 
4890 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4891  VMA_ASSERT(0 && "Validation failed: " #cond); \
4892  return false; \
4893  } } while(false)
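// Usage sketch: inside a Validate() override,
//   VMA_VALIDATE(m_SumFreeSize <= GetSize());
// triggers VMA_ASSERT in debug configurations and makes the enclosing
// function return false when the condition does not hold.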
4894 
4895 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4896 {
4897  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4898 public:
4899  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4900  virtual ~VmaBlockMetadata_Generic();
4901  virtual void Init(VkDeviceSize size);
4902 
4903  virtual bool Validate() const;
4904  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4905  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4906  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4907  virtual bool IsEmpty() const;
4908 
4909  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4910  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4911 
4912 #if VMA_STATS_STRING_ENABLED
4913  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4914 #endif
4915 
4916  virtual bool CreateAllocationRequest(
4917  uint32_t currentFrameIndex,
4918  uint32_t frameInUseCount,
4919  VkDeviceSize bufferImageGranularity,
4920  VkDeviceSize allocSize,
4921  VkDeviceSize allocAlignment,
4922  bool upperAddress,
4923  VmaSuballocationType allocType,
4924  bool canMakeOtherLost,
4925  uint32_t strategy,
4926  VmaAllocationRequest* pAllocationRequest);
4927 
4928  virtual bool MakeRequestedAllocationsLost(
4929  uint32_t currentFrameIndex,
4930  uint32_t frameInUseCount,
4931  VmaAllocationRequest* pAllocationRequest);
4932 
4933  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4934 
4935  virtual VkResult CheckCorruption(const void* pBlockData);
4936 
4937  virtual void Alloc(
4938  const VmaAllocationRequest& request,
4939  VmaSuballocationType type,
4940  VkDeviceSize allocSize,
4941  bool upperAddress,
4942  VmaAllocation hAllocation);
4943 
4944  virtual void Free(const VmaAllocation allocation);
4945  virtual void FreeAtOffset(VkDeviceSize offset);
4946 
4947 private:
4948  uint32_t m_FreeCount;
4949  VkDeviceSize m_SumFreeSize;
4950  VmaSuballocationList m_Suballocations;
4951  // Suballocations that are free and have size greater than a certain threshold.
4952  // Sorted by size, ascending.
4953  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4954 
4955  bool ValidateFreeSuballocationList() const;
4956 
4957  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
4958  // If yes, fills pOffset and returns true. If no, returns false.
4959  bool CheckAllocation(
4960  uint32_t currentFrameIndex,
4961  uint32_t frameInUseCount,
4962  VkDeviceSize bufferImageGranularity,
4963  VkDeviceSize allocSize,
4964  VkDeviceSize allocAlignment,
4965  VmaSuballocationType allocType,
4966  VmaSuballocationList::const_iterator suballocItem,
4967  bool canMakeOtherLost,
4968  VkDeviceSize* pOffset,
4969  size_t* itemsToMakeLostCount,
4970  VkDeviceSize* pSumFreeSize,
4971  VkDeviceSize* pSumItemSize) const;
4972  // Given a free suballocation, merges it with the following one, which must also be free.
4973  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4974  // Releases given suballocation, making it free.
4975  // Merges it with adjacent free suballocations if applicable.
4976  // Returns iterator to new free suballocation at this place.
4977  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4978  // Given a free suballocation, inserts it into the sorted list
4979  // m_FreeSuballocationsBySize if it is suitable.
4980  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4981  // Given a free suballocation, removes it from the sorted list
4982  // m_FreeSuballocationsBySize if it is suitable.
4983  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4984 };
4985 
4986 /*
4987 Allocations and their references in the internal data structure look like this:
4988 
4989 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4990 
4991  0 +-------+
4992  | |
4993  | |
4994  | |
4995  +-------+
4996  | Alloc | 1st[m_1stNullItemsBeginCount]
4997  +-------+
4998  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4999  +-------+
5000  | ... |
5001  +-------+
5002  | Alloc | 1st[1st.size() - 1]
5003  +-------+
5004  | |
5005  | |
5006  | |
5007 GetSize() +-------+
5008 
5009 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5010 
5011  0 +-------+
5012  | Alloc | 2nd[0]
5013  +-------+
5014  | Alloc | 2nd[1]
5015  +-------+
5016  | ... |
5017  +-------+
5018  | Alloc | 2nd[2nd.size() - 1]
5019  +-------+
5020  | |
5021  | |
5022  | |
5023  +-------+
5024  | Alloc | 1st[m_1stNullItemsBeginCount]
5025  +-------+
5026  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5027  +-------+
5028  | ... |
5029  +-------+
5030  | Alloc | 1st[1st.size() - 1]
5031  +-------+
5032  | |
5033 GetSize() +-------+
5034 
5035 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5036 
5037  0 +-------+
5038  | |
5039  | |
5040  | |
5041  +-------+
5042  | Alloc | 1st[m_1stNullItemsBeginCount]
5043  +-------+
5044  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5045  +-------+
5046  | ... |
5047  +-------+
5048  | Alloc | 1st[1st.size() - 1]
5049  +-------+
5050  | |
5051  | |
5052  | |
5053  +-------+
5054  | Alloc | 2nd[2nd.size() - 1]
5055  +-------+
5056  | ... |
5057  +-------+
5058  | Alloc | 2nd[1]
5059  +-------+
5060  | Alloc | 2nd[0]
5061 GetSize() +-------+
5062 
5063 */
5064 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5065 {
5066  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5067 public:
5068  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5069  virtual ~VmaBlockMetadata_Linear();
5070  virtual void Init(VkDeviceSize size);
5071 
5072  virtual bool Validate() const;
5073  virtual size_t GetAllocationCount() const;
5074  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5075  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5076  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5077 
5078  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5079  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5080 
5081 #if VMA_STATS_STRING_ENABLED
5082  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5083 #endif
5084 
5085  virtual bool CreateAllocationRequest(
5086  uint32_t currentFrameIndex,
5087  uint32_t frameInUseCount,
5088  VkDeviceSize bufferImageGranularity,
5089  VkDeviceSize allocSize,
5090  VkDeviceSize allocAlignment,
5091  bool upperAddress,
5092  VmaSuballocationType allocType,
5093  bool canMakeOtherLost,
5094  uint32_t strategy,
5095  VmaAllocationRequest* pAllocationRequest);
5096 
5097  virtual bool MakeRequestedAllocationsLost(
5098  uint32_t currentFrameIndex,
5099  uint32_t frameInUseCount,
5100  VmaAllocationRequest* pAllocationRequest);
5101 
5102  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5103 
5104  virtual VkResult CheckCorruption(const void* pBlockData);
5105 
5106  virtual void Alloc(
5107  const VmaAllocationRequest& request,
5108  VmaSuballocationType type,
5109  VkDeviceSize allocSize,
5110  bool upperAddress,
5111  VmaAllocation hAllocation);
5112 
5113  virtual void Free(const VmaAllocation allocation);
5114  virtual void FreeAtOffset(VkDeviceSize offset);
5115 
5116 private:
5117  /*
5118  There are two suballocation vectors, used in a ping-pong fashion.
5119  The one with index m_1stVectorIndex is called 1st.
5120  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5121  2nd can be non-empty only when 1st is not empty.
5122  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5123  */
5124  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5125 
5126  enum SECOND_VECTOR_MODE
5127  {
5128  SECOND_VECTOR_EMPTY,
5129  /*
5130  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
5131  all have smaller offsets.
5132  */
5133  SECOND_VECTOR_RING_BUFFER,
5134  /*
5135  Suballocations in 2nd vector are upper side of double stack.
5136  They all have offsets higher than those in the 1st vector.
5137  The top of this stack corresponds to smaller offsets but higher indices in this vector.
5138  */
5139  SECOND_VECTOR_DOUBLE_STACK,
5140  };
5141 
5142  VkDeviceSize m_SumFreeSize;
5143  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5144  uint32_t m_1stVectorIndex;
5145  SECOND_VECTOR_MODE m_2ndVectorMode;
5146 
5147  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5148  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5149  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5150  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
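 // When 1st becomes empty while 2nd operates as a ring buffer, the vectors
 // can swap roles in O(1) by flipping m_1stVectorIndex instead of copying
 // elements (done as part of CleanupAfterFree).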
5151 
5152  // Number of items in 1st vector with hAllocation = null at the beginning.
5153  size_t m_1stNullItemsBeginCount;
5154  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5155  size_t m_1stNullItemsMiddleCount;
5156  // Number of items in 2nd vector with hAllocation = null.
5157  size_t m_2ndNullItemsCount;
5158 
5159  bool ShouldCompact1st() const;
5160  void CleanupAfterFree();
5161 };
5162 
5163 /*
5164 - GetSize() is the original size of allocated memory block.
5165 - m_UsableSize is this size aligned down to a power of two.
5166  All allocations and calculations happen relative to m_UsableSize.
5167 - GetUnusableSize() is the difference between them.
5168  It is reported as a separate, unused range, not available for allocations.
5169 
5170 The node at level 0 has size = m_UsableSize.
5171 Each subsequent level contains nodes half the size of the previous level.
5172 m_LevelCount is the maximum number of levels to use in the current object.
5173 */
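// Worked example: for a 1 MiB block (already a power of two),
// m_UsableSize == 1 MiB, so level 0 holds one 1 MiB node, level 1 nodes are
// 512 KiB, level 2 are 256 KiB, and so on (LevelToNodeSize(level) ==
// m_UsableSize >> level). A 100 KiB request is placed at the deepest level
// whose node size is still >= 100 KiB, i.e. level 3 (128 KiB); the roughly
// 28 KiB of internal fragmentation is counted in m_SumFreeSize.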
5174 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5175 {
5176  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5177 public:
5178  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5179  virtual ~VmaBlockMetadata_Buddy();
5180  virtual void Init(VkDeviceSize size);
5181 
5182  virtual bool Validate() const;
5183  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5184  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5185  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5186  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5187 
5188  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5189  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5190 
5191 #if VMA_STATS_STRING_ENABLED
5192  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5193 #endif
5194 
5195  virtual bool CreateAllocationRequest(
5196  uint32_t currentFrameIndex,
5197  uint32_t frameInUseCount,
5198  VkDeviceSize bufferImageGranularity,
5199  VkDeviceSize allocSize,
5200  VkDeviceSize allocAlignment,
5201  bool upperAddress,
5202  VmaSuballocationType allocType,
5203  bool canMakeOtherLost,
5204  uint32_t strategy,
5205  VmaAllocationRequest* pAllocationRequest);
5206 
5207  virtual bool MakeRequestedAllocationsLost(
5208  uint32_t currentFrameIndex,
5209  uint32_t frameInUseCount,
5210  VmaAllocationRequest* pAllocationRequest);
5211 
5212  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5213 
5214  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5215 
5216  virtual void Alloc(
5217  const VmaAllocationRequest& request,
5218  VmaSuballocationType type,
5219  VkDeviceSize allocSize,
5220  bool upperAddress,
5221  VmaAllocation hAllocation);
5222 
5223  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5224  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5225 
5226 private:
5227  static const VkDeviceSize MIN_NODE_SIZE = 32;
5228  static const size_t MAX_LEVELS = 30;
5229 
5230  struct ValidationContext
5231  {
5232  size_t calculatedAllocationCount;
5233  size_t calculatedFreeCount;
5234  VkDeviceSize calculatedSumFreeSize;
5235 
5236  ValidationContext() :
5237  calculatedAllocationCount(0),
5238  calculatedFreeCount(0),
5239  calculatedSumFreeSize(0) { }
5240  };
5241 
5242  struct Node
5243  {
5244  VkDeviceSize offset;
5245  enum TYPE
5246  {
5247  TYPE_FREE,
5248  TYPE_ALLOCATION,
5249  TYPE_SPLIT,
5250  TYPE_COUNT
5251  } type;
5252  Node* parent;
5253  Node* buddy;
5254 
5255  union
5256  {
5257  struct
5258  {
5259  Node* prev;
5260  Node* next;
5261  } free;
5262  struct
5263  {
5264  VmaAllocation alloc;
5265  } allocation;
5266  struct
5267  {
5268  Node* leftChild;
5269  } split;
5270  };
5271  };
5272 
5273  // Size of the memory block aligned down to a power of two.
5274  VkDeviceSize m_UsableSize;
5275  uint32_t m_LevelCount;
5276 
5277  Node* m_Root;
5278  struct {
5279  Node* front;
5280  Node* back;
5281  } m_FreeList[MAX_LEVELS];
5282  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5283  size_t m_AllocationCount;
5284  // Number of nodes in the tree with type == TYPE_FREE.
5285  size_t m_FreeCount;
5286  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5287  VkDeviceSize m_SumFreeSize;
5288 
5289  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5290  void DeleteNode(Node* node);
5291  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5292  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5293  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5294  // alloc is passed just for validation and can be null.
5295  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5296  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5297  // Adds node to the front of FreeList at given level.
5298  // node->type must be FREE.
5299  // node->free.prev, next can be undefined.
5300  void AddToFreeListFront(uint32_t level, Node* node);
5301  // Removes node from FreeList at given level.
5302  // node->type must be FREE.
5303  // node->free.prev, next stay untouched.
5304  void RemoveFromFreeList(uint32_t level, Node* node);
5305 
5306 #if VMA_STATS_STRING_ENABLED
5307  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5308 #endif
5309 };
5310 
5311 /*
5312 Represents a single block of device memory (`VkDeviceMemory`) with all the
5313 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5314 
5315 Thread-safety: This class must be externally synchronized.
5316 */
5317 class VmaDeviceMemoryBlock
5318 {
5319  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5320 public:
5321  VmaBlockMetadata* m_pMetadata;
5322 
5323  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5324 
5325  ~VmaDeviceMemoryBlock()
5326  {
5327  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5328  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5329  }
5330 
5331  // Always call after construction.
5332  void Init(
5333  VmaAllocator hAllocator,
5334  uint32_t newMemoryTypeIndex,
5335  VkDeviceMemory newMemory,
5336  VkDeviceSize newSize,
5337  uint32_t id,
5338  uint32_t algorithm);
5339  // Always call before destruction.
5340  void Destroy(VmaAllocator allocator);
5341 
5342  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5343  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5344  uint32_t GetId() const { return m_Id; }
5345  void* GetMappedData() const { return m_pMappedData; }
5346 
5347  // Validates all data structures inside this object. If not valid, returns false.
5348  bool Validate() const;
5349 
5350  VkResult CheckCorruption(VmaAllocator hAllocator);
5351 
5352  // ppData can be null.
5353  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5354  void Unmap(VmaAllocator hAllocator, uint32_t count);
5355 
5356  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5357  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5358 
5359  VkResult BindBufferMemory(
5360  const VmaAllocator hAllocator,
5361  const VmaAllocation hAllocation,
5362  VkBuffer hBuffer);
5363  VkResult BindImageMemory(
5364  const VmaAllocator hAllocator,
5365  const VmaAllocation hAllocation,
5366  VkImage hImage);
5367 
5368 private:
5369  uint32_t m_MemoryTypeIndex;
5370  uint32_t m_Id;
5371  VkDeviceMemory m_hMemory;
5372 
5373  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5374  // Also protects m_MapCount, m_pMappedData.
5375  VMA_MUTEX m_Mutex;
5376  uint32_t m_MapCount;
5377  void* m_pMappedData;
5378 };
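// Note: Map()/Unmap() are reference-counted through m_MapCount under m_Mutex,
// so multiple allocations in the same block may be mapped concurrently;
// vkMapMemory is called only on the 0 -> 1 transition and vkUnmapMemory on
// the 1 -> 0 transition.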
5379 
5380 struct VmaPointerLess
5381 {
5382  bool operator()(const void* lhs, const void* rhs) const
5383  {
5384  return lhs < rhs;
5385  }
5386 };
5387 
5388 class VmaDefragmentator;
5389 
5390 /*
5391 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5392 Vulkan memory type.
5393 
5394 Synchronized internally with a mutex.
5395 */
5396 struct VmaBlockVector
5397 {
5398  VMA_CLASS_NO_COPY(VmaBlockVector)
5399 public:
5400  VmaBlockVector(
5401  VmaAllocator hAllocator,
5402  uint32_t memoryTypeIndex,
5403  VkDeviceSize preferredBlockSize,
5404  size_t minBlockCount,
5405  size_t maxBlockCount,
5406  VkDeviceSize bufferImageGranularity,
5407  uint32_t frameInUseCount,
5408  bool isCustomPool,
5409  bool explicitBlockSize,
5410  uint32_t algorithm);
5411  ~VmaBlockVector();
5412 
5413  VkResult CreateMinBlocks();
5414 
5415  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5416  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5417  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5418  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5419  uint32_t GetAlgorithm() const { return m_Algorithm; }
5420 
5421  void GetPoolStats(VmaPoolStats* pStats);
5422 
5423  bool IsEmpty() const { return m_Blocks.empty(); }
5424  bool IsCorruptionDetectionEnabled() const;
5425 
5426  VkResult Allocate(
5427  VmaPool hCurrentPool,
5428  uint32_t currentFrameIndex,
5429  VkDeviceSize size,
5430  VkDeviceSize alignment,
5431  const VmaAllocationCreateInfo& createInfo,
5432  VmaSuballocationType suballocType,
5433  VmaAllocation* pAllocation);
5434 
5435  void Free(
5436  VmaAllocation hAllocation);
5437 
5438  // Adds statistics of this BlockVector to pStats.
5439  void AddStats(VmaStats* pStats);
5440 
5441 #if VMA_STATS_STRING_ENABLED
5442  void PrintDetailedMap(class VmaJsonWriter& json);
5443 #endif
5444 
5445  void MakePoolAllocationsLost(
5446  uint32_t currentFrameIndex,
5447  size_t* pLostAllocationCount);
5448  VkResult CheckCorruption();
5449 
5450  VmaDefragmentator* EnsureDefragmentator(
5451  VmaAllocator hAllocator,
5452  uint32_t currentFrameIndex);
5453 
5454  VkResult Defragment(
5455  VmaDefragmentationStats* pDefragmentationStats,
5456  VkDeviceSize& maxBytesToMove,
5457  uint32_t& maxAllocationsToMove);
5458 
5459  void DestroyDefragmentator();
5460 
5461 private:
5462  friend class VmaDefragmentator;
5463 
5464  const VmaAllocator m_hAllocator;
5465  const uint32_t m_MemoryTypeIndex;
5466  const VkDeviceSize m_PreferredBlockSize;
5467  const size_t m_MinBlockCount;
5468  const size_t m_MaxBlockCount;
5469  const VkDeviceSize m_BufferImageGranularity;
5470  const uint32_t m_FrameInUseCount;
5471  const bool m_IsCustomPool;
5472  const bool m_ExplicitBlockSize;
5473  const uint32_t m_Algorithm;
5474  bool m_HasEmptyBlock;
5475  VMA_MUTEX m_Mutex;
5476  // Incrementally sorted by sumFreeSize, ascending.
5477  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5478  /* There can be at most one block that is completely empty (see
5479  m_HasEmptyBlock) - a hysteresis to avoid the pessimistic case of
5480  alternately creating and destroying a VkDeviceMemory. */
5481  VmaDefragmentator* m_pDefragmentator;
5482  uint32_t m_NextBlockId;
5483 
5484  VkDeviceSize CalcMaxBlockSize() const;
5485 
5486  // Finds and removes given block from vector.
5487  void Remove(VmaDeviceMemoryBlock* pBlock);
5488 
5489  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5490  // after this call.
5491  void IncrementallySortBlocks();
5492 
5493  // To be used only without CAN_MAKE_OTHER_LOST flag.
5494  VkResult AllocateFromBlock(
5495  VmaDeviceMemoryBlock* pBlock,
5496  VmaPool hCurrentPool,
5497  uint32_t currentFrameIndex,
5498  VkDeviceSize size,
5499  VkDeviceSize alignment,
5500  VmaAllocationCreateFlags allocFlags,
5501  void* pUserData,
5502  VmaSuballocationType suballocType,
5503  uint32_t strategy,
5504  VmaAllocation* pAllocation);
5505 
5506  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5507 };
5508 
5509 struct VmaPool_T
5510 {
5511  VMA_CLASS_NO_COPY(VmaPool_T)
5512 public:
5513  VmaBlockVector m_BlockVector;
5514 
5515  VmaPool_T(
5516  VmaAllocator hAllocator,
5517  const VmaPoolCreateInfo& createInfo,
5518  VkDeviceSize preferredBlockSize);
5519  ~VmaPool_T();
5520 
5521  uint32_t GetId() const { return m_Id; }
5522  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5523 
5524 #if VMA_STATS_STRING_ENABLED
5525  //void PrintDetailedMap(class VmaStringBuilder& sb);
5526 #endif
5527 
5528 private:
5529  uint32_t m_Id;
5530 };
5531 
5532 class VmaDefragmentator
5533 {
5534  VMA_CLASS_NO_COPY(VmaDefragmentator)
5535 private:
5536  const VmaAllocator m_hAllocator;
5537  VmaBlockVector* const m_pBlockVector;
5538  uint32_t m_CurrentFrameIndex;
5539  VkDeviceSize m_BytesMoved;
5540  uint32_t m_AllocationsMoved;
5541 
5542  struct AllocationInfo
5543  {
5544  VmaAllocation m_hAllocation;
5545  VkBool32* m_pChanged;
5546 
5547  AllocationInfo() :
5548  m_hAllocation(VK_NULL_HANDLE),
5549  m_pChanged(VMA_NULL)
5550  {
5551  }
5552  };
5553 
5554  struct AllocationInfoSizeGreater
5555  {
5556  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5557  {
5558  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5559  }
5560  };
5561 
5562  // Used between AddAllocation and Defragment.
5563  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5564 
5565  struct BlockInfo
5566  {
5567  VmaDeviceMemoryBlock* m_pBlock;
5568  bool m_HasNonMovableAllocations;
5569  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5570 
5571  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5572  m_pBlock(VMA_NULL),
5573  m_HasNonMovableAllocations(true),
5574  m_Allocations(pAllocationCallbacks),
5575  m_pMappedDataForDefragmentation(VMA_NULL)
5576  {
5577  }
5578 
5579  void CalcHasNonMovableAllocations()
5580  {
5581  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5582  const size_t defragmentAllocCount = m_Allocations.size();
5583  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5584  }
5585 
5586  void SortAllocationsBySizeDescecnding()
5587  {
5588  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5589  }
5590 
5591  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5592  void Unmap(VmaAllocator hAllocator);
5593 
5594  private:
5595  // Not null if the block was mapped for defragmentation only, i.e. it was not originally mapped.
5596  void* m_pMappedDataForDefragmentation;
5597  };
5598 
5599  struct BlockPointerLess
5600  {
5601  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5602  {
5603  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5604  }
5605  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5606  {
5607  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5608  }
5609  };
5610 
5611  // 1. Blocks with some non-movable allocations go first.
5612  // 2. Blocks with smaller sumFreeSize go first.
5613  struct BlockInfoCompareMoveDestination
5614  {
5615  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5616  {
5617  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5618  {
5619  return true;
5620  }
5621  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5622  {
5623  return false;
5624  }
5625  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5626  {
5627  return true;
5628  }
5629  return false;
5630  }
5631  };
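 // Example ordering: blocks A (has non-movable allocations, 1 MiB free),
 // B (all movable, 512 KiB free) and C (all movable, 2 MiB free) sort as
 // A, B, C: preferred move destinations come first.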
5632 
5633  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5634  BlockInfoVector m_Blocks;
5635 
5636  VkResult DefragmentRound(
5637  VkDeviceSize maxBytesToMove,
5638  uint32_t maxAllocationsToMove);
5639 
5640  static bool MoveMakesSense(
5641  size_t dstBlockIndex, VkDeviceSize dstOffset,
5642  size_t srcBlockIndex, VkDeviceSize srcOffset);
5643 
5644 public:
5645  VmaDefragmentator(
5646  VmaAllocator hAllocator,
5647  VmaBlockVector* pBlockVector,
5648  uint32_t currentFrameIndex);
5649 
5650  ~VmaDefragmentator();
5651 
5652  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5653  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5654 
5655  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5656 
5657  VkResult Defragment(
5658  VkDeviceSize maxBytesToMove,
5659  uint32_t maxAllocationsToMove);
5660 };
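// Typical flow (sketch): VmaBlockVector::EnsureDefragmentator() creates this
// object, AddAllocation() registers each candidate allocation, then
// Defragment(maxBytesToMove, maxAllocationsToMove) performs the moves;
// GetBytesMoved()/GetAllocationsMoved() report the result and
// DestroyDefragmentator() cleans up.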
5661 
5662 struct VmaDefragmentationContext_T
5663 {
5664 public:
5665  VmaDefragmentationContext_T();
5666  ~VmaDefragmentationContext_T();
5667 
5668 private:
5669 };
5670 
5671 #if VMA_RECORDING_ENABLED
5672 
5673 class VmaRecorder
5674 {
5675 public:
5676  VmaRecorder();
5677  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5678  void WriteConfiguration(
5679  const VkPhysicalDeviceProperties& devProps,
5680  const VkPhysicalDeviceMemoryProperties& memProps,
5681  bool dedicatedAllocationExtensionEnabled);
5682  ~VmaRecorder();
5683 
5684  void RecordCreateAllocator(uint32_t frameIndex);
5685  void RecordDestroyAllocator(uint32_t frameIndex);
5686  void RecordCreatePool(uint32_t frameIndex,
5687  const VmaPoolCreateInfo& createInfo,
5688  VmaPool pool);
5689  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5690  void RecordAllocateMemory(uint32_t frameIndex,
5691  const VkMemoryRequirements& vkMemReq,
5692  const VmaAllocationCreateInfo& createInfo,
5693  VmaAllocation allocation);
5694  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5695  const VkMemoryRequirements& vkMemReq,
5696  bool requiresDedicatedAllocation,
5697  bool prefersDedicatedAllocation,
5698  const VmaAllocationCreateInfo& createInfo,
5699  VmaAllocation allocation);
5700  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5701  const VkMemoryRequirements& vkMemReq,
5702  bool requiresDedicatedAllocation,
5703  bool prefersDedicatedAllocation,
5704  const VmaAllocationCreateInfo& createInfo,
5705  VmaAllocation allocation);
5706  void RecordFreeMemory(uint32_t frameIndex,
5707  VmaAllocation allocation);
5708  void RecordSetAllocationUserData(uint32_t frameIndex,
5709  VmaAllocation allocation,
5710  const void* pUserData);
5711  void RecordCreateLostAllocation(uint32_t frameIndex,
5712  VmaAllocation allocation);
5713  void RecordMapMemory(uint32_t frameIndex,
5714  VmaAllocation allocation);
5715  void RecordUnmapMemory(uint32_t frameIndex,
5716  VmaAllocation allocation);
5717  void RecordFlushAllocation(uint32_t frameIndex,
5718  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5719  void RecordInvalidateAllocation(uint32_t frameIndex,
5720  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5721  void RecordCreateBuffer(uint32_t frameIndex,
5722  const VkBufferCreateInfo& bufCreateInfo,
5723  const VmaAllocationCreateInfo& allocCreateInfo,
5724  VmaAllocation allocation);
5725  void RecordCreateImage(uint32_t frameIndex,
5726  const VkImageCreateInfo& imageCreateInfo,
5727  const VmaAllocationCreateInfo& allocCreateInfo,
5728  VmaAllocation allocation);
5729  void RecordDestroyBuffer(uint32_t frameIndex,
5730  VmaAllocation allocation);
5731  void RecordDestroyImage(uint32_t frameIndex,
5732  VmaAllocation allocation);
5733  void RecordTouchAllocation(uint32_t frameIndex,
5734  VmaAllocation allocation);
5735  void RecordGetAllocationInfo(uint32_t frameIndex,
5736  VmaAllocation allocation);
5737  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5738  VmaPool pool);
5739 
5740 private:
5741  struct CallParams
5742  {
5743  uint32_t threadId;
5744  double time;
5745  };
5746 
5747  class UserDataString
5748  {
5749  public:
5750  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5751  const char* GetString() const { return m_Str; }
5752 
5753  private:
5754  char m_PtrStr[17];
5755  const char* m_Str;
5756  };
5757 
5758  bool m_UseMutex;
5759  VmaRecordFlags m_Flags;
5760  FILE* m_File;
5761  VMA_MUTEX m_FileMutex;
5762  int64_t m_Freq;
5763  int64_t m_StartCounter;
5764 
5765  void GetBasicParams(CallParams& outParams);
5766  void Flush();
5767 };
5768 
5769 #endif // #if VMA_RECORDING_ENABLED
5770 
5771 // Main allocator object.
5772 struct VmaAllocator_T
5773 {
5774  VMA_CLASS_NO_COPY(VmaAllocator_T)
5775 public:
5776  bool m_UseMutex;
5777  bool m_UseKhrDedicatedAllocation;
5778  VkDevice m_hDevice;
5779  bool m_AllocationCallbacksSpecified;
5780  VkAllocationCallbacks m_AllocationCallbacks;
5781  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5782 
5783  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5784  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5785  VMA_MUTEX m_HeapSizeLimitMutex;
5786 
5787  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5788  VkPhysicalDeviceMemoryProperties m_MemProps;
5789 
5790  // Default pools.
5791  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5792 
5793  // Each vector is sorted by memory (handle value).
5794  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5795  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5796  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5797 
5798  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5799  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5800  ~VmaAllocator_T();
5801 
5802  const VkAllocationCallbacks* GetAllocationCallbacks() const
5803  {
5804  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5805  }
5806  const VmaVulkanFunctions& GetVulkanFunctions() const
5807  {
5808  return m_VulkanFunctions;
5809  }
5810 
5811  VkDeviceSize GetBufferImageGranularity() const
5812  {
5813  return VMA_MAX(
5814  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5815  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5816  }
5817 
5818  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5819  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5820 
5821  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5822  {
5823  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5824  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5825  }
5826  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5827  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5828  {
5829  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5830  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5831  }
5832  // Minimum alignment for all allocations in specific memory type.
5833  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5834  {
5835  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5836  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5837  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5838  }
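 // Example: with nonCoherentAtomSize == 64, allocations in a HOST_VISIBLE
 // but not HOST_COHERENT memory type get at least 64-byte alignment, so
 // ranges passed to vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges
 // can safely be rounded to atom boundaries.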
5839 
5840  bool IsIntegratedGpu() const
5841  {
5842  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5843  }
5844 
5845 #if VMA_RECORDING_ENABLED
5846  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5847 #endif
5848 
5849  void GetBufferMemoryRequirements(
5850  VkBuffer hBuffer,
5851  VkMemoryRequirements& memReq,
5852  bool& requiresDedicatedAllocation,
5853  bool& prefersDedicatedAllocation) const;
5854  void GetImageMemoryRequirements(
5855  VkImage hImage,
5856  VkMemoryRequirements& memReq,
5857  bool& requiresDedicatedAllocation,
5858  bool& prefersDedicatedAllocation) const;
5859 
5860  // Main allocation function.
5861  VkResult AllocateMemory(
5862  const VkMemoryRequirements& vkMemReq,
5863  bool requiresDedicatedAllocation,
5864  bool prefersDedicatedAllocation,
5865  VkBuffer dedicatedBuffer,
5866  VkImage dedicatedImage,
5867  const VmaAllocationCreateInfo& createInfo,
5868  VmaSuballocationType suballocType,
5869  VmaAllocation* pAllocation);
5870 
5871  // Main deallocation function.
5872  void FreeMemory(const VmaAllocation allocation);
5873 
5874  void CalculateStats(VmaStats* pStats);
5875 
5876 #if VMA_STATS_STRING_ENABLED
5877  void PrintDetailedMap(class VmaJsonWriter& json);
5878 #endif
5879 
5880  VkResult DefragmentationBegin(
5881  const VmaDefragmentationInfo2& info,
5882  VmaDefragmentationStats* pStats,
5883  VmaDefragmentationContext* pContext);
5884  VkResult DefragmentationEnd(
5885  VmaDefragmentationContext context);
5886 
5887  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5888  bool TouchAllocation(VmaAllocation hAllocation);
5889 
5890  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5891  void DestroyPool(VmaPool pool);
5892  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5893 
5894  void SetCurrentFrameIndex(uint32_t frameIndex);
5895  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5896 
5897  void MakePoolAllocationsLost(
5898  VmaPool hPool,
5899  size_t* pLostAllocationCount);
5900  VkResult CheckPoolCorruption(VmaPool hPool);
5901  VkResult CheckCorruption(uint32_t memoryTypeBits);
5902 
5903  void CreateLostAllocation(VmaAllocation* pAllocation);
5904 
5905  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5906  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5907 
5908  VkResult Map(VmaAllocation hAllocation, void** ppData);
5909  void Unmap(VmaAllocation hAllocation);
5910 
5911  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5912  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5913 
5914  void FlushOrInvalidateAllocation(
5915  VmaAllocation hAllocation,
5916  VkDeviceSize offset, VkDeviceSize size,
5917  VMA_CACHE_OPERATION op);
5918 
5919  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5920 
5921 private:
5922  VkDeviceSize m_PreferredLargeHeapBlockSize;
5923 
5924  VkPhysicalDevice m_PhysicalDevice;
5925  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5926 
5927  VMA_MUTEX m_PoolsMutex;
5928  // Protected by m_PoolsMutex. Sorted by pointer value.
5929  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5930  uint32_t m_NextPoolId;
5931 
5932  VmaVulkanFunctions m_VulkanFunctions;
5933 
5934 #if VMA_RECORDING_ENABLED
5935  VmaRecorder* m_pRecorder;
5936 #endif
5937 
5938  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5939 
5940  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5941 
5942  VkResult AllocateMemoryOfType(
5943  VkDeviceSize size,
5944  VkDeviceSize alignment,
5945  bool dedicatedAllocation,
5946  VkBuffer dedicatedBuffer,
5947  VkImage dedicatedImage,
5948  const VmaAllocationCreateInfo& createInfo,
5949  uint32_t memTypeIndex,
5950  VmaSuballocationType suballocType,
5951  VmaAllocation* pAllocation);
5952 
5953  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5954  VkResult AllocateDedicatedMemory(
5955  VkDeviceSize size,
5956  VmaSuballocationType suballocType,
5957  uint32_t memTypeIndex,
5958  bool map,
5959  bool isUserDataString,
5960  void* pUserData,
5961  VkBuffer dedicatedBuffer,
5962  VkImage dedicatedImage,
5963  VmaAllocation* pAllocation);
5964 
5965  // Frees dedicated memory of the given allocation. It must have been registered in m_pDedicatedAllocations.
5966  void FreeDedicatedMemory(VmaAllocation allocation);
5967 };
5968 
5969 ////////////////////////////////////////////////////////////////////////////////
5970 // Memory allocation #2 after VmaAllocator_T definition
5971 
5972 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5973 {
5974  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5975 }
5976 
5977 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5978 {
5979  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5980 }
5981 
5982 template<typename T>
5983 static T* VmaAllocate(VmaAllocator hAllocator)
5984 {
5985  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5986 }
5987 
5988 template<typename T>
5989 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5990 {
5991  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5992 }
5993 
5994 template<typename T>
5995 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5996 {
5997  if(ptr != VMA_NULL)
5998  {
5999  ptr->~T();
6000  VmaFree(hAllocator, ptr);
6001  }
6002 }
6003 
6004 template<typename T>
6005 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6006 {
6007  if(ptr != VMA_NULL)
6008  {
6009  for(size_t i = count; i--; )
6010  ptr[i].~T();
6011  VmaFree(hAllocator, ptr);
6012  }
6013 }
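// Usage sketch (illustrative): raw storage comes from VmaAllocate, the object
// is constructed with placement new, and vma_delete later runs the destructor
// before returning the memory:
//   VmaStringBuilder* sb =
//       new(VmaAllocate<VmaStringBuilder>(hAllocator)) VmaStringBuilder(hAllocator);
//   vma_delete(hAllocator, sb);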
6014 
6015 ////////////////////////////////////////////////////////////////////////////////
6016 // VmaStringBuilder
6017 
6018 #if VMA_STATS_STRING_ENABLED
6019 
6020 class VmaStringBuilder
6021 {
6022 public:
6023  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
6024  size_t GetLength() const { return m_Data.size(); }
6025  const char* GetData() const { return m_Data.data(); }
6026 
6027  void Add(char ch) { m_Data.push_back(ch); }
6028  void Add(const char* pStr);
6029  void AddNewLine() { Add('\n'); }
6030  void AddNumber(uint32_t num);
6031  void AddNumber(uint64_t num);
6032  void AddPointer(const void* ptr);
6033 
6034 private:
6035  VmaVector< char, VmaStlAllocator<char> > m_Data;
6036 };
6037 
6038 void VmaStringBuilder::Add(const char* pStr)
6039 {
6040  const size_t strLen = strlen(pStr);
6041  if(strLen > 0)
6042  {
6043  const size_t oldCount = m_Data.size();
6044  m_Data.resize(oldCount + strLen);
6045  memcpy(m_Data.data() + oldCount, pStr, strLen);
6046  }
6047 }
6048 
6049 void VmaStringBuilder::AddNumber(uint32_t num)
6050 {
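 // 11 chars: up to 10 digits of UINT32_MAX (4294967295) plus terminating null.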
6051  char buf[11];
6052  VmaUint32ToStr(buf, sizeof(buf), num);
6053  Add(buf);
6054 }
6055 
6056 void VmaStringBuilder::AddNumber(uint64_t num)
6057 {
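 // 21 chars: up to 20 digits of UINT64_MAX plus terminating null.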
6058  char buf[21];
6059  VmaUint64ToStr(buf, sizeof(buf), num);
6060  Add(buf);
6061 }
6062 
6063 void VmaStringBuilder::AddPointer(const void* ptr)
6064 {
6065  char buf[21];
6066  VmaPtrToStr(buf, sizeof(buf), ptr);
6067  Add(buf);
6068 }
6069 
6070 #endif // #if VMA_STATS_STRING_ENABLED
6071 
6072 ////////////////////////////////////////////////////////////////////////////////
6073 // VmaJsonWriter
6074 
6075 #if VMA_STATS_STRING_ENABLED
6076 
6077 class VmaJsonWriter
6078 {
6079  VMA_CLASS_NO_COPY(VmaJsonWriter)
6080 public:
6081  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6082  ~VmaJsonWriter();
6083 
6084  void BeginObject(bool singleLine = false);
6085  void EndObject();
6086 
6087  void BeginArray(bool singleLine = false);
6088  void EndArray();
6089 
6090  void WriteString(const char* pStr);
6091  void BeginString(const char* pStr = VMA_NULL);
6092  void ContinueString(const char* pStr);
6093  void ContinueString(uint32_t n);
6094  void ContinueString(uint64_t n);
6095  void ContinueString_Pointer(const void* ptr);
6096  void EndString(const char* pStr = VMA_NULL);
6097 
6098  void WriteNumber(uint32_t n);
6099  void WriteNumber(uint64_t n);
6100  void WriteBool(bool b);
6101  void WriteNull();
6102 
6103 private:
6104  static const char* const INDENT;
6105 
6106  enum COLLECTION_TYPE
6107  {
6108  COLLECTION_TYPE_OBJECT,
6109  COLLECTION_TYPE_ARRAY,
6110  };
6111  struct StackItem
6112  {
6113  COLLECTION_TYPE type;
6114  uint32_t valueCount;
6115  bool singleLineMode;
6116  };
6117 
6118  VmaStringBuilder& m_SB;
6119  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6120  bool m_InsideString;
6121 
6122  void BeginValue(bool isString);
6123  void WriteIndent(bool oneLess = false);
6124 };
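// Usage sketch (illustrative; assumes a VmaStringBuilder sb and allocation
// callbacks are available):
//   VmaJsonWriter json(pAllocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("TotalBytes"); // keys must be strings (see BeginValue)
//   json.WriteNumber(65536u);
//   json.EndObject();
// produces {"TotalBytes": 65536}, with newlines and INDENT applied when the
// object is not in single-line mode.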
6125 
6126 const char* const VmaJsonWriter::INDENT = " ";
6127 
6128 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6129  m_SB(sb),
6130  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6131  m_InsideString(false)
6132 {
6133 }
6134 
6135 VmaJsonWriter::~VmaJsonWriter()
6136 {
6137  VMA_ASSERT(!m_InsideString);
6138  VMA_ASSERT(m_Stack.empty());
6139 }
6140 
6141 void VmaJsonWriter::BeginObject(bool singleLine)
6142 {
6143  VMA_ASSERT(!m_InsideString);
6144 
6145  BeginValue(false);
6146  m_SB.Add('{');
6147 
6148  StackItem item;
6149  item.type = COLLECTION_TYPE_OBJECT;
6150  item.valueCount = 0;
6151  item.singleLineMode = singleLine;
6152  m_Stack.push_back(item);
6153 }
6154 
6155 void VmaJsonWriter::EndObject()
6156 {
6157  VMA_ASSERT(!m_InsideString);
6158 
6159  WriteIndent(true);
6160  m_SB.Add('}');
6161 
6162  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6163  m_Stack.pop_back();
6164 }
6165 
6166 void VmaJsonWriter::BeginArray(bool singleLine)
6167 {
6168  VMA_ASSERT(!m_InsideString);
6169 
6170  BeginValue(false);
6171  m_SB.Add('[');
6172 
6173  StackItem item;
6174  item.type = COLLECTION_TYPE_ARRAY;
6175  item.valueCount = 0;
6176  item.singleLineMode = singleLine;
6177  m_Stack.push_back(item);
6178 }
6179 
6180 void VmaJsonWriter::EndArray()
6181 {
6182  VMA_ASSERT(!m_InsideString);
6183 
6184  WriteIndent(true);
6185  m_SB.Add(']');
6186 
6187  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6188  m_Stack.pop_back();
6189 }
6190 
6191 void VmaJsonWriter::WriteString(const char* pStr)
6192 {
6193  BeginString(pStr);
6194  EndString();
6195 }
6196 
6197 void VmaJsonWriter::BeginString(const char* pStr)
6198 {
6199  VMA_ASSERT(!m_InsideString);
6200 
6201  BeginValue(true);
6202  m_SB.Add('"');
6203  m_InsideString = true;
6204  if(pStr != VMA_NULL && pStr[0] != '\0')
6205  {
6206  ContinueString(pStr);
6207  }
6208 }
6209 
6210 void VmaJsonWriter::ContinueString(const char* pStr)
6211 {
6212  VMA_ASSERT(m_InsideString);
6213 
6214  const size_t strLen = strlen(pStr);
6215  for(size_t i = 0; i < strLen; ++i)
6216  {
6217  char ch = pStr[i];
6218  if(ch == '\\')
6219  {
6220  m_SB.Add("\\\\");
6221  }
6222  else if(ch == '"')
6223  {
6224  m_SB.Add("\\\"");
6225  }
6226  else if(ch >= 32)
6227  {
6228  m_SB.Add(ch);
6229  }
6230  else switch(ch)
6231  {
6232  case '\b':
6233  m_SB.Add("\\b");
6234  break;
6235  case '\f':
6236  m_SB.Add("\\f");
6237  break;
6238  case '\n':
6239  m_SB.Add("\\n");
6240  break;
6241  case '\r':
6242  m_SB.Add("\\r");
6243  break;
6244  case '\t':
6245  m_SB.Add("\\t");
6246  break;
6247  default:
6248  VMA_ASSERT(0 && "Character not currently supported.");
6249  break;
6250  }
6251  }
6252 }
6253 
6254 void VmaJsonWriter::ContinueString(uint32_t n)
6255 {
6256  VMA_ASSERT(m_InsideString);
6257  m_SB.AddNumber(n);
6258 }
6259 
6260 void VmaJsonWriter::ContinueString(uint64_t n)
6261 {
6262  VMA_ASSERT(m_InsideString);
6263  m_SB.AddNumber(n);
6264 }
6265 
6266 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6267 {
6268  VMA_ASSERT(m_InsideString);
6269  m_SB.AddPointer(ptr);
6270 }
6271 
6272 void VmaJsonWriter::EndString(const char* pStr)
6273 {
6274  VMA_ASSERT(m_InsideString);
6275  if(pStr != VMA_NULL && pStr[0] != '\0')
6276  {
6277  ContinueString(pStr);
6278  }
6279  m_SB.Add('"');
6280  m_InsideString = false;
6281 }
6282 
6283 void VmaJsonWriter::WriteNumber(uint32_t n)
6284 {
6285  VMA_ASSERT(!m_InsideString);
6286  BeginValue(false);
6287  m_SB.AddNumber(n);
6288 }
6289 
6290 void VmaJsonWriter::WriteNumber(uint64_t n)
6291 {
6292  VMA_ASSERT(!m_InsideString);
6293  BeginValue(false);
6294  m_SB.AddNumber(n);
6295 }
6296 
6297 void VmaJsonWriter::WriteBool(bool b)
6298 {
6299  VMA_ASSERT(!m_InsideString);
6300  BeginValue(false);
6301  m_SB.Add(b ? "true" : "false");
6302 }
6303 
6304 void VmaJsonWriter::WriteNull()
6305 {
6306  VMA_ASSERT(!m_InsideString);
6307  BeginValue(false);
6308  m_SB.Add("null");
6309 }
6310 
6311 void VmaJsonWriter::BeginValue(bool isString)
6312 {
6313  if(!m_Stack.empty())
6314  {
6315  StackItem& currItem = m_Stack.back();
6316  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6317  currItem.valueCount % 2 == 0)
6318  {
6319  VMA_ASSERT(isString);
6320  }
6321 
6322  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6323  currItem.valueCount % 2 != 0)
6324  {
6325  m_SB.Add(": ");
6326  }
6327  else if(currItem.valueCount > 0)
6328  {
6329  m_SB.Add(", ");
6330  WriteIndent();
6331  }
6332  else
6333  {
6334  WriteIndent();
6335  }
6336  ++currItem.valueCount;
6337  }
6338 }
6339 
6340 void VmaJsonWriter::WriteIndent(bool oneLess)
6341 {
6342  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6343  {
6344  m_SB.AddNewLine();
6345 
6346  size_t count = m_Stack.size();
6347  if(count > 0 && oneLess)
6348  {
6349  --count;
6350  }
6351  for(size_t i = 0; i < count; ++i)
6352  {
6353  m_SB.Add(INDENT);
6354  }
6355  }
6356 }
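// A usage sketch of the writer above (illustrative only, not part of the library):
// inside an object, keys and values alternate, and BeginValue() inserts ':', ','
// and indentation automatically. It assumes a VmaStringBuilder `sb` built from the
// same allocation callbacks:
#if 0
void ExampleWriteJson(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
{
    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();          // {
    json.WriteString("Name");    //   "Name":
    json.WriteString("Block");   //   "Block",
    json.WriteString("Sizes");   //   "Sizes":
    json.BeginArray(true);       //   [256, 1024]  (single-line array)
    json.WriteNumber(256u);
    json.WriteNumber(1024u);
    json.EndArray();
    json.EndObject();            // }
    // sb now holds {"Name": "Block", "Sizes": [256, 1024]},
    // with newlines and INDENT between the object members.
}
#endif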
6357 
6358 #endif // #if VMA_STATS_STRING_ENABLED
6359 
6360 ////////////////////////////////////////////////////////////////////////////////
6361 
6362 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6363 {
6364  if(IsUserDataString())
6365  {
6366  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6367 
6368  FreeUserDataString(hAllocator);
6369 
6370  if(pUserData != VMA_NULL)
6371  {
6372  const char* const newStrSrc = (char*)pUserData;
6373  const size_t newStrLen = strlen(newStrSrc);
6374  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6375  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6376  m_pUserData = newStrDst;
6377  }
6378  }
6379  else
6380  {
6381  m_pUserData = pUserData;
6382  }
6383 }
6384 
6385 void VmaAllocation_T::ChangeBlockAllocation(
6386  VmaAllocator hAllocator,
6387  VmaDeviceMemoryBlock* block,
6388  VkDeviceSize offset)
6389 {
6390  VMA_ASSERT(block != VMA_NULL);
6391  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6392 
6393  // Move mapping reference counter from old block to new block.
6394  if(block != m_BlockAllocation.m_Block)
6395  {
6396  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6397  if(IsPersistentMap())
6398  ++mapRefCount;
6399  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6400  block->Map(hAllocator, mapRefCount, VMA_NULL);
6401  }
6402 
6403  m_BlockAllocation.m_Block = block;
6404  m_BlockAllocation.m_Offset = offset;
6405 }
6406 
6407 VkDeviceSize VmaAllocation_T::GetOffset() const
6408 {
6409  switch(m_Type)
6410  {
6411  case ALLOCATION_TYPE_BLOCK:
6412  return m_BlockAllocation.m_Offset;
6413  case ALLOCATION_TYPE_DEDICATED:
6414  return 0;
6415  default:
6416  VMA_ASSERT(0);
6417  return 0;
6418  }
6419 }
6420 
6421 VkDeviceMemory VmaAllocation_T::GetMemory() const
6422 {
6423  switch(m_Type)
6424  {
6425  case ALLOCATION_TYPE_BLOCK:
6426  return m_BlockAllocation.m_Block->GetDeviceMemory();
6427  case ALLOCATION_TYPE_DEDICATED:
6428  return m_DedicatedAllocation.m_hMemory;
6429  default:
6430  VMA_ASSERT(0);
6431  return VK_NULL_HANDLE;
6432  }
6433 }
6434 
6435 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6436 {
6437  switch(m_Type)
6438  {
6439  case ALLOCATION_TYPE_BLOCK:
6440  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6441  case ALLOCATION_TYPE_DEDICATED:
6442  return m_DedicatedAllocation.m_MemoryTypeIndex;
6443  default:
6444  VMA_ASSERT(0);
6445  return UINT32_MAX;
6446  }
6447 }
6448 
6449 void* VmaAllocation_T::GetMappedData() const
6450 {
6451  switch(m_Type)
6452  {
6453  case ALLOCATION_TYPE_BLOCK:
6454  if(m_MapCount != 0)
6455  {
6456  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6457  VMA_ASSERT(pBlockData != VMA_NULL);
6458  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6459  }
6460  else
6461  {
6462  return VMA_NULL;
6463  }
6464  break;
6465  case ALLOCATION_TYPE_DEDICATED:
6466  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6467  return m_DedicatedAllocation.m_pMappedData;
6468  default:
6469  VMA_ASSERT(0);
6470  return VMA_NULL;
6471  }
6472 }
6473 
6474 bool VmaAllocation_T::CanBecomeLost() const
6475 {
6476  switch(m_Type)
6477  {
6478  case ALLOCATION_TYPE_BLOCK:
6479  return m_BlockAllocation.m_CanBecomeLost;
6480  case ALLOCATION_TYPE_DEDICATED:
6481  return false;
6482  default:
6483  VMA_ASSERT(0);
6484  return false;
6485  }
6486 }
6487 
6488 VmaPool VmaAllocation_T::GetPool() const
6489 {
6490  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6491  return m_BlockAllocation.m_hPool;
6492 }
6493 
6494 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6495 {
6496  VMA_ASSERT(CanBecomeLost());
6497 
6498  /*
6499  Warning: This is a carefully designed algorithm.
6500  Do not modify unless you really know what you're doing :)
6501  */
6502  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6503  for(;;)
6504  {
6505  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6506  {
6507  VMA_ASSERT(0);
6508  return false;
6509  }
6510  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6511  {
6512  return false;
6513  }
6514  else // Last use time earlier than current time.
6515  {
6516  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6517  {
6518  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6519  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6520  return true;
6521  }
6522  }
6523  }
6524 }
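// The loop above is a lock-free compare-exchange retry: re-read the atomic,
// re-check the preconditions, and publish the LOST marker only if no other thread
// changed the value in the meantime. A minimal standalone sketch of the same
// pattern using std::atomic directly (hypothetical names, for illustration only):
#if 0
#include <atomic>

bool TryMarkLost(std::atomic<uint32_t>& lastUseFrameIndex,
    uint32_t currentFrameIndex, uint32_t frameInUseCount, uint32_t lostValue)
{
    uint32_t localLastUse = lastUseFrameIndex.load();
    for(;;)
    {
        if(localLastUse == lostValue)
        {
            return false; // Another thread already marked it lost.
        }
        if(localLastUse + frameInUseCount >= currentFrameIndex)
        {
            return false; // Still potentially in use - cannot become lost yet.
        }
        // On failure, localLastUse is reloaded and the checks above repeat.
        if(lastUseFrameIndex.compare_exchange_weak(localLastUse, lostValue))
        {
            return true;
        }
    }
}
#endif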
6525 
6526 #if VMA_STATS_STRING_ENABLED
6527 
6528 // Correspond to values of enum VmaSuballocationType.
6529 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6530  "FREE",
6531  "UNKNOWN",
6532  "BUFFER",
6533  "IMAGE_UNKNOWN",
6534  "IMAGE_LINEAR",
6535  "IMAGE_OPTIMAL",
6536 };
6537 
6538 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6539 {
6540  json.WriteString("Type");
6541  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6542 
6543  json.WriteString("Size");
6544  json.WriteNumber(m_Size);
6545 
6546  if(m_pUserData != VMA_NULL)
6547  {
6548  json.WriteString("UserData");
6549  if(IsUserDataString())
6550  {
6551  json.WriteString((const char*)m_pUserData);
6552  }
6553  else
6554  {
6555  json.BeginString();
6556  json.ContinueString_Pointer(m_pUserData);
6557  json.EndString();
6558  }
6559  }
6560 
6561  json.WriteString("CreationFrameIndex");
6562  json.WriteNumber(m_CreationFrameIndex);
6563 
6564  json.WriteString("LastUseFrameIndex");
6565  json.WriteNumber(GetLastUseFrameIndex());
6566 
6567  if(m_BufferImageUsage != 0)
6568  {
6569  json.WriteString("Usage");
6570  json.WriteNumber(m_BufferImageUsage);
6571  }
6572 }
6573 
6574 #endif
6575 
6576 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6577 {
6578  VMA_ASSERT(IsUserDataString());
6579  if(m_pUserData != VMA_NULL)
6580  {
6581  char* const oldStr = (char*)m_pUserData;
6582  const size_t oldStrLen = strlen(oldStr);
6583  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6584  m_pUserData = VMA_NULL;
6585  }
6586 }
6587 
6588 void VmaAllocation_T::BlockAllocMap()
6589 {
6590  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6591 
6592  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6593  {
6594  ++m_MapCount;
6595  }
6596  else
6597  {
6598  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6599  }
6600 }
6601 
6602 void VmaAllocation_T::BlockAllocUnmap()
6603 {
6604  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6605 
6606  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6607  {
6608  --m_MapCount;
6609  }
6610  else
6611  {
6612  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6613  }
6614 }
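// A note on the packing assumed by the 0x7F checks above and below: the top bit of
// m_MapCount appears to be MAP_COUNT_FLAG_PERSISTENT_MAP and the low 7 bits hold the
// map reference count. For example, m_MapCount == 0x82 would mean "persistently
// mapped, plus two explicit map calls outstanding", and
// (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 2.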
6615 
6616 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6617 {
6618  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6619 
6620  if(m_MapCount != 0)
6621  {
6622  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6623  {
6624  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6625  *ppData = m_DedicatedAllocation.m_pMappedData;
6626  ++m_MapCount;
6627  return VK_SUCCESS;
6628  }
6629  else
6630  {
6631  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6632  return VK_ERROR_MEMORY_MAP_FAILED;
6633  }
6634  }
6635  else
6636  {
6637  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6638  hAllocator->m_hDevice,
6639  m_DedicatedAllocation.m_hMemory,
6640  0, // offset
6641  VK_WHOLE_SIZE,
6642  0, // flags
6643  ppData);
6644  if(result == VK_SUCCESS)
6645  {
6646  m_DedicatedAllocation.m_pMappedData = *ppData;
6647  m_MapCount = 1;
6648  }
6649  return result;
6650  }
6651 }
6652 
6653 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6654 {
6655  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6656 
6657  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6658  {
6659  --m_MapCount;
6660  if(m_MapCount == 0)
6661  {
6662  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6663  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6664  hAllocator->m_hDevice,
6665  m_DedicatedAllocation.m_hMemory);
6666  }
6667  }
6668  else
6669  {
6670  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6671  }
6672 }
6673 
6674 #if VMA_STATS_STRING_ENABLED
6675 
6676 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6677 {
6678  json.BeginObject();
6679 
6680  json.WriteString("Blocks");
6681  json.WriteNumber(stat.blockCount);
6682 
6683  json.WriteString("Allocations");
6684  json.WriteNumber(stat.allocationCount);
6685 
6686  json.WriteString("UnusedRanges");
6687  json.WriteNumber(stat.unusedRangeCount);
6688 
6689  json.WriteString("UsedBytes");
6690  json.WriteNumber(stat.usedBytes);
6691 
6692  json.WriteString("UnusedBytes");
6693  json.WriteNumber(stat.unusedBytes);
6694 
6695  if(stat.allocationCount > 1)
6696  {
6697  json.WriteString("AllocationSize");
6698  json.BeginObject(true);
6699  json.WriteString("Min");
6700  json.WriteNumber(stat.allocationSizeMin);
6701  json.WriteString("Avg");
6702  json.WriteNumber(stat.allocationSizeAvg);
6703  json.WriteString("Max");
6704  json.WriteNumber(stat.allocationSizeMax);
6705  json.EndObject();
6706  }
6707 
6708  if(stat.unusedRangeCount > 1)
6709  {
6710  json.WriteString("UnusedRangeSize");
6711  json.BeginObject(true);
6712  json.WriteString("Min");
6713  json.WriteNumber(stat.unusedRangeSizeMin);
6714  json.WriteString("Avg");
6715  json.WriteNumber(stat.unusedRangeSizeAvg);
6716  json.WriteString("Max");
6717  json.WriteNumber(stat.unusedRangeSizeMax);
6718  json.EndObject();
6719  }
6720 
6721  json.EndObject();
6722 }
6723 
6724 #endif // #if VMA_STATS_STRING_ENABLED
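// For orientation, a hypothetical fragment of the JSON that VmaPrintStatInfo emits
// (field names taken from the code above; the numbers and whitespace are illustrative):
//
//   {
//     "Blocks": 2,
//     "Allocations": 48,
//     "UnusedRanges": 3,
//     "UsedBytes": 10485760,
//     "UnusedBytes": 524288,
//     "AllocationSize": { "Min": 4096, "Avg": 218453, "Max": 1048576 }
//   }
//
// The "AllocationSize" and "UnusedRangeSize" sub-objects appear only when there is
// more than one allocation or unused range, respectively.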
6725 
6726 struct VmaSuballocationItemSizeLess
6727 {
6728  bool operator()(
6729  const VmaSuballocationList::iterator lhs,
6730  const VmaSuballocationList::iterator rhs) const
6731  {
6732  return lhs->size < rhs->size;
6733  }
6734  bool operator()(
6735  const VmaSuballocationList::iterator lhs,
6736  VkDeviceSize rhsSize) const
6737  {
6738  return lhs->size < rhsSize;
6739  }
6740 };
6741 
6742 
6743 ////////////////////////////////////////////////////////////////////////////////
6744 // class VmaBlockMetadata
6745 
6745 
6746 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6747  m_Size(0),
6748  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6749 {
6750 }
6751 
6752 #if VMA_STATS_STRING_ENABLED
6753 
6754 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6755  VkDeviceSize unusedBytes,
6756  size_t allocationCount,
6757  size_t unusedRangeCount) const
6758 {
6759  json.BeginObject();
6760 
6761  json.WriteString("TotalBytes");
6762  json.WriteNumber(GetSize());
6763 
6764  json.WriteString("UnusedBytes");
6765  json.WriteNumber(unusedBytes);
6766 
6767  json.WriteString("Allocations");
6768  json.WriteNumber((uint64_t)allocationCount);
6769 
6770  json.WriteString("UnusedRanges");
6771  json.WriteNumber((uint64_t)unusedRangeCount);
6772 
6773  json.WriteString("Suballocations");
6774  json.BeginArray();
6775 }
6776 
6777 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6778  VkDeviceSize offset,
6779  VmaAllocation hAllocation) const
6780 {
6781  json.BeginObject(true);
6782 
6783  json.WriteString("Offset");
6784  json.WriteNumber(offset);
6785 
6786  hAllocation->PrintParameters(json);
6787 
6788  json.EndObject();
6789 }
6790 
6791 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6792  VkDeviceSize offset,
6793  VkDeviceSize size) const
6794 {
6795  json.BeginObject(true);
6796 
6797  json.WriteString("Offset");
6798  json.WriteNumber(offset);
6799 
6800  json.WriteString("Type");
6801  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6802 
6803  json.WriteString("Size");
6804  json.WriteNumber(size);
6805 
6806  json.EndObject();
6807 }
6808 
6809 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6810 {
6811  json.EndArray();
6812  json.EndObject();
6813 }
6814 
6815 #endif // #if VMA_STATS_STRING_ENABLED
6816 
6817 ////////////////////////////////////////////////////////////////////////////////
6818 // class VmaBlockMetadata_Generic
6819 
6820 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6821  VmaBlockMetadata(hAllocator),
6822  m_FreeCount(0),
6823  m_SumFreeSize(0),
6824  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6825  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6826 {
6827 }
6828 
6829 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6830 {
6831 }
6832 
6833 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6834 {
6835  VmaBlockMetadata::Init(size);
6836 
6837  m_FreeCount = 1;
6838  m_SumFreeSize = size;
6839 
6840  VmaSuballocation suballoc = {};
6841  suballoc.offset = 0;
6842  suballoc.size = size;
6843  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6844  suballoc.hAllocation = VK_NULL_HANDLE;
6845 
6846  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6847  m_Suballocations.push_back(suballoc);
6848  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6849  --suballocItem;
6850  m_FreeSuballocationsBySize.push_back(suballocItem);
6851 }
6852 
6853 bool VmaBlockMetadata_Generic::Validate() const
6854 {
6855  VMA_VALIDATE(!m_Suballocations.empty());
6856 
6857  // Expected offset of new suballocation as calculated from previous ones.
6858  VkDeviceSize calculatedOffset = 0;
6859  // Expected number of free suballocations as calculated from traversing their list.
6860  uint32_t calculatedFreeCount = 0;
6861  // Expected sum size of free suballocations as calculated from traversing their list.
6862  VkDeviceSize calculatedSumFreeSize = 0;
6863  // Expected number of free suballocations that should be registered in
6864  // m_FreeSuballocationsBySize calculated from traversing their list.
6865  size_t freeSuballocationsToRegister = 0;
6866  // True if previous visited suballocation was free.
6867  bool prevFree = false;
6868 
6869  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6870  suballocItem != m_Suballocations.cend();
6871  ++suballocItem)
6872  {
6873  const VmaSuballocation& subAlloc = *suballocItem;
6874 
6875  // Actual offset of this suballocation doesn't match expected one.
6876  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6877 
6878  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6879  // Two adjacent free suballocations are invalid. They should be merged.
6880  VMA_VALIDATE(!prevFree || !currFree);
6881 
6882  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6883 
6884  if(currFree)
6885  {
6886  calculatedSumFreeSize += subAlloc.size;
6887  ++calculatedFreeCount;
6888  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6889  {
6890  ++freeSuballocationsToRegister;
6891  }
6892 
6893  // Margin required between allocations - every free space must be at least that large.
6894  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6895  }
6896  else
6897  {
6898  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6899  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6900 
6901  // Margin required between allocations - previous allocation must be free.
6902  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6903  }
6904 
6905  calculatedOffset += subAlloc.size;
6906  prevFree = currFree;
6907  }
6908 
6909  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6910  // match expected one.
6911  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6912 
6913  VkDeviceSize lastSize = 0;
6914  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6915  {
6916  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6917 
6918  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6919  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6920  // They must be sorted by size ascending.
6921  VMA_VALIDATE(suballocItem->size >= lastSize);
6922 
6923  lastSize = suballocItem->size;
6924  }
6925 
6926  // Check if totals match calculated values.
6927  VMA_VALIDATE(ValidateFreeSuballocationList());
6928  VMA_VALIDATE(calculatedOffset == GetSize());
6929  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6930  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6931 
6932  return true;
6933 }
6934 
6935 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6936 {
6937  if(!m_FreeSuballocationsBySize.empty())
6938  {
6939  return m_FreeSuballocationsBySize.back()->size;
6940  }
6941  else
6942  {
6943  return 0;
6944  }
6945 }
6946 
6947 bool VmaBlockMetadata_Generic::IsEmpty() const
6948 {
6949  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6950 }
6951 
6952 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6953 {
6954  outInfo.blockCount = 1;
6955 
6956  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6957  outInfo.allocationCount = rangeCount - m_FreeCount;
6958  outInfo.unusedRangeCount = m_FreeCount;
6959 
6960  outInfo.unusedBytes = m_SumFreeSize;
6961  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6962 
6963  outInfo.allocationSizeMin = UINT64_MAX;
6964  outInfo.allocationSizeMax = 0;
6965  outInfo.unusedRangeSizeMin = UINT64_MAX;
6966  outInfo.unusedRangeSizeMax = 0;
6967 
6968  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6969  suballocItem != m_Suballocations.cend();
6970  ++suballocItem)
6971  {
6972  const VmaSuballocation& suballoc = *suballocItem;
6973  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6974  {
6975  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6976  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6977  }
6978  else
6979  {
6980  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6981  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6982  }
6983  }
6984 }
6985 
6986 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6987 {
6988  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6989 
6990  inoutStats.size += GetSize();
6991  inoutStats.unusedSize += m_SumFreeSize;
6992  inoutStats.allocationCount += rangeCount - m_FreeCount;
6993  inoutStats.unusedRangeCount += m_FreeCount;
6994  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6995 }
6996 
6997 #if VMA_STATS_STRING_ENABLED
6998 
6999 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
7000 {
7001  PrintDetailedMap_Begin(json,
7002  m_SumFreeSize, // unusedBytes
7003  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
7004  m_FreeCount); // unusedRangeCount
7005 
7006  size_t i = 0;
7007  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
7008  suballocItem != m_Suballocations.cend();
7009  ++suballocItem, ++i)
7010  {
7011  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7012  {
7013  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
7014  }
7015  else
7016  {
7017  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
7018  }
7019  }
7020 
7021  PrintDetailedMap_End(json);
7022 }
7023 
7024 #endif // #if VMA_STATS_STRING_ENABLED
7025 
7026 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
7027  uint32_t currentFrameIndex,
7028  uint32_t frameInUseCount,
7029  VkDeviceSize bufferImageGranularity,
7030  VkDeviceSize allocSize,
7031  VkDeviceSize allocAlignment,
7032  bool upperAddress,
7033  VmaSuballocationType allocType,
7034  bool canMakeOtherLost,
7035  uint32_t strategy,
7036  VmaAllocationRequest* pAllocationRequest)
7037 {
7038  VMA_ASSERT(allocSize > 0);
7039  VMA_ASSERT(!upperAddress);
7040  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7041  VMA_ASSERT(pAllocationRequest != VMA_NULL);
7042  VMA_HEAVY_ASSERT(Validate());
7043 
7044  // There is not enough total free space in this block to fulfill the request: Early return.
7045  if(canMakeOtherLost == false &&
7046  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
7047  {
7048  return false;
7049  }
7050 
7051  // New algorithm, efficiently searching freeSuballocationsBySize.
7052  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7053  if(freeSuballocCount > 0)
7054  {
7055  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7056  {
7057  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7058  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7059  m_FreeSuballocationsBySize.data(),
7060  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7061  allocSize + 2 * VMA_DEBUG_MARGIN,
7062  VmaSuballocationItemSizeLess());
7063  size_t index = it - m_FreeSuballocationsBySize.data();
7064  for(; index < freeSuballocCount; ++index)
7065  {
7066  if(CheckAllocation(
7067  currentFrameIndex,
7068  frameInUseCount,
7069  bufferImageGranularity,
7070  allocSize,
7071  allocAlignment,
7072  allocType,
7073  m_FreeSuballocationsBySize[index],
7074  false, // canMakeOtherLost
7075  &pAllocationRequest->offset,
7076  &pAllocationRequest->itemsToMakeLostCount,
7077  &pAllocationRequest->sumFreeSize,
7078  &pAllocationRequest->sumItemSize))
7079  {
7080  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7081  return true;
7082  }
7083  }
7084  }
7085  else // WORST_FIT, FIRST_FIT
7086  {
7087  // Search starting from the biggest suballocations.
7088  for(size_t index = freeSuballocCount; index--; )
7089  {
7090  if(CheckAllocation(
7091  currentFrameIndex,
7092  frameInUseCount,
7093  bufferImageGranularity,
7094  allocSize,
7095  allocAlignment,
7096  allocType,
7097  m_FreeSuballocationsBySize[index],
7098  false, // canMakeOtherLost
7099  &pAllocationRequest->offset,
7100  &pAllocationRequest->itemsToMakeLostCount,
7101  &pAllocationRequest->sumFreeSize,
7102  &pAllocationRequest->sumItemSize))
7103  {
7104  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7105  return true;
7106  }
7107  }
7108  }
7109  }
7110 
7111  if(canMakeOtherLost)
7112  {
7113  // Brute-force algorithm. TODO: Come up with something better.
7114 
7115  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7116  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7117 
7118  VmaAllocationRequest tmpAllocRequest = {};
7119  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7120  suballocIt != m_Suballocations.end();
7121  ++suballocIt)
7122  {
7123  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7124  suballocIt->hAllocation->CanBecomeLost())
7125  {
7126  if(CheckAllocation(
7127  currentFrameIndex,
7128  frameInUseCount,
7129  bufferImageGranularity,
7130  allocSize,
7131  allocAlignment,
7132  allocType,
7133  suballocIt,
7134  canMakeOtherLost,
7135  &tmpAllocRequest.offset,
7136  &tmpAllocRequest.itemsToMakeLostCount,
7137  &tmpAllocRequest.sumFreeSize,
7138  &tmpAllocRequest.sumItemSize))
7139  {
7140  tmpAllocRequest.item = suballocIt;
7141 
7142  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7143  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7144  {
7145  *pAllocationRequest = tmpAllocRequest;
7146  }
7147  }
7148  }
7149  }
7150 
7151  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7152  {
7153  return true;
7154  }
7155  }
7156 
7157  return false;
7158 }
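// The best-fit path above depends on VmaBinaryFindFirstNotLess, defined earlier in
// this file. A sketch with an equivalent lower_bound-style contract (an assumption
// about its behavior, shown here for reference; hypothetical name):
#if 0
template<typename CmpLess, typename IterT, typename KeyT>
static IterT BinaryFindFirstNotLessSketch(IterT beg, IterT end, const KeyT& key, CmpLess cmpLess)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmpLess(*(beg + mid), key))
        {
            down = mid + 1; // *(beg + mid) < key: the answer lies to the right.
        }
        else
        {
            up = mid; // *(beg + mid) >= key: mid itself may be the answer.
        }
    }
    return beg + down; // First element not less than key, or end.
}
#endif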
7159 
7160 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7161  uint32_t currentFrameIndex,
7162  uint32_t frameInUseCount,
7163  VmaAllocationRequest* pAllocationRequest)
7164 {
7165  while(pAllocationRequest->itemsToMakeLostCount > 0)
7166  {
7167  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7168  {
7169  ++pAllocationRequest->item;
7170  }
7171  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7172  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7173  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7174  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7175  {
7176  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7177  --pAllocationRequest->itemsToMakeLostCount;
7178  }
7179  else
7180  {
7181  return false;
7182  }
7183  }
7184 
7185  VMA_HEAVY_ASSERT(Validate());
7186  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7187  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7188 
7189  return true;
7190 }
7191 
7192 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7193 {
7194  uint32_t lostAllocationCount = 0;
7195  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7196  it != m_Suballocations.end();
7197  ++it)
7198  {
7199  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7200  it->hAllocation->CanBecomeLost() &&
7201  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7202  {
7203  it = FreeSuballocation(it);
7204  ++lostAllocationCount;
7205  }
7206  }
7207  return lostAllocationCount;
7208 }
7209 
7210 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7211 {
7212  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7213  it != m_Suballocations.end();
7214  ++it)
7215  {
7216  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7217  {
7218  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7219  {
7220  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7221  return VK_ERROR_VALIDATION_FAILED_EXT;
7222  }
7223  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7224  {
7225  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7226  return VK_ERROR_VALIDATION_FAILED_EXT;
7227  }
7228  }
7229  }
7230 
7231  return VK_SUCCESS;
7232 }
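// CheckCorruption() relies on every allocation being surrounded by VMA_DEBUG_MARGIN
// bytes filled with a known magic pattern at allocation time. A minimal sketch of
// such a validator (hypothetical names and magic value, for illustration only):
#if 0
static const uint32_t SKETCH_CORRUPTION_MAGIC = 0x7F84E666;

static bool ValidateMagicValueSketch(const void* pBlockData, VkDeviceSize offset)
{
    // Scan the guard margin word by word; any mismatch means an out-of-bounds write.
    const uint32_t* p = (const uint32_t*)((const char*)pBlockData + offset);
    for(size_t i = 0; i < VMA_DEBUG_MARGIN / sizeof(uint32_t); ++i, ++p)
    {
        if(*p != SKETCH_CORRUPTION_MAGIC)
        {
            return false;
        }
    }
    return true;
}
#endif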
7233 
7234 void VmaBlockMetadata_Generic::Alloc(
7235  const VmaAllocationRequest& request,
7236  VmaSuballocationType type,
7237  VkDeviceSize allocSize,
7238  bool upperAddress,
7239  VmaAllocation hAllocation)
7240 {
7241  VMA_ASSERT(!upperAddress);
7242  VMA_ASSERT(request.item != m_Suballocations.end());
7243  VmaSuballocation& suballoc = *request.item;
7244  // Given suballocation is a free block.
7245  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7246  // Given offset is inside this suballocation.
7247  VMA_ASSERT(request.offset >= suballoc.offset);
7248  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7249  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7250  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7251 
7252  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7253  // it to become used.
7254  UnregisterFreeSuballocation(request.item);
7255 
7256  suballoc.offset = request.offset;
7257  suballoc.size = allocSize;
7258  suballoc.type = type;
7259  suballoc.hAllocation = hAllocation;
7260 
7261  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7262  if(paddingEnd)
7263  {
7264  VmaSuballocation paddingSuballoc = {};
7265  paddingSuballoc.offset = request.offset + allocSize;
7266  paddingSuballoc.size = paddingEnd;
7267  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7268  VmaSuballocationList::iterator next = request.item;
7269  ++next;
7270  const VmaSuballocationList::iterator paddingEndItem =
7271  m_Suballocations.insert(next, paddingSuballoc);
7272  RegisterFreeSuballocation(paddingEndItem);
7273  }
7274 
7275  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7276  if(paddingBegin)
7277  {
7278  VmaSuballocation paddingSuballoc = {};
7279  paddingSuballoc.offset = request.offset - paddingBegin;
7280  paddingSuballoc.size = paddingBegin;
7281  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7282  const VmaSuballocationList::iterator paddingBeginItem =
7283  m_Suballocations.insert(request.item, paddingSuballoc);
7284  RegisterFreeSuballocation(paddingBeginItem);
7285  }
7286 
7287  // Update totals.
7288  m_FreeCount = m_FreeCount - 1;
7289  if(paddingBegin > 0)
7290  {
7291  ++m_FreeCount;
7292  }
7293  if(paddingEnd > 0)
7294  {
7295  ++m_FreeCount;
7296  }
7297  m_SumFreeSize -= allocSize;
7298 }
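// A worked example of the split above: if the free suballocation spans [0, 1024) and
// the request resolved to offset 256 with allocSize 512, then paddingBegin = 256 and
// paddingEnd = 1024 - 256 - 512 = 256. The single free range becomes three
// suballocations: free [0, 256), allocated [256, 768), free [768, 1024), so
// m_FreeCount goes from 1 to 2 and m_SumFreeSize drops by exactly allocSize.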
7299 
7300 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7301 {
7302  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7303  suballocItem != m_Suballocations.end();
7304  ++suballocItem)
7305  {
7306  VmaSuballocation& suballoc = *suballocItem;
7307  if(suballoc.hAllocation == allocation)
7308  {
7309  FreeSuballocation(suballocItem);
7310  VMA_HEAVY_ASSERT(Validate());
7311  return;
7312  }
7313  }
7314  VMA_ASSERT(0 && "Not found!");
7315 }
7316 
7317 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7318 {
7319  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7320  suballocItem != m_Suballocations.end();
7321  ++suballocItem)
7322  {
7323  VmaSuballocation& suballoc = *suballocItem;
7324  if(suballoc.offset == offset)
7325  {
7326  FreeSuballocation(suballocItem);
7327  return;
7328  }
7329  }
7330  VMA_ASSERT(0 && "Not found!");
7331 }
7332 
7333 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7334 {
7335  VkDeviceSize lastSize = 0;
7336  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7337  {
7338  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7339 
7340  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7341  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7342  VMA_VALIDATE(it->size >= lastSize);
7343  lastSize = it->size;
7344  }
7345  return true;
7346 }
7347 
7348 bool VmaBlockMetadata_Generic::CheckAllocation(
7349  uint32_t currentFrameIndex,
7350  uint32_t frameInUseCount,
7351  VkDeviceSize bufferImageGranularity,
7352  VkDeviceSize allocSize,
7353  VkDeviceSize allocAlignment,
7354  VmaSuballocationType allocType,
7355  VmaSuballocationList::const_iterator suballocItem,
7356  bool canMakeOtherLost,
7357  VkDeviceSize* pOffset,
7358  size_t* itemsToMakeLostCount,
7359  VkDeviceSize* pSumFreeSize,
7360  VkDeviceSize* pSumItemSize) const
7361 {
7362  VMA_ASSERT(allocSize > 0);
7363  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7364  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7365  VMA_ASSERT(pOffset != VMA_NULL);
7366 
7367  *itemsToMakeLostCount = 0;
7368  *pSumFreeSize = 0;
7369  *pSumItemSize = 0;
7370 
7371  if(canMakeOtherLost)
7372  {
7373  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7374  {
7375  *pSumFreeSize = suballocItem->size;
7376  }
7377  else
7378  {
7379  if(suballocItem->hAllocation->CanBecomeLost() &&
7380  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7381  {
7382  ++*itemsToMakeLostCount;
7383  *pSumItemSize = suballocItem->size;
7384  }
7385  else
7386  {
7387  return false;
7388  }
7389  }
7390 
7391  // Remaining size is too small for this request: Early return.
7392  if(GetSize() - suballocItem->offset < allocSize)
7393  {
7394  return false;
7395  }
7396 
7397  // Start from offset equal to beginning of this suballocation.
7398  *pOffset = suballocItem->offset;
7399 
7400  // Apply VMA_DEBUG_MARGIN at the beginning.
7401  if(VMA_DEBUG_MARGIN > 0)
7402  {
7403  *pOffset += VMA_DEBUG_MARGIN;
7404  }
7405 
7406  // Apply alignment.
7407  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7408 
7409  // Check previous suballocations for BufferImageGranularity conflicts.
7410  // Make bigger alignment if necessary.
7411  if(bufferImageGranularity > 1)
7412  {
7413  bool bufferImageGranularityConflict = false;
7414  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7415  while(prevSuballocItem != m_Suballocations.cbegin())
7416  {
7417  --prevSuballocItem;
7418  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7419  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7420  {
7421  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7422  {
7423  bufferImageGranularityConflict = true;
7424  break;
7425  }
7426  }
7427  else
7428  // Already on previous page.
7429  break;
7430  }
7431  if(bufferImageGranularityConflict)
7432  {
7433  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7434  }
7435  }
7436 
7437  // Now that we have final *pOffset, check if we are past suballocItem.
7438  // If yes, return false - this function should be called for another suballocItem as starting point.
7439  if(*pOffset >= suballocItem->offset + suballocItem->size)
7440  {
7441  return false;
7442  }
7443 
7444  // Calculate padding at the beginning based on current offset.
7445  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7446 
7447  // Calculate required margin at the end.
7448  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7449 
7450  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7451  // Another early return check.
7452  if(suballocItem->offset + totalSize > GetSize())
7453  {
7454  return false;
7455  }
7456 
7457  // Advance lastSuballocItem until desired size is reached.
7458  // Update itemsToMakeLostCount.
7459  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7460  if(totalSize > suballocItem->size)
7461  {
7462  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7463  while(remainingSize > 0)
7464  {
7465  ++lastSuballocItem;
7466  if(lastSuballocItem == m_Suballocations.cend())
7467  {
7468  return false;
7469  }
7470  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7471  {
7472  *pSumFreeSize += lastSuballocItem->size;
7473  }
7474  else
7475  {
7476  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7477  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7478  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7479  {
7480  ++*itemsToMakeLostCount;
7481  *pSumItemSize += lastSuballocItem->size;
7482  }
7483  else
7484  {
7485  return false;
7486  }
7487  }
7488  remainingSize = (lastSuballocItem->size < remainingSize) ?
7489  remainingSize - lastSuballocItem->size : 0;
7490  }
7491  }
7492 
7493  // Check next suballocations for BufferImageGranularity conflicts.
7494  // If conflict exists, we must mark more allocations lost or fail.
7495  if(bufferImageGranularity > 1)
7496  {
7497  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7498  ++nextSuballocItem;
7499  while(nextSuballocItem != m_Suballocations.cend())
7500  {
7501  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7502  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7503  {
7504  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7505  {
7506  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7507  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7508  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7509  {
7510  ++*itemsToMakeLostCount;
7511  }
7512  else
7513  {
7514  return false;
7515  }
7516  }
7517  }
7518  else
7519  {
7520  // Already on next page.
7521  break;
7522  }
7523  ++nextSuballocItem;
7524  }
7525  }
7526  }
7527  else
7528  {
7529  const VmaSuballocation& suballoc = *suballocItem;
7530  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7531 
7532  *pSumFreeSize = suballoc.size;
7533 
7534  // Size of this suballocation is too small for this request: Early return.
7535  if(suballoc.size < allocSize)
7536  {
7537  return false;
7538  }
7539 
7540  // Start from offset equal to beginning of this suballocation.
7541  *pOffset = suballoc.offset;
7542 
7543  // Apply VMA_DEBUG_MARGIN at the beginning.
7544  if(VMA_DEBUG_MARGIN > 0)
7545  {
7546  *pOffset += VMA_DEBUG_MARGIN;
7547  }
7548 
7549  // Apply alignment.
7550  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7551 
7552  // Check previous suballocations for BufferImageGranularity conflicts.
7553  // Make bigger alignment if necessary.
7554  if(bufferImageGranularity > 1)
7555  {
7556  bool bufferImageGranularityConflict = false;
7557  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7558  while(prevSuballocItem != m_Suballocations.cbegin())
7559  {
7560  --prevSuballocItem;
7561  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7562  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7563  {
7564  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7565  {
7566  bufferImageGranularityConflict = true;
7567  break;
7568  }
7569  }
7570  else
7571  // Already on previous page.
7572  break;
7573  }
7574  if(bufferImageGranularityConflict)
7575  {
7576  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7577  }
7578  }
7579 
7580  // Calculate padding at the beginning based on current offset.
7581  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7582 
7583  // Calculate required margin at the end.
7584  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7585 
7586  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7587  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7588  {
7589  return false;
7590  }
7591 
7592  // Check next suballocations for BufferImageGranularity conflicts.
7593  // If conflict exists, allocation cannot be made here.
7594  if(bufferImageGranularity > 1)
7595  {
7596  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7597  ++nextSuballocItem;
7598  while(nextSuballocItem != m_Suballocations.cend())
7599  {
7600  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7601  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7602  {
7603  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7604  {
7605  return false;
7606  }
7607  }
7608  else
7609  {
7610  // Already on next page.
7611  break;
7612  }
7613  ++nextSuballocItem;
7614  }
7615  }
7616  }
7617 
7618  // All tests passed: Success. pOffset is already filled.
7619  return true;
7620 }
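// The granularity checks above use two helpers defined earlier in this file. Sketches
// of their assumed behavior follow (hypothetical names; alignment and pageSize are
// powers of two, as Vulkan guarantees for memory alignments and bufferImageGranularity):
#if 0
// Round val up to the nearest multiple of alignment.
static inline VkDeviceSize AlignUpSketch(VkDeviceSize val, VkDeviceSize alignment)
{
    return (val + alignment - 1) & ~(alignment - 1);
}

// Two neighboring resources (A before B) conflict on bufferImageGranularity when the
// last byte of A and the first byte of B fall on the same "page" of that size.
static inline bool BlocksOnSamePageSketch(
    VkDeviceSize resourceAOffset, VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset, VkDeviceSize pageSize)
{
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStartPage = resourceBOffset & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
#endif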
7621 
7622 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7623 {
7624  VMA_ASSERT(item != m_Suballocations.end());
7625  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7626 
7627  VmaSuballocationList::iterator nextItem = item;
7628  ++nextItem;
7629  VMA_ASSERT(nextItem != m_Suballocations.end());
7630  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7631 
7632  item->size += nextItem->size;
7633  --m_FreeCount;
7634  m_Suballocations.erase(nextItem);
7635 }
7636 
7637 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7638 {
7639  // Change this suballocation to be marked as free.
7640  VmaSuballocation& suballoc = *suballocItem;
7641  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7642  suballoc.hAllocation = VK_NULL_HANDLE;
7643 
7644  // Update totals.
7645  ++m_FreeCount;
7646  m_SumFreeSize += suballoc.size;
7647 
7648  // Merge with previous and/or next suballocation if it's also free.
7649  bool mergeWithNext = false;
7650  bool mergeWithPrev = false;
7651 
7652  VmaSuballocationList::iterator nextItem = suballocItem;
7653  ++nextItem;
7654  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7655  {
7656  mergeWithNext = true;
7657  }
7658 
7659  VmaSuballocationList::iterator prevItem = suballocItem;
7660  if(suballocItem != m_Suballocations.begin())
7661  {
7662  --prevItem;
7663  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7664  {
7665  mergeWithPrev = true;
7666  }
7667  }
7668 
7669  if(mergeWithNext)
7670  {
7671  UnregisterFreeSuballocation(nextItem);
7672  MergeFreeWithNext(suballocItem);
7673  }
7674 
7675  if(mergeWithPrev)
7676  {
7677  UnregisterFreeSuballocation(prevItem);
7678  MergeFreeWithNext(prevItem);
7679  RegisterFreeSuballocation(prevItem);
7680  return prevItem;
7681  }
7682  else
7683  {
7684  RegisterFreeSuballocation(suballocItem);
7685  return suballocItem;
7686  }
7687 }
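// For example, if a block held only [free 128 | used 512 | free 256], freeing the
// middle allocation first turns it into a free range (m_FreeCount momentarily 3),
// then the merge-with-next and merge-with-prev steps collapse all three into one
// free range of 896 bytes, leaving m_FreeCount at 1 and the merged item
// re-registered in m_FreeSuballocationsBySize under its new size.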
7688 
7689 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7690 {
7691  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7692  VMA_ASSERT(item->size > 0);
7693 
7694  // You may want to enable this validation at the beginning or at the end of
6695  // this function, depending on what you want to check.
7696  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7697 
7698  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7699  {
7700  if(m_FreeSuballocationsBySize.empty())
7701  {
7702  m_FreeSuballocationsBySize.push_back(item);
7703  }
7704  else
7705  {
7706  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7707  }
7708  }
7709 
7710  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7711 }
7712 
7713 
7714 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7715 {
7716  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7717  VMA_ASSERT(item->size > 0);
7718 
7719  // You may want to enable this validation at the beginning or at the end of
7720  // this function, depending on what you want to check.
7721  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7722 
7723  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7724  {
7725  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7726  m_FreeSuballocationsBySize.data(),
7727  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7728  item,
7729  VmaSuballocationItemSizeLess());
7730  for(size_t index = it - m_FreeSuballocationsBySize.data();
7731  index < m_FreeSuballocationsBySize.size();
7732  ++index)
7733  {
7734  if(m_FreeSuballocationsBySize[index] == item)
7735  {
7736  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7737  return;
7738  }
7739  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7740  }
7741  VMA_ASSERT(0 && "Not found.");
7742  }
7743 
7744  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7745 }
7746 
7747 ////////////////////////////////////////////////////////////////////////////////
7748 // class VmaBlockMetadata_Linear
7749 
7750 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7751  VmaBlockMetadata(hAllocator),
7752  m_SumFreeSize(0),
7753  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7754  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7755  m_1stVectorIndex(0),
7756  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7757  m_1stNullItemsBeginCount(0),
7758  m_1stNullItemsMiddleCount(0),
7759  m_2ndNullItemsCount(0)
7760 {
7761 }
7762 
7763 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7764 {
7765 }
7766 
7767 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7768 {
7769  VmaBlockMetadata::Init(size);
7770  m_SumFreeSize = size;
7771 }
7772 
7773 bool VmaBlockMetadata_Linear::Validate() const
7774 {
7775  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7776  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7777 
7778  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7779  VMA_VALIDATE(!suballocations1st.empty() ||
7780  suballocations2nd.empty() ||
7781  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7782 
7783  if(!suballocations1st.empty())
7784  {
7785  // Null items at the beginning must all be counted in m_1stNullItemsBeginCount.
7786  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7787  // Null item at the end should be just pop_back().
7788  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7789  }
7790  if(!suballocations2nd.empty())
7791  {
7792  // Null item at the end should be just pop_back().
7793  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7794  }
7795 
7796  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7797  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7798 
7799  VkDeviceSize sumUsedSize = 0;
7800  const size_t suballoc1stCount = suballocations1st.size();
7801  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7802 
7803  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7804  {
7805  const size_t suballoc2ndCount = suballocations2nd.size();
7806  size_t nullItem2ndCount = 0;
7807  for(size_t i = 0; i < suballoc2ndCount; ++i)
7808  {
7809  const VmaSuballocation& suballoc = suballocations2nd[i];
7810  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7811 
7812  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7813  VMA_VALIDATE(suballoc.offset >= offset);
7814 
7815  if(!currFree)
7816  {
7817  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7818  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7819  sumUsedSize += suballoc.size;
7820  }
7821  else
7822  {
7823  ++nullItem2ndCount;
7824  }
7825 
7826  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7827  }
7828 
7829  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7830  }
7831 
7832  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7833  {
7834  const VmaSuballocation& suballoc = suballocations1st[i];
7835  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7836  suballoc.hAllocation == VK_NULL_HANDLE);
7837  }
7838 
7839  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7840 
7841  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7842  {
7843  const VmaSuballocation& suballoc = suballocations1st[i];
7844  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7845 
7846  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7847  VMA_VALIDATE(suballoc.offset >= offset);
7848  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7849 
7850  if(!currFree)
7851  {
7852  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7853  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7854  sumUsedSize += suballoc.size;
7855  }
7856  else
7857  {
7858  ++nullItem1stCount;
7859  }
7860 
7861  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7862  }
7863  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7864 
7865  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7866  {
7867  const size_t suballoc2ndCount = suballocations2nd.size();
7868  size_t nullItem2ndCount = 0;
7869  for(size_t i = suballoc2ndCount; i--; )
7870  {
7871  const VmaSuballocation& suballoc = suballocations2nd[i];
7872  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7873 
7874  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7875  VMA_VALIDATE(suballoc.offset >= offset);
7876 
7877  if(!currFree)
7878  {
7879  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7880  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7881  sumUsedSize += suballoc.size;
7882  }
7883  else
7884  {
7885  ++nullItem2ndCount;
7886  }
7887 
7888  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7889  }
7890 
7891  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7892  }
7893 
7894  VMA_VALIDATE(offset <= GetSize());
7895  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7896 
7897  return true;
7898 }
7899 
7900 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7901 {
7902  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7903  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7904 }
7905 
7906 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7907 {
7908  const VkDeviceSize size = GetSize();
7909 
7910  /*
7911  We don't consider gaps inside allocation vectors with freed allocations because
7912  they are not suitable for reuse in the linear allocator. We consider only space that
7913  is available for new allocations.
7914  */
7915  if(IsEmpty())
7916  {
7917  return size;
7918  }
7919 
7920  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7921 
7922  switch(m_2ndVectorMode)
7923  {
7924  case SECOND_VECTOR_EMPTY:
7925  /*
7926  Available space is after end of 1st, as well as before beginning of 1st (which
7927  would make it a ring buffer).
7928  */
7929  {
7930  const size_t suballocations1stCount = suballocations1st.size();
7931  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7932  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7933  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7934  return VMA_MAX(
7935  firstSuballoc.offset,
7936  size - (lastSuballoc.offset + lastSuballoc.size));
7937  }
7938  break;
7939 
7940  case SECOND_VECTOR_RING_BUFFER:
7941  /*
7942  Available space is only between end of 2nd and beginning of 1st.
7943  */
7944  {
7945  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7946  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7947  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7948  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7949  }
7950  break;
7951 
7952  case SECOND_VECTOR_DOUBLE_STACK:
7953  /*
7954  Available space is only between end of 1st and top of 2nd.
7955  */
7956  {
7957  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7958  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7959  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7960  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7961  }
7962  break;
7963 
7964  default:
7965  VMA_ASSERT(0);
7966  return 0;
7967  }
7968 }
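// A conceptual sketch of the three modes handled above (inferred from this class;
// "1st"/"2nd" are the two suballocation vectors):
//
//   SECOND_VECTOR_EMPTY:        | free | 1st allocations...             | free |
//   SECOND_VECTOR_RING_BUFFER:  | 2nd allocations... | free | 1st allocations... |
//   SECOND_VECTOR_DOUBLE_STACK: | 1st allocations... --> free <-- ...2nd allocations |
//
// which is why the maximum reusable range is, respectively: the larger of the gaps
// before/after 1st, the gap between the end of 2nd and the start of 1st, and the
// gap between the end of 1st and the top of 2nd.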
7969 
7970 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7971 {
7972  const VkDeviceSize size = GetSize();
7973  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7974  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7975  const size_t suballoc1stCount = suballocations1st.size();
7976  const size_t suballoc2ndCount = suballocations2nd.size();
7977 
7978  outInfo.blockCount = 1;
7979  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7980  outInfo.unusedRangeCount = 0;
7981  outInfo.usedBytes = 0;
7982  outInfo.allocationSizeMin = UINT64_MAX;
7983  outInfo.allocationSizeMax = 0;
7984  outInfo.unusedRangeSizeMin = UINT64_MAX;
7985  outInfo.unusedRangeSizeMax = 0;
7986 
7987  VkDeviceSize lastOffset = 0;
7988 
7989  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7990  {
7991  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7992  size_t nextAlloc2ndIndex = 0;
7993  while(lastOffset < freeSpace2ndTo1stEnd)
7994  {
7995  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7996  while(nextAlloc2ndIndex < suballoc2ndCount &&
7997  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7998  {
7999  ++nextAlloc2ndIndex;
8000  }
8001 
8002  // Found non-null allocation.
8003  if(nextAlloc2ndIndex < suballoc2ndCount)
8004  {
8005  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8006 
8007  // 1. Process free space before this allocation.
8008  if(lastOffset < suballoc.offset)
8009  {
8010  // There is free space from lastOffset to suballoc.offset.
8011  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8012  ++outInfo.unusedRangeCount;
8013  outInfo.unusedBytes += unusedRangeSize;
8014  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8015  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8016  }
8017 
8018  // 2. Process this allocation.
8019  // There is allocation with suballoc.offset, suballoc.size.
8020  outInfo.usedBytes += suballoc.size;
8021  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8022  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8023 
8024  // 3. Prepare for next iteration.
8025  lastOffset = suballoc.offset + suballoc.size;
8026  ++nextAlloc2ndIndex;
8027  }
8028  // We are at the end.
8029  else
8030  {
8031  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8032  if(lastOffset < freeSpace2ndTo1stEnd)
8033  {
8034  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8035  ++outInfo.unusedRangeCount;
8036  outInfo.unusedBytes += unusedRangeSize;
8037  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8038  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8039  }
8040 
8041  // End of loop.
8042  lastOffset = freeSpace2ndTo1stEnd;
8043  }
8044  }
8045  }
8046 
8047  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8048  const VkDeviceSize freeSpace1stTo2ndEnd =
8049  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8050  while(lastOffset < freeSpace1stTo2ndEnd)
8051  {
8052  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8053  while(nextAlloc1stIndex < suballoc1stCount &&
8054  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8055  {
8056  ++nextAlloc1stIndex;
8057  }
8058 
8059  // Found non-null allocation.
8060  if(nextAlloc1stIndex < suballoc1stCount)
8061  {
8062  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8063 
8064  // 1. Process free space before this allocation.
8065  if(lastOffset < suballoc.offset)
8066  {
8067  // There is free space from lastOffset to suballoc.offset.
8068  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8069  ++outInfo.unusedRangeCount;
8070  outInfo.unusedBytes += unusedRangeSize;
8071  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8072  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8073  }
8074 
8075  // 2. Process this allocation.
8076  // There is allocation with suballoc.offset, suballoc.size.
8077  outInfo.usedBytes += suballoc.size;
8078  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8079  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8080 
8081  // 3. Prepare for next iteration.
8082  lastOffset = suballoc.offset + suballoc.size;
8083  ++nextAlloc1stIndex;
8084  }
8085  // We are at the end.
8086  else
8087  {
8088  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8089  if(lastOffset < freeSpace1stTo2ndEnd)
8090  {
8091  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8092  ++outInfo.unusedRangeCount;
8093  outInfo.unusedBytes += unusedRangeSize;
8094  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8095  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8096  }
8097 
8098  // End of loop.
8099  lastOffset = freeSpace1stTo2ndEnd;
8100  }
8101  }
8102 
8103  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8104  {
8105  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8106  while(lastOffset < size)
8107  {
8108  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8109  while(nextAlloc2ndIndex != SIZE_MAX &&
8110  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8111  {
8112  --nextAlloc2ndIndex;
8113  }
8114 
8115  // Found non-null allocation.
8116  if(nextAlloc2ndIndex != SIZE_MAX)
8117  {
8118  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8119 
8120  // 1. Process free space before this allocation.
8121  if(lastOffset < suballoc.offset)
8122  {
8123  // There is free space from lastOffset to suballoc.offset.
8124  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8125  ++outInfo.unusedRangeCount;
8126  outInfo.unusedBytes += unusedRangeSize;
8127  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8128  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8129  }
8130 
8131  // 2. Process this allocation.
8132  // There is allocation with suballoc.offset, suballoc.size.
8133  outInfo.usedBytes += suballoc.size;
8134  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8135  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8136 
8137  // 3. Prepare for next iteration.
8138  lastOffset = suballoc.offset + suballoc.size;
8139  --nextAlloc2ndIndex;
8140  }
8141  // We are at the end.
8142  else
8143  {
8144  // There is free space from lastOffset to size.
8145  if(lastOffset < size)
8146  {
8147  const VkDeviceSize unusedRangeSize = size - lastOffset;
8148  ++outInfo.unusedRangeCount;
8149  outInfo.unusedBytes += unusedRangeSize;
8150  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8151  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8152  }
8153 
8154  // End of loop.
8155  lastOffset = size;
8156  }
8157  }
8158  }
8159 
8160  outInfo.unusedBytes = size - outInfo.usedBytes; // Authoritative total; supersedes the incremental per-range sums above.
8161 }
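/*
The per-block VmaStatInfo computed above is aggregated across blocks, memory
types, and heaps by vmaCalculateStats(). A minimal usage sketch, assuming an
already created VmaAllocator `allocator`:

\code
VmaStats stats;
vmaCalculateStats(allocator, &stats);
// `total` sums all memory types and heaps. The min/max fields are meaningful
// only when allocationCount / unusedRangeCount are non-zero.
printf("Used: %llu B, unused: %llu B, blocks: %u\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes,
    stats.total.blockCount);
\endcode
*/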
8162 
8163 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8164 {
8165  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8166  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8167  const VkDeviceSize size = GetSize();
8168  const size_t suballoc1stCount = suballocations1st.size();
8169  const size_t suballoc2ndCount = suballocations2nd.size();
8170 
8171  inoutStats.size += size;
8172 
8173  VkDeviceSize lastOffset = 0;
8174 
8175  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8176  {
8177  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8178  size_t nextAlloc2ndIndex = 0;
8179  while(lastOffset < freeSpace2ndTo1stEnd)
8180  {
8181  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8182  while(nextAlloc2ndIndex < suballoc2ndCount &&
8183  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8184  {
8185  ++nextAlloc2ndIndex;
8186  }
8187 
8188  // Found non-null allocation.
8189  if(nextAlloc2ndIndex < suballoc2ndCount)
8190  {
8191  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8192 
8193  // 1. Process free space before this allocation.
8194  if(lastOffset < suballoc.offset)
8195  {
8196  // There is free space from lastOffset to suballoc.offset.
8197  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8198  inoutStats.unusedSize += unusedRangeSize;
8199  ++inoutStats.unusedRangeCount;
8200  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8201  }
8202 
8203  // 2. Process this allocation.
8204  // There is allocation with suballoc.offset, suballoc.size.
8205  ++inoutStats.allocationCount;
8206 
8207  // 3. Prepare for next iteration.
8208  lastOffset = suballoc.offset + suballoc.size;
8209  ++nextAlloc2ndIndex;
8210  }
8211  // We are at the end.
8212  else
8213  {
8214  if(lastOffset < freeSpace2ndTo1stEnd)
8215  {
8216  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8217  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8218  inoutStats.unusedSize += unusedRangeSize;
8219  ++inoutStats.unusedRangeCount;
8220  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8221  }
8222 
8223  // End of loop.
8224  lastOffset = freeSpace2ndTo1stEnd;
8225  }
8226  }
8227  }
8228 
8229  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8230  const VkDeviceSize freeSpace1stTo2ndEnd =
8231  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8232  while(lastOffset < freeSpace1stTo2ndEnd)
8233  {
8234  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8235  while(nextAlloc1stIndex < suballoc1stCount &&
8236  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8237  {
8238  ++nextAlloc1stIndex;
8239  }
8240 
8241  // Found non-null allocation.
8242  if(nextAlloc1stIndex < suballoc1stCount)
8243  {
8244  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8245 
8246  // 1. Process free space before this allocation.
8247  if(lastOffset < suballoc.offset)
8248  {
8249  // There is free space from lastOffset to suballoc.offset.
8250  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8251  inoutStats.unusedSize += unusedRangeSize;
8252  ++inoutStats.unusedRangeCount;
8253  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8254  }
8255 
8256  // 2. Process this allocation.
8257  // There is allocation with suballoc.offset, suballoc.size.
8258  ++inoutStats.allocationCount;
8259 
8260  // 3. Prepare for next iteration.
8261  lastOffset = suballoc.offset + suballoc.size;
8262  ++nextAlloc1stIndex;
8263  }
8264  // We are at the end.
8265  else
8266  {
8267  if(lastOffset < freeSpace1stTo2ndEnd)
8268  {
8269  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8270  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8271  inoutStats.unusedSize += unusedRangeSize;
8272  ++inoutStats.unusedRangeCount;
8273  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8274  }
8275 
8276  // End of loop.
8277  lastOffset = freeSpace1stTo2ndEnd;
8278  }
8279  }
8280 
8281  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8282  {
8283  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8284  while(lastOffset < size)
8285  {
8286  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8287  while(nextAlloc2ndIndex != SIZE_MAX &&
8288  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8289  {
8290  --nextAlloc2ndIndex;
8291  }
8292 
8293  // Found non-null allocation.
8294  if(nextAlloc2ndIndex != SIZE_MAX)
8295  {
8296  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8297 
8298  // 1. Process free space before this allocation.
8299  if(lastOffset < suballoc.offset)
8300  {
8301  // There is free space from lastOffset to suballoc.offset.
8302  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8303  inoutStats.unusedSize += unusedRangeSize;
8304  ++inoutStats.unusedRangeCount;
8305  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8306  }
8307 
8308  // 2. Process this allocation.
8309  // There is allocation with suballoc.offset, suballoc.size.
8310  ++inoutStats.allocationCount;
8311 
8312  // 3. Prepare for next iteration.
8313  lastOffset = suballoc.offset + suballoc.size;
8314  --nextAlloc2ndIndex;
8315  }
8316  // We are at the end.
8317  else
8318  {
8319  if(lastOffset < size)
8320  {
8321  // There is free space from lastOffset to size.
8322  const VkDeviceSize unusedRangeSize = size - lastOffset;
8323  inoutStats.unusedSize += unusedRangeSize;
8324  ++inoutStats.unusedRangeCount;
8325  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8326  }
8327 
8328  // End of loop.
8329  lastOffset = size;
8330  }
8331  }
8332  }
8333 }
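/*
AddPoolStats() above accumulates into the VmaPoolStats structure returned by
vmaGetPoolStats(). A minimal sketch, assuming existing `allocator` and `pool`
handles:

\code
VmaPoolStats poolStats = {};
vmaGetPoolStats(allocator, pool, &poolStats);
// unusedRangeSizeMax is the largest single free range - an upper bound on the
// biggest allocation that could still succeed without growing the pool.
\endcode
*/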
8334 
8335 #if VMA_STATS_STRING_ENABLED
8336 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8337 {
8338  const VkDeviceSize size = GetSize();
8339  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8340  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8341  const size_t suballoc1stCount = suballocations1st.size();
8342  const size_t suballoc2ndCount = suballocations2nd.size();
8343 
8344  // FIRST PASS
8345 
8346  size_t unusedRangeCount = 0;
8347  VkDeviceSize usedBytes = 0;
8348 
8349  VkDeviceSize lastOffset = 0;
8350 
8351  size_t alloc2ndCount = 0;
8352  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8353  {
8354  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8355  size_t nextAlloc2ndIndex = 0;
8356  while(lastOffset < freeSpace2ndTo1stEnd)
8357  {
8358  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8359  while(nextAlloc2ndIndex < suballoc2ndCount &&
8360  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8361  {
8362  ++nextAlloc2ndIndex;
8363  }
8364 
8365  // Found non-null allocation.
8366  if(nextAlloc2ndIndex < suballoc2ndCount)
8367  {
8368  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8369 
8370  // 1. Process free space before this allocation.
8371  if(lastOffset < suballoc.offset)
8372  {
8373  // There is free space from lastOffset to suballoc.offset.
8374  ++unusedRangeCount;
8375  }
8376 
8377  // 2. Process this allocation.
8378  // There is allocation with suballoc.offset, suballoc.size.
8379  ++alloc2ndCount;
8380  usedBytes += suballoc.size;
8381 
8382  // 3. Prepare for next iteration.
8383  lastOffset = suballoc.offset + suballoc.size;
8384  ++nextAlloc2ndIndex;
8385  }
8386  // We are at the end.
8387  else
8388  {
8389  if(lastOffset < freeSpace2ndTo1stEnd)
8390  {
8391  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8392  ++unusedRangeCount;
8393  }
8394 
8395  // End of loop.
8396  lastOffset = freeSpace2ndTo1stEnd;
8397  }
8398  }
8399  }
8400 
8401  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8402  size_t alloc1stCount = 0;
8403  const VkDeviceSize freeSpace1stTo2ndEnd =
8404  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8405  while(lastOffset < freeSpace1stTo2ndEnd)
8406  {
8407  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8408  while(nextAlloc1stIndex < suballoc1stCount &&
8409  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8410  {
8411  ++nextAlloc1stIndex;
8412  }
8413 
8414  // Found non-null allocation.
8415  if(nextAlloc1stIndex < suballoc1stCount)
8416  {
8417  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8418 
8419  // 1. Process free space before this allocation.
8420  if(lastOffset < suballoc.offset)
8421  {
8422  // There is free space from lastOffset to suballoc.offset.
8423  ++unusedRangeCount;
8424  }
8425 
8426  // 2. Process this allocation.
8427  // There is allocation with suballoc.offset, suballoc.size.
8428  ++alloc1stCount;
8429  usedBytes += suballoc.size;
8430 
8431  // 3. Prepare for next iteration.
8432  lastOffset = suballoc.offset + suballoc.size;
8433  ++nextAlloc1stIndex;
8434  }
8435  // We are at the end.
8436  else
8437  {
8438  if(lastOffset < freeSpace1stTo2ndEnd)
8439  {
8440  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8441  ++unusedRangeCount;
8442  }
8443 
8444  // End of loop.
8445  lastOffset = freeSpace1stTo2ndEnd;
8446  }
8447  }
8448 
8449  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8450  {
8451  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8452  while(lastOffset < size)
8453  {
8454  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8455  while(nextAlloc2ndIndex != SIZE_MAX &&
8456  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8457  {
8458  --nextAlloc2ndIndex;
8459  }
8460 
8461  // Found non-null allocation.
8462  if(nextAlloc2ndIndex != SIZE_MAX)
8463  {
8464  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8465 
8466  // 1. Process free space before this allocation.
8467  if(lastOffset < suballoc.offset)
8468  {
8469  // There is free space from lastOffset to suballoc.offset.
8470  ++unusedRangeCount;
8471  }
8472 
8473  // 2. Process this allocation.
8474  // There is allocation with suballoc.offset, suballoc.size.
8475  ++alloc2ndCount;
8476  usedBytes += suballoc.size;
8477 
8478  // 3. Prepare for next iteration.
8479  lastOffset = suballoc.offset + suballoc.size;
8480  --nextAlloc2ndIndex;
8481  }
8482  // We are at the end.
8483  else
8484  {
8485  if(lastOffset < size)
8486  {
8487  // There is free space from lastOffset to size.
8488  ++unusedRangeCount;
8489  }
8490 
8491  // End of loop.
8492  lastOffset = size;
8493  }
8494  }
8495  }
8496 
8497  const VkDeviceSize unusedBytes = size - usedBytes;
8498  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8499 
8500  // SECOND PASS
8501  lastOffset = 0;
8502 
8503  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8504  {
8505  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8506  size_t nextAlloc2ndIndex = 0;
8507  while(lastOffset < freeSpace2ndTo1stEnd)
8508  {
8509  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8510  while(nextAlloc2ndIndex < suballoc2ndCount &&
8511  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8512  {
8513  ++nextAlloc2ndIndex;
8514  }
8515 
8516  // Found non-null allocation.
8517  if(nextAlloc2ndIndex < suballoc2ndCount)
8518  {
8519  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8520 
8521  // 1. Process free space before this allocation.
8522  if(lastOffset < suballoc.offset)
8523  {
8524  // There is free space from lastOffset to suballoc.offset.
8525  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8526  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8527  }
8528 
8529  // 2. Process this allocation.
8530  // There is allocation with suballoc.offset, suballoc.size.
8531  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8532 
8533  // 3. Prepare for next iteration.
8534  lastOffset = suballoc.offset + suballoc.size;
8535  ++nextAlloc2ndIndex;
8536  }
8537  // We are at the end.
8538  else
8539  {
8540  if(lastOffset < freeSpace2ndTo1stEnd)
8541  {
8542  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8543  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8544  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8545  }
8546 
8547  // End of loop.
8548  lastOffset = freeSpace2ndTo1stEnd;
8549  }
8550  }
8551  }
8552 
8553  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8554  while(lastOffset < freeSpace1stTo2ndEnd)
8555  {
8556  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8557  while(nextAlloc1stIndex < suballoc1stCount &&
8558  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8559  {
8560  ++nextAlloc1stIndex;
8561  }
8562 
8563  // Found non-null allocation.
8564  if(nextAlloc1stIndex < suballoc1stCount)
8565  {
8566  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8567 
8568  // 1. Process free space before this allocation.
8569  if(lastOffset < suballoc.offset)
8570  {
8571  // There is free space from lastOffset to suballoc.offset.
8572  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8573  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8574  }
8575 
8576  // 2. Process this allocation.
8577  // There is allocation with suballoc.offset, suballoc.size.
8578  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8579 
8580  // 3. Prepare for next iteration.
8581  lastOffset = suballoc.offset + suballoc.size;
8582  ++nextAlloc1stIndex;
8583  }
8584  // We are at the end.
8585  else
8586  {
8587  if(lastOffset < freeSpace1stTo2ndEnd)
8588  {
8589  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8590  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8591  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8592  }
8593 
8594  // End of loop.
8595  lastOffset = freeSpace1stTo2ndEnd;
8596  }
8597  }
8598 
8599  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8600  {
8601  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8602  while(lastOffset < size)
8603  {
8604  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8605  while(nextAlloc2ndIndex != SIZE_MAX &&
8606  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8607  {
8608  --nextAlloc2ndIndex;
8609  }
8610 
8611  // Found non-null allocation.
8612  if(nextAlloc2ndIndex != SIZE_MAX)
8613  {
8614  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8615 
8616  // 1. Process free space before this allocation.
8617  if(lastOffset < suballoc.offset)
8618  {
8619  // There is free space from lastOffset to suballoc.offset.
8620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8621  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8622  }
8623 
8624  // 2. Process this allocation.
8625  // There is allocation with suballoc.offset, suballoc.size.
8626  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8627 
8628  // 3. Prepare for next iteration.
8629  lastOffset = suballoc.offset + suballoc.size;
8630  --nextAlloc2ndIndex;
8631  }
8632  // We are at the end.
8633  else
8634  {
8635  if(lastOffset < size)
8636  {
8637  // There is free space from lastOffset to size.
8638  const VkDeviceSize unusedRangeSize = size - lastOffset;
8639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8640  }
8641 
8642  // End of loop.
8643  lastOffset = size;
8644  }
8645  }
8646  }
8647 
8648  PrintDetailedMap_End(json);
8649 }
8650 #endif // #if VMA_STATS_STRING_ENABLED
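/*
PrintDetailedMap() is reached through vmaBuildStatsString() when the detailed
map is requested. A minimal sketch, assuming an existing `allocator`:

\code
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map.
// statsString now holds a JSON document describing every block and suballocation;
// dump it to a log or file, then release it.
vmaFreeStatsString(allocator, statsString);
\endcode
*/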
8651 
8652 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8653  uint32_t currentFrameIndex,
8654  uint32_t frameInUseCount,
8655  VkDeviceSize bufferImageGranularity,
8656  VkDeviceSize allocSize,
8657  VkDeviceSize allocAlignment,
8658  bool upperAddress,
8659  VmaSuballocationType allocType,
8660  bool canMakeOtherLost,
8661  uint32_t strategy,
8662  VmaAllocationRequest* pAllocationRequest)
8663 {
8664  VMA_ASSERT(allocSize > 0);
8665  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8666  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8667  VMA_HEAVY_ASSERT(Validate());
8668 
8669  const VkDeviceSize size = GetSize();
8670  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8671  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8672 
8673  if(upperAddress)
8674  {
8675  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8676  {
8677  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8678  return false;
8679  }
8680 
8681  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8682  if(allocSize > size)
8683  {
8684  return false;
8685  }
8686  VkDeviceSize resultBaseOffset = size - allocSize;
8687  if(!suballocations2nd.empty())
8688  {
8689  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8690  resultBaseOffset = lastSuballoc.offset - allocSize;
8691  if(allocSize > lastSuballoc.offset)
8692  {
8693  return false;
8694  }
8695  }
8696 
8697  // Start from offset equal to end of free space.
8698  VkDeviceSize resultOffset = resultBaseOffset;
8699 
8700  // Apply VMA_DEBUG_MARGIN at the end.
8701  if(VMA_DEBUG_MARGIN > 0)
8702  {
8703  if(resultOffset < VMA_DEBUG_MARGIN)
8704  {
8705  return false;
8706  }
8707  resultOffset -= VMA_DEBUG_MARGIN;
8708  }
8709 
8710  // Apply alignment.
8711  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8712 
8713  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8714  // Make bigger alignment if necessary.
8715  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8716  {
8717  bool bufferImageGranularityConflict = false;
8718  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8719  {
8720  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8721  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8722  {
8723  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8724  {
8725  bufferImageGranularityConflict = true;
8726  break;
8727  }
8728  }
8729  else
8730  // Already on previous page.
8731  break;
8732  }
8733  if(bufferImageGranularityConflict)
8734  {
8735  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8736  }
8737  }
8738 
8739  // There is enough free space.
8740  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8741  suballocations1st.back().offset + suballocations1st.back().size :
8742  0;
8743  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8744  {
8745  // Check previous suballocations for BufferImageGranularity conflicts.
8746  // If conflict exists, allocation cannot be made here.
8747  if(bufferImageGranularity > 1)
8748  {
8749  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8750  {
8751  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8752  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8753  {
8754  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8755  {
8756  return false;
8757  }
8758  }
8759  else
8760  {
8761  // Already on next page.
8762  break;
8763  }
8764  }
8765  }
8766 
8767  // All tests passed: Success.
8768  pAllocationRequest->offset = resultOffset;
8769  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8770  pAllocationRequest->sumItemSize = 0;
8771  // pAllocationRequest->item unused.
8772  pAllocationRequest->itemsToMakeLostCount = 0;
8773  return true;
8774  }
8775  }
8776  else // !upperAddress
8777  {
8778  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8779  {
8780  // Try to allocate at the end of 1st vector.
8781 
8782  VkDeviceSize resultBaseOffset = 0;
8783  if(!suballocations1st.empty())
8784  {
8785  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8786  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8787  }
8788 
8789  // Start from offset equal to beginning of free space.
8790  VkDeviceSize resultOffset = resultBaseOffset;
8791 
8792  // Apply VMA_DEBUG_MARGIN at the beginning.
8793  if(VMA_DEBUG_MARGIN > 0)
8794  {
8795  resultOffset += VMA_DEBUG_MARGIN;
8796  }
8797 
8798  // Apply alignment.
8799  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8800 
8801  // Check previous suballocations for BufferImageGranularity conflicts.
8802  // Make bigger alignment if necessary.
8803  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8804  {
8805  bool bufferImageGranularityConflict = false;
8806  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8807  {
8808  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8809  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8810  {
8811  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8812  {
8813  bufferImageGranularityConflict = true;
8814  break;
8815  }
8816  }
8817  else
8818  // Already on previous page.
8819  break;
8820  }
8821  if(bufferImageGranularityConflict)
8822  {
8823  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8824  }
8825  }
8826 
8827  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8828  suballocations2nd.back().offset : size;
8829 
8830  // There is enough free space at the end after alignment.
8831  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8832  {
8833  // Check next suballocations for BufferImageGranularity conflicts.
8834  // If conflict exists, allocation cannot be made here.
8835  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8836  {
8837  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8838  {
8839  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8840  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8841  {
8842  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8843  {
8844  return false;
8845  }
8846  }
8847  else
8848  {
8849  // Already on previous page.
8850  break;
8851  }
8852  }
8853  }
8854 
8855  // All tests passed: Success.
8856  pAllocationRequest->offset = resultOffset;
8857  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8858  pAllocationRequest->sumItemSize = 0;
8859  // pAllocationRequest->item unused.
8860  pAllocationRequest->itemsToMakeLostCount = 0;
8861  return true;
8862  }
8863  }
8864 
8865  // Wrap-around: try to allocate at the end of the 2nd vector, treating the
8866  // beginning of the 1st vector as the end of the free space.
8867  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8868  {
8869  VMA_ASSERT(!suballocations1st.empty());
8870 
8871  VkDeviceSize resultBaseOffset = 0;
8872  if(!suballocations2nd.empty())
8873  {
8874  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8875  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8876  }
8877 
8878  // Start from offset equal to beginning of free space.
8879  VkDeviceSize resultOffset = resultBaseOffset;
8880 
8881  // Apply VMA_DEBUG_MARGIN at the beginning.
8882  if(VMA_DEBUG_MARGIN > 0)
8883  {
8884  resultOffset += VMA_DEBUG_MARGIN;
8885  }
8886 
8887  // Apply alignment.
8888  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8889 
8890  // Check previous suballocations for BufferImageGranularity conflicts.
8891  // Make bigger alignment if necessary.
8892  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8893  {
8894  bool bufferImageGranularityConflict = false;
8895  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8896  {
8897  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8898  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8899  {
8900  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8901  {
8902  bufferImageGranularityConflict = true;
8903  break;
8904  }
8905  }
8906  else
8907  // Already on previous page.
8908  break;
8909  }
8910  if(bufferImageGranularityConflict)
8911  {
8912  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8913  }
8914  }
8915 
8916  pAllocationRequest->itemsToMakeLostCount = 0;
8917  pAllocationRequest->sumItemSize = 0;
8918  size_t index1st = m_1stNullItemsBeginCount;
8919 
8920  if(canMakeOtherLost)
8921  {
8922  while(index1st < suballocations1st.size() &&
8923  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8924  {
8925  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8926  const VmaSuballocation& suballoc = suballocations1st[index1st];
8927  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8928  {
8929  // No problem.
8930  }
8931  else
8932  {
8933  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8934  if(suballoc.hAllocation->CanBecomeLost() &&
8935  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8936  {
8937  ++pAllocationRequest->itemsToMakeLostCount;
8938  pAllocationRequest->sumItemSize += suballoc.size;
8939  }
8940  else
8941  {
8942  return false;
8943  }
8944  }
8945  ++index1st;
8946  }
8947 
8948  // Check next suballocations for BufferImageGranularity conflicts.
8949  // If conflict exists, we must mark more allocations lost or fail.
8950  if(bufferImageGranularity > 1)
8951  {
8952  while(index1st < suballocations1st.size())
8953  {
8954  const VmaSuballocation& suballoc = suballocations1st[index1st];
8955  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8956  {
8957  if(suballoc.hAllocation != VK_NULL_HANDLE)
8958  {
8959  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8960  if(suballoc.hAllocation->CanBecomeLost() &&
8961  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8962  {
8963  ++pAllocationRequest->itemsToMakeLostCount;
8964  pAllocationRequest->sumItemSize += suballoc.size;
8965  }
8966  else
8967  {
8968  return false;
8969  }
8970  }
8971  }
8972  else
8973  {
8974  // Already on next page.
8975  break;
8976  }
8977  ++index1st;
8978  }
8979  }
8980  }
8981 
8982  // There is enough free space at the end after alignment.
8983  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8984  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8985  {
8986  // Check next suballocations for BufferImageGranularity conflicts.
8987  // If conflict exists, allocation cannot be made here.
8988  if(bufferImageGranularity > 1)
8989  {
8990  for(size_t nextSuballocIndex = index1st;
8991  nextSuballocIndex < suballocations1st.size();
8992  nextSuballocIndex++)
8993  {
8994  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8995  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8996  {
8997  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8998  {
8999  return false;
9000  }
9001  }
9002  else
9003  {
9004  // Already on next page.
9005  break;
9006  }
9007  }
9008  }
9009 
9010  // All tests passed: Success.
9011  pAllocationRequest->offset = resultOffset;
9012  pAllocationRequest->sumFreeSize =
9013  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9014  - resultBaseOffset
9015  - pAllocationRequest->sumItemSize;
9016  // pAllocationRequest->item unused.
9017  return true;
9018  }
9019  }
9020  }
9021 
9022  return false;
9023 }
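/*
The `upperAddress` branch above is driven by
VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, which is valid only in a custom pool
created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. A minimal sketch, assuming
`allocator` exists and `memTypeIndex` was chosen beforehand:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1; // Linear algorithm operates on a single block.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.pool = pool;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Upper stack.
\endcode

Allocations made without the flag grow from the bottom of the block, those with
it grow from the top, forming the double stack handled by the code above.
*/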
9024 
9025 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9026  uint32_t currentFrameIndex,
9027  uint32_t frameInUseCount,
9028  VmaAllocationRequest* pAllocationRequest)
9029 {
9030  if(pAllocationRequest->itemsToMakeLostCount == 0)
9031  {
9032  return true;
9033  }
9034 
9035  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9036 
9037  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9038  size_t index1st = m_1stNullItemsBeginCount;
9039  size_t madeLostCount = 0;
9040  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9041  {
9042  VMA_ASSERT(index1st < suballocations1st.size());
9043  VmaSuballocation& suballoc = suballocations1st[index1st];
9044  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9045  {
9046  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9047  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9048  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9049  {
9050  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9051  suballoc.hAllocation = VK_NULL_HANDLE;
9052  m_SumFreeSize += suballoc.size;
9053  ++m_1stNullItemsMiddleCount;
9054  ++madeLostCount;
9055  }
9056  else
9057  {
9058  return false;
9059  }
9060  }
9061  ++index1st;
9062  }
9063 
9064  CleanupAfterFree();
9065  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9066 
9067  return true;
9068 }
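/*
Making allocations lost applies only to allocations created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. A minimal per-frame sketch, assuming
existing `allocator` and `allocation` handles and a frame counter `frameIndex`:

\code
vmaSetCurrentFrameIndex(allocator, frameIndex);
// Marks the allocation as used in this frame, or reports that it was lost:
if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation was made lost; its contents are gone - recreate the resource.
}
\endcode
*/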
9069 
9070 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9071 {
9072  uint32_t lostAllocationCount = 0;
9073 
9074  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9075  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9076  {
9077  VmaSuballocation& suballoc = suballocations1st[i];
9078  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9079  suballoc.hAllocation->CanBecomeLost() &&
9080  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9081  {
9082  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9083  suballoc.hAllocation = VK_NULL_HANDLE;
9084  ++m_1stNullItemsMiddleCount;
9085  m_SumFreeSize += suballoc.size;
9086  ++lostAllocationCount;
9087  }
9088  }
9089 
9090  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9091  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9092  {
9093  VmaSuballocation& suballoc = suballocations2nd[i];
9094  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9095  suballoc.hAllocation->CanBecomeLost() &&
9096  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9097  {
9098  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9099  suballoc.hAllocation = VK_NULL_HANDLE;
9100  ++m_2ndNullItemsCount;
9101  ++lostAllocationCount;
9102  }
9103  }
9104 
9105  if(lostAllocationCount)
9106  {
9107  CleanupAfterFree();
9108  }
9109 
9110  return lostAllocationCount;
9111 }
9112 
9113 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9114 {
9115  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9116  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9117  {
9118  const VmaSuballocation& suballoc = suballocations1st[i];
9119  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9120  {
9121  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9122  {
9123  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9124  return VK_ERROR_VALIDATION_FAILED_EXT;
9125  }
9126  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9127  {
9128  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9129  return VK_ERROR_VALIDATION_FAILED_EXT;
9130  }
9131  }
9132  }
9133 
9134  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9135  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9136  {
9137  const VmaSuballocation& suballoc = suballocations2nd[i];
9138  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9139  {
9140  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9141  {
9142  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9143  return VK_ERROR_VALIDATION_FAILED_EXT;
9144  }
9145  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9146  {
9147  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9148  return VK_ERROR_VALIDATION_FAILED_EXT;
9149  }
9150  }
9151  }
9152 
9153  return VK_SUCCESS;
9154 }
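/*
The magic-value checks above are only active when the library is built with a
non-zero VMA_DEBUG_MARGIN and corruption detection enabled. A sketch:

\code
// Before including the implementation of this header:
#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1

// At runtime, validate the margins of all allocations in all memory types:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_ERROR_FEATURE_NOT_PRESENT means none of the requested memory types could
// be checked (corruption detection unavailable for them).
\endcode
*/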
9155 
9156 void VmaBlockMetadata_Linear::Alloc(
9157  const VmaAllocationRequest& request,
9158  VmaSuballocationType type,
9159  VkDeviceSize allocSize,
9160  bool upperAddress,
9161  VmaAllocation hAllocation)
9162 {
9163  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9164 
9165  if(upperAddress)
9166  {
9167  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9168  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9169  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9170  suballocations2nd.push_back(newSuballoc);
9171  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9172  }
9173  else
9174  {
9175  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9176 
9177  // First allocation.
9178  if(suballocations1st.empty())
9179  {
9180  suballocations1st.push_back(newSuballoc);
9181  }
9182  else
9183  {
9184  // New allocation at the end of 1st vector.
9185  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9186  {
9187  // Check if it fits before the end of the block.
9188  VMA_ASSERT(request.offset + allocSize <= GetSize());
9189  suballocations1st.push_back(newSuballoc);
9190  }
9191  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9192  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9193  {
9194  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9195 
9196  switch(m_2ndVectorMode)
9197  {
9198  case SECOND_VECTOR_EMPTY:
9199  // First allocation from second part ring buffer.
9200  VMA_ASSERT(suballocations2nd.empty());
9201  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9202  break;
9203  case SECOND_VECTOR_RING_BUFFER:
9204  // 2-part ring buffer is already started.
9205  VMA_ASSERT(!suballocations2nd.empty());
9206  break;
9207  case SECOND_VECTOR_DOUBLE_STACK:
9208  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9209  break;
9210  default:
9211  VMA_ASSERT(0);
9212  }
9213 
9214  suballocations2nd.push_back(newSuballoc);
9215  }
9216  else
9217  {
9218  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9219  }
9220  }
9221  }
9222 
9223  m_SumFreeSize -= newSuballoc.size;
9224 }
9225 
9226 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9227 {
9228  FreeAtOffset(allocation->GetOffset());
9229 }
9230 
9231 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9232 {
9233  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9234  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9235 
9236  if(!suballocations1st.empty())
9237  {
9238  // Freed allocation is the first (oldest) one in the 1st vector: mark it free and grow the null-item run at the beginning.
9239  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9240  if(firstSuballoc.offset == offset)
9241  {
9242  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9243  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9244  m_SumFreeSize += firstSuballoc.size;
9245  ++m_1stNullItemsBeginCount;
9246  CleanupAfterFree();
9247  return;
9248  }
9249  }
9250 
9251  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9252  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9253  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9254  {
9255  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9256  if(lastSuballoc.offset == offset)
9257  {
9258  m_SumFreeSize += lastSuballoc.size;
9259  suballocations2nd.pop_back();
9260  CleanupAfterFree();
9261  return;
9262  }
9263  }
9264  // Last allocation in 1st vector.
9265  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9266  {
9267  VmaSuballocation& lastSuballoc = suballocations1st.back();
9268  if(lastSuballoc.offset == offset)
9269  {
9270  m_SumFreeSize += lastSuballoc.size;
9271  suballocations1st.pop_back();
9272  CleanupAfterFree();
9273  return;
9274  }
9275  }
9276 
9277  // Item from the middle of 1st vector.
9278  {
9279  VmaSuballocation refSuballoc;
9280  refSuballoc.offset = offset;
9281  // Rest of the members stay uninitialized intentionally for better performance.
9282  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9283  suballocations1st.begin() + m_1stNullItemsBeginCount,
9284  suballocations1st.end(),
9285  refSuballoc);
9286  if(it != suballocations1st.end())
9287  {
9288  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9289  it->hAllocation = VK_NULL_HANDLE;
9290  ++m_1stNullItemsMiddleCount;
9291  m_SumFreeSize += it->size;
9292  CleanupAfterFree();
9293  return;
9294  }
9295  }
9296 
9297  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9298  {
9299  // Item from the middle of 2nd vector.
9300  VmaSuballocation refSuballoc;
9301  refSuballoc.offset = offset;
9302  // Rest of the members stay uninitialized intentionally for better performance.
9303  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9304  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9305  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9306  if(it != suballocations2nd.end())
9307  {
9308  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9309  it->hAllocation = VK_NULL_HANDLE;
9310  ++m_2ndNullItemsCount;
9311  m_SumFreeSize += it->size;
9312  CleanupAfterFree();
9313  return;
9314  }
9315  }
9316 
9317  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9318 }
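/*
VmaVectorFindSorted above is a binary search keyed on suballocation offset. The
same idea expressed with the standard library - a sketch, not the actual
helper:

\code
#include <algorithm>
#include <vector>

struct Item { VkDeviceSize offset; };

// Returns the element with exactly the given offset, or null if absent.
static Item* FindByOffset(std::vector<Item>& v, VkDeviceSize offset)
{
    std::vector<Item>::iterator it = std::lower_bound(v.begin(), v.end(), offset,
        [](const Item& a, VkDeviceSize off) { return a.offset < off; });
    return (it != v.end() && it->offset == offset) ? &*it : nullptr;
}
\endcode

For the upper stack of a double-stack block, offsets are sorted descending,
which is why the code above switches to VmaSuballocationOffsetGreater.
*/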
9319 
9320 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9321 {
9322  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9323  const size_t suballocCount = AccessSuballocations1st().size();
9324  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9325 }
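/*
The compaction heuristic above fires when more than 32 items exist and null
items make up at least 60% of the 1st vector (2n >= 3(s - n) is equivalent to
n >= 0.6s). For example, with s = 100 suballocations of which n = 60 are null:
60 * 2 = 120 >= (100 - 60) * 3 = 120, so compaction runs; with n = 59,
118 < 123 and the vector is left as-is.
*/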
9326 
9327 void VmaBlockMetadata_Linear::CleanupAfterFree()
9328 {
9329  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9330  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9331 
9332  if(IsEmpty())
9333  {
9334  suballocations1st.clear();
9335  suballocations2nd.clear();
9336  m_1stNullItemsBeginCount = 0;
9337  m_1stNullItemsMiddleCount = 0;
9338  m_2ndNullItemsCount = 0;
9339  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9340  }
9341  else
9342  {
9343  const size_t suballoc1stCount = suballocations1st.size();
9344  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9345  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9346 
9347  // Find more null items at the beginning of 1st vector.
9348  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9349  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9350  {
9351  ++m_1stNullItemsBeginCount;
9352  --m_1stNullItemsMiddleCount;
9353  }
9354 
9355  // Find more null items at the end of 1st vector.
9356  while(m_1stNullItemsMiddleCount > 0 &&
9357  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9358  {
9359  --m_1stNullItemsMiddleCount;
9360  suballocations1st.pop_back();
9361  }
9362 
9363  // Find more null items at the end of 2nd vector.
9364  while(m_2ndNullItemsCount > 0 &&
9365  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9366  {
9367  --m_2ndNullItemsCount;
9368  suballocations2nd.pop_back();
9369  }
9370 
9371  if(ShouldCompact1st())
9372  {
9373  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9374  size_t srcIndex = m_1stNullItemsBeginCount;
9375  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9376  {
9377  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9378  {
9379  ++srcIndex;
9380  }
9381  if(dstIndex != srcIndex)
9382  {
9383  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9384  }
9385  ++srcIndex;
9386  }
9387  suballocations1st.resize(nonNullItemCount);
9388  m_1stNullItemsBeginCount = 0;
9389  m_1stNullItemsMiddleCount = 0;
9390  }
9391 
9392  // 2nd vector became empty.
9393  if(suballocations2nd.empty())
9394  {
9395  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9396  }
9397 
9398  // 1st vector became empty.
9399  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9400  {
9401  suballocations1st.clear();
9402  m_1stNullItemsBeginCount = 0;
9403 
9404  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9405  {
9406  // Swap 1st with 2nd. Now 2nd is empty.
9407  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9408  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9409  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9410  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9411  {
9412  ++m_1stNullItemsBeginCount;
9413  --m_1stNullItemsMiddleCount;
9414  }
9415  m_2ndNullItemsCount = 0;
9416  m_1stVectorIndex ^= 1;
9417  }
9418  }
9419  }
9420 
9421  VMA_HEAVY_ASSERT(Validate());
9422 }
9423 
9424 
9425 ////////////////////////////////////////////////////////////////////////////////
9426 // class VmaBlockMetadata_Buddy
9427 
9428 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9429  VmaBlockMetadata(hAllocator),
9430  m_Root(VMA_NULL),
9431  m_AllocationCount(0),
9432  m_FreeCount(1),
9433  m_SumFreeSize(0)
9434 {
9435  memset(m_FreeList, 0, sizeof(m_FreeList));
9436 }
9437 
9438 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9439 {
9440  DeleteNode(m_Root);
9441 }
9442 
9443 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9444 {
9445  VmaBlockMetadata::Init(size);
9446 
9447  m_UsableSize = VmaPrevPow2(size);
9448  m_SumFreeSize = m_UsableSize;
9449 
9450  // Calculate m_LevelCount.
9451  m_LevelCount = 1;
9452  while(m_LevelCount < MAX_LEVELS &&
9453  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9454  {
9455  ++m_LevelCount;
9456  }
9457 
9458  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9459  rootNode->offset = 0;
9460  rootNode->type = Node::TYPE_FREE;
9461  rootNode->parent = VMA_NULL;
9462  rootNode->buddy = VMA_NULL;
9463 
9464  m_Root = rootNode;
9465  AddToFreeListFront(0, rootNode);
9466 }
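/*
A worked example of the sizing above: for a block of 200 MiB,
m_UsableSize = VmaPrevPow2(200 MiB) = 128 MiB, and levels halve from there
(128 MiB, 64 MiB, 32 MiB, ...) until the node size would drop below
MIN_NODE_SIZE or MAX_LEVELS is reached. The remaining 200 - 128 = 72 MiB is
reported as unusable space rather than suballocated.
*/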
9467 
9468 bool VmaBlockMetadata_Buddy::Validate() const
9469 {
9470  // Validate tree.
9471  ValidationContext ctx;
9472  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9473  {
9474  VMA_VALIDATE(false && "ValidateNode failed.");
9475  }
9476  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9477  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9478 
9479  // Validate free node lists.
9480  for(uint32_t level = 0; level < m_LevelCount; ++level)
9481  {
9482  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9483  m_FreeList[level].front->free.prev == VMA_NULL);
9484 
9485  for(Node* node = m_FreeList[level].front;
9486  node != VMA_NULL;
9487  node = node->free.next)
9488  {
9489  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9490 
9491  if(node->free.next == VMA_NULL)
9492  {
9493  VMA_VALIDATE(m_FreeList[level].back == node);
9494  }
9495  else
9496  {
9497  VMA_VALIDATE(node->free.next->free.prev == node);
9498  }
9499  }
9500  }
9501 
9502  // Validate that free lists at higher levels are empty.
9503  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9504  {
9505  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9506  }
9507 
9508  return true;
9509 }
9510 
9511 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9512 {
9513  for(uint32_t level = 0; level < m_LevelCount; ++level)
9514  {
9515  if(m_FreeList[level].front != VMA_NULL)
9516  {
9517  return LevelToNodeSize(level);
9518  }
9519  }
9520  return 0;
9521 }
9522 
9523 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9524 {
9525  const VkDeviceSize unusableSize = GetUnusableSize();
9526 
9527  outInfo.blockCount = 1;
9528 
9529  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9530  outInfo.usedBytes = outInfo.unusedBytes = 0;
9531 
9532  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9533  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9534  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9535 
9536  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9537 
9538  if(unusableSize > 0)
9539  {
9540  ++outInfo.unusedRangeCount;
9541  outInfo.unusedBytes += unusableSize;
9542  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9543  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9544  }
9545 }
9546 
9547 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9548 {
9549  const VkDeviceSize unusableSize = GetUnusableSize();
9550 
9551  inoutStats.size += GetSize();
9552  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9553  inoutStats.allocationCount += m_AllocationCount;
9554  inoutStats.unusedRangeCount += m_FreeCount;
9555  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9556 
9557  if(unusableSize > 0)
9558  {
9559  ++inoutStats.unusedRangeCount;
9560  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9561  }
9562 }
9563 
9564 #if VMA_STATS_STRING_ENABLED
9565 
9566 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9567 {
9568  // TODO optimize
9569  VmaStatInfo stat;
9570  CalcAllocationStatInfo(stat);
9571 
9572  PrintDetailedMap_Begin(
9573  json,
9574  stat.unusedBytes,
9575  stat.allocationCount,
9576  stat.unusedRangeCount);
9577 
9578  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9579 
9580  const VkDeviceSize unusableSize = GetUnusableSize();
9581  if(unusableSize > 0)
9582  {
9583  PrintDetailedMap_UnusedRange(json,
9584  m_UsableSize, // offset
9585  unusableSize); // size
9586  }
9587 
9588  PrintDetailedMap_End(json);
9589 }
9590 
9591 #endif // #if VMA_STATS_STRING_ENABLED
9592 
9593 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9594  uint32_t currentFrameIndex,
9595  uint32_t frameInUseCount,
9596  VkDeviceSize bufferImageGranularity,
9597  VkDeviceSize allocSize,
9598  VkDeviceSize allocAlignment,
9599  bool upperAddress,
9600  VmaSuballocationType allocType,
9601  bool canMakeOtherLost,
9602  uint32_t strategy,
9603  VmaAllocationRequest* pAllocationRequest)
9604 {
9605  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9606 
9607  // Simple way to respect bufferImageGranularity: whenever the allocation might
9608  // contain an OPTIMAL image, round alignment and size up to the granularity. May be optimized some day.
9609  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9610  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9611  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9612  {
9613  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9614  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9615  }
9616 
9617  if(allocSize > m_UsableSize)
9618  {
9619  return false;
9620  }
9621 
9622  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9623  for(uint32_t level = targetLevel + 1; level--; )
9624  {
9625  for(Node* freeNode = m_FreeList[level].front;
9626  freeNode != VMA_NULL;
9627  freeNode = freeNode->free.next)
9628  {
9629  if(freeNode->offset % allocAlignment == 0)
9630  {
9631  pAllocationRequest->offset = freeNode->offset;
9632  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9633  pAllocationRequest->sumItemSize = 0;
9634  pAllocationRequest->itemsToMakeLostCount = 0;
9635  pAllocationRequest->customData = (void*)(uintptr_t)level;
9636  return true;
9637  }
9638  }
9639  }
9640 
9641  return false;
9642 }
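/*
This metadata is selected by creating a custom pool with
VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT. A minimal sketch, assuming `allocator`
exists and `memTypeIndex` was chosen beforehand:

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Power of 2 leaves no unusable tail.

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode

Sizes are rounded up to power-of-2 node sizes, trading internal fragmentation
for O(log N) allocation and free.
*/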
9643 
9644 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9645  uint32_t currentFrameIndex,
9646  uint32_t frameInUseCount,
9647  VmaAllocationRequest* pAllocationRequest)
9648 {
9649  /*
9650  Lost allocations are not supported in buddy allocator at the moment.
9651  Support might be added in the future.
9652  */
9653  return pAllocationRequest->itemsToMakeLostCount == 0;
9654 }
9655 
9656 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9657 {
9658  /*
9659  Lost allocations are not supported in buddy allocator at the moment.
9660  Support might be added in the future.
9661  */
9662  return 0;
9663 }
9664 
9665 void VmaBlockMetadata_Buddy::Alloc(
9666  const VmaAllocationRequest& request,
9667  VmaSuballocationType type,
9668  VkDeviceSize allocSize,
9669  bool upperAddress,
9670  VmaAllocation hAllocation)
9671 {
9672  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9673  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9674 
9675  Node* currNode = m_FreeList[currLevel].front;
9676  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9677  while(currNode->offset != request.offset)
9678  {
9679  currNode = currNode->free.next;
9680  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9681  }
9682 
9683  // Go down, splitting free nodes.
9684  while(currLevel < targetLevel)
9685  {
9686  // currNode is already first free node at currLevel.
9687  // Remove it from list of free nodes at this currLevel.
9688  RemoveFromFreeList(currLevel, currNode);
9689 
9690  const uint32_t childrenLevel = currLevel + 1;
9691 
9692  // Create two free sub-nodes.
9693  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9694  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9695 
9696  leftChild->offset = currNode->offset;
9697  leftChild->type = Node::TYPE_FREE;
9698  leftChild->parent = currNode;
9699  leftChild->buddy = rightChild;
9700 
9701  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9702  rightChild->type = Node::TYPE_FREE;
9703  rightChild->parent = currNode;
9704  rightChild->buddy = leftChild;
9705 
9706  // Convert current currNode to split type.
9707  currNode->type = Node::TYPE_SPLIT;
9708  currNode->split.leftChild = leftChild;
9709 
9710  // Add child nodes to free list. Order is important!
9711  AddToFreeListFront(childrenLevel, rightChild);
9712  AddToFreeListFront(childrenLevel, leftChild);
9713 
9714  ++m_FreeCount;
9715  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9716  ++currLevel;
9717  currNode = m_FreeList[currLevel].front;
9718 
9719  /*
9720  We can be sure that currNode, as the left child of the node previously split,
9721  also fulfills the alignment requirement.
9722  */
9723  }
9724 
9725  // Remove from free list.
9726  VMA_ASSERT(currLevel == targetLevel &&
9727  currNode != VMA_NULL &&
9728  currNode->type == Node::TYPE_FREE);
9729  RemoveFromFreeList(currLevel, currNode);
9730 
9731  // Convert to allocation node.
9732  currNode->type = Node::TYPE_ALLOCATION;
9733  currNode->allocation.alloc = hAllocation;
9734 
9735  ++m_AllocationCount;
9736  --m_FreeCount;
9737  m_SumFreeSize -= allocSize;
9738 }
9739 
9740 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9741 {
9742  if(node->type == Node::TYPE_SPLIT)
9743  {
9744  DeleteNode(node->split.leftChild->buddy);
9745  DeleteNode(node->split.leftChild);
9746  }
9747 
9748  vma_delete(GetAllocationCallbacks(), node);
9749 }
9750 
9751 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9752 {
9753  VMA_VALIDATE(level < m_LevelCount);
9754  VMA_VALIDATE(curr->parent == parent);
9755  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9756  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9757  switch(curr->type)
9758  {
9759  case Node::TYPE_FREE:
9760  // curr->free.prev, next are validated separately.
9761  ctx.calculatedSumFreeSize += levelNodeSize;
9762  ++ctx.calculatedFreeCount;
9763  break;
9764  case Node::TYPE_ALLOCATION:
9765  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9766  ++ctx.calculatedAllocationCount;
9767  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9768  break;
9769  case Node::TYPE_SPLIT:
9770  {
9771  const uint32_t childrenLevel = level + 1;
9772  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9773  const Node* const leftChild = curr->split.leftChild;
9774  VMA_VALIDATE(leftChild != VMA_NULL);
9775  VMA_VALIDATE(leftChild->offset == curr->offset);
9776  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9777  {
9778  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9779  }
9780  const Node* const rightChild = leftChild->buddy;
9781  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9782  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9783  {
9784  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9785  }
9786  }
9787  break;
9788  default:
9789  return false;
9790  }
9791 
9792  return true;
9793 }
9794 
9795 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9796 {
9797  // This could be optimized, e.g. by using std::bit_width (proposed as std::log2p1) from C++20.
9798  uint32_t level = 0;
9799  VkDeviceSize currLevelNodeSize = m_UsableSize;
9800  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9801  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9802  {
9803  ++level;
9804  currLevelNodeSize = nextLevelNodeSize;
9805  nextLevelNodeSize = currLevelNodeSize >> 1;
9806  }
9807  return level;
9808 }
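/*
A minimal standalone sketch of the same size-to-level mapping, assuming a
power-of-2 usableSize (hypothetical stand-in for m_UsableSize) and levelCount
levels: descend while the next, half-sized level still fits the request.

    #include <cstdint>

    uint32_t AllocSizeToLevel(uint64_t allocSize, uint64_t usableSize,
        uint32_t levelCount)
    {
        uint32_t level = 0;
        // Stop when halving once more would make the node too small,
        // or when the deepest level is reached.
        while(allocSize <= (usableSize >> (level + 1)) && level + 1 < levelCount)
        {
            ++level;
        }
        return level;
    }

For usableSize = 256 and levelCount = 4: sizes 129..256 map to level 0,
65..128 to level 1, 33..64 to level 2, and anything <= 32 to level 3.
*/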
9809 
9810 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9811 {
9812  // Find node and level.
9813  Node* node = m_Root;
9814  VkDeviceSize nodeOffset = 0;
9815  uint32_t level = 0;
9816  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9817  while(node->type == Node::TYPE_SPLIT)
9818  {
9819  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9820  if(offset < nodeOffset + nextLevelSize)
9821  {
9822  node = node->split.leftChild;
9823  }
9824  else
9825  {
9826  node = node->split.leftChild->buddy;
9827  nodeOffset += nextLevelSize;
9828  }
9829  ++level;
9830  levelNodeSize = nextLevelSize;
9831  }
9832 
9833  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9834  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9835 
9836  ++m_FreeCount;
9837  --m_AllocationCount;
9838  m_SumFreeSize += alloc->GetSize();
9839 
9840  node->type = Node::TYPE_FREE;
9841 
9842  // Join free nodes if possible.
9843  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9844  {
9845  RemoveFromFreeList(level, node->buddy);
9846  Node* const parent = node->parent;
9847 
9848  vma_delete(GetAllocationCallbacks(), node->buddy);
9849  vma_delete(GetAllocationCallbacks(), node);
9850  parent->type = Node::TYPE_FREE;
9851 
9852  node = parent;
9853  --level;
9854  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9855  --m_FreeCount;
9856  }
9857 
9858  AddToFreeListFront(level, node);
9859 }
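/*
The descent above is a binary search over offsets: at each split node the
freed offset is compared against the midpoint of the node's range to choose
the left or right child. A hedged standalone sketch of the same walk, over a
simplified node type (the real code reaches the right child via
leftChild->buddy instead of an explicit pointer):

    #include <cstdint>

    struct Node
    {
        bool isSplit;
        Node* leftChild;
        Node* rightChild;
    };

    // Returns the leaf covering `offset`, where the root spans [0, rootSize).
    Node* FindLeaf(Node* root, uint64_t rootSize, uint64_t offset)
    {
        Node* node = root;
        uint64_t nodeOffset = 0;
        uint64_t nodeSize = rootSize;
        while(node->isSplit)
        {
            const uint64_t childSize = nodeSize >> 1;
            if(offset < nodeOffset + childSize)
            {
                node = node->leftChild;
            }
            else
            {
                node = node->rightChild;
                nodeOffset += childSize;
            }
            nodeSize = childSize;
        }
        return node;
    }

After the leaf is freed, the merge loop replaces a free node and its free
buddy with their parent, one level per iteration - hence the single
--m_FreeCount per merge (two free children collapse into one free parent).
*/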
9860 
9861 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9862 {
9863  switch(node->type)
9864  {
9865  case Node::TYPE_FREE:
9866  ++outInfo.unusedRangeCount;
9867  outInfo.unusedBytes += levelNodeSize;
9868  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9869  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9870  break;
9871  case Node::TYPE_ALLOCATION:
9872  {
9873  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9874  ++outInfo.allocationCount;
9875  outInfo.usedBytes += allocSize;
9876  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9877  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9878 
9879  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9880  if(unusedRangeSize > 0)
9881  {
9882  ++outInfo.unusedRangeCount;
9883  outInfo.unusedBytes += unusedRangeSize;
9884  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9886  }
9887  }
9888  break;
9889  case Node::TYPE_SPLIT:
9890  {
9891  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9892  const Node* const leftChild = node->split.leftChild;
9893  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9894  const Node* const rightChild = leftChild->buddy;
9895  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9896  }
9897  break;
9898  default:
9899  VMA_ASSERT(0);
9900  }
9901 }
9902 
9903 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9904 {
9905  VMA_ASSERT(node->type == Node::TYPE_FREE);
9906 
9907  // List is empty.
9908  Node* const frontNode = m_FreeList[level].front;
9909  if(frontNode == VMA_NULL)
9910  {
9911  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9912  node->free.prev = node->free.next = VMA_NULL;
9913  m_FreeList[level].front = m_FreeList[level].back = node;
9914  }
9915  else
9916  {
9917  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9918  node->free.prev = VMA_NULL;
9919  node->free.next = frontNode;
9920  frontNode->free.prev = node;
9921  m_FreeList[level].front = node;
9922  }
9923 }
9924 
9925 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9926 {
9927  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9928 
9929  // It is at the front.
9930  if(node->free.prev == VMA_NULL)
9931  {
9932  VMA_ASSERT(m_FreeList[level].front == node);
9933  m_FreeList[level].front = node->free.next;
9934  }
9935  else
9936  {
9937  Node* const prevFreeNode = node->free.prev;
9938  VMA_ASSERT(prevFreeNode->free.next == node);
9939  prevFreeNode->free.next = node->free.next;
9940  }
9941 
9942  // It is at the back.
9943  if(node->free.next == VMA_NULL)
9944  {
9945  VMA_ASSERT(m_FreeList[level].back == node);
9946  m_FreeList[level].back = node->free.prev;
9947  }
9948  else
9949  {
9950  Node* const nextFreeNode = node->free.next;
9951  VMA_ASSERT(nextFreeNode->free.prev == node);
9952  nextFreeNode->free.prev = node->free.prev;
9953  }
9954 }
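/*
The per-level free lists are intrusive doubly-linked lists: prev/next live
inside the node itself, so insertion and removal are O(1) and need no
separate list nodes. The same push-front/unlink pattern, standalone (names
hypothetical):

    struct FreeNode
    {
        FreeNode* prev = nullptr;
        FreeNode* next = nullptr;
    };

    struct FreeList
    {
        FreeNode* front = nullptr;
        FreeNode* back = nullptr;

        void PushFront(FreeNode* node)
        {
            node->prev = nullptr;
            node->next = front;
            if(front != nullptr) { front->prev = node; } else { back = node; }
            front = node;
        }

        void Remove(FreeNode* node)
        {
            // Re-link the neighbors; update front/back at the list ends.
            if(node->prev != nullptr) { node->prev->next = node->next; }
            else { front = node->next; }
            if(node->next != nullptr) { node->next->prev = node->prev; }
            else { back = node->prev; }
        }
    };
*/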
9955 
9956 #if VMA_STATS_STRING_ENABLED
9957 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9958 {
9959  switch(node->type)
9960  {
9961  case Node::TYPE_FREE:
9962  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9963  break;
9964  case Node::TYPE_ALLOCATION:
9965  {
9966  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9967  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9968  if(allocSize < levelNodeSize)
9969  {
9970  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9971  }
9972  }
9973  break;
9974  case Node::TYPE_SPLIT:
9975  {
9976  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9977  const Node* const leftChild = node->split.leftChild;
9978  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9979  const Node* const rightChild = leftChild->buddy;
9980  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9981  }
9982  break;
9983  default:
9984  VMA_ASSERT(0);
9985  }
9986 }
9987 #endif // #if VMA_STATS_STRING_ENABLED
9988 
9989 
9990 ////////////////////////////////////////////////////////////////////////////////
9991 // class VmaDeviceMemoryBlock
9992 
9993 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
9994  m_pMetadata(VMA_NULL),
9995  m_MemoryTypeIndex(UINT32_MAX),
9996  m_Id(0),
9997  m_hMemory(VK_NULL_HANDLE),
9998  m_MapCount(0),
9999  m_pMappedData(VMA_NULL)
10000 {
10001 }
10002 
10003 void VmaDeviceMemoryBlock::Init(
10004  VmaAllocator hAllocator,
10005  uint32_t newMemoryTypeIndex,
10006  VkDeviceMemory newMemory,
10007  VkDeviceSize newSize,
10008  uint32_t id,
10009  uint32_t algorithm)
10010 {
10011  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10012 
10013  m_MemoryTypeIndex = newMemoryTypeIndex;
10014  m_Id = id;
10015  m_hMemory = newMemory;
10016 
10017  switch(algorithm)
10018  {
10019  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10020  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10021  break;
10022  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10023  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10024  break;
10025  default:
10026  VMA_ASSERT(0);
10027  // Fall-through.
10028  case 0:
10029  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10030  }
10031  m_pMetadata->Init(newSize);
10032 }
10033 
10034 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10035 {
10036  // This is the most important assert in the entire library.
10037  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10038  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10039 
10040  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10041  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10042  m_hMemory = VK_NULL_HANDLE;
10043 
10044  vma_delete(allocator, m_pMetadata);
10045  m_pMetadata = VMA_NULL;
10046 }
10047 
10048 bool VmaDeviceMemoryBlock::Validate() const
10049 {
10050  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10051  (m_pMetadata->GetSize() != 0));
10052 
10053  return m_pMetadata->Validate();
10054 }
10055 
10056 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10057 {
10058  void* pData = nullptr;
10059  VkResult res = Map(hAllocator, 1, &pData);
10060  if(res != VK_SUCCESS)
10061  {
10062  return res;
10063  }
10064 
10065  res = m_pMetadata->CheckCorruption(pData);
10066 
10067  Unmap(hAllocator, 1);
10068 
10069  return res;
10070 }
10071 
10072 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10073 {
10074  if(count == 0)
10075  {
10076  return VK_SUCCESS;
10077  }
10078 
10079  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10080  if(m_MapCount != 0)
10081  {
10082  m_MapCount += count;
10083  VMA_ASSERT(m_pMappedData != VMA_NULL);
10084  if(ppData != VMA_NULL)
10085  {
10086  *ppData = m_pMappedData;
10087  }
10088  return VK_SUCCESS;
10089  }
10090  else
10091  {
10092  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10093  hAllocator->m_hDevice,
10094  m_hMemory,
10095  0, // offset
10096  VK_WHOLE_SIZE,
10097  0, // flags
10098  &m_pMappedData);
10099  if(result == VK_SUCCESS)
10100  {
10101  if(ppData != VMA_NULL)
10102  {
10103  *ppData = m_pMappedData;
10104  }
10105  m_MapCount = count;
10106  }
10107  return result;
10108  }
10109 }
10110 
10111 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10112 {
10113  if(count == 0)
10114  {
10115  return;
10116  }
10117 
10118  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10119  if(m_MapCount >= count)
10120  {
10121  m_MapCount -= count;
10122  if(m_MapCount == 0)
10123  {
10124  m_pMappedData = VMA_NULL;
10125  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10126  }
10127  }
10128  else
10129  {
10130  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10131  }
10132 }
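/*
Map() and Unmap() are reference-counted: vkMapMemory runs only on the
transition from 0 to a positive count and vkUnmapMemory only when the count
drops back to 0, so nested mappings of one VkDeviceMemory are cheap and never
conflict. A hedged usage sketch (hBlock/hAllocator stand for a block and an
allocator available to the caller):

    void* pData1 = nullptr;
    void* pData2 = nullptr;
    VkResult res = hBlock->Map(hAllocator, 1, &pData1); // actually maps
    if(res == VK_SUCCESS)
    {
        // Only increments the counter; returns the same pointer.
        res = hBlock->Map(hAllocator, 1, &pData2); // pData2 == pData1
        hBlock->Unmap(hAllocator, 1); // counter 2 -> 1, still mapped
    }
    hBlock->Unmap(hAllocator, 1); // counter 1 -> 0, vkUnmapMemory is called
*/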
10133 
10134 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10135 {
10136  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10137  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10138 
10139  void* pData;
10140  VkResult res = Map(hAllocator, 1, &pData);
10141  if(res != VK_SUCCESS)
10142  {
10143  return res;
10144  }
10145 
10146  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10147  VmaWriteMagicValue(pData, allocOffset + allocSize);
10148 
10149  Unmap(hAllocator, 1);
10150 
10151  return VK_SUCCESS;
10152 }
10153 
10154 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10155 {
10156  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10157  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10158 
10159  void* pData;
10160  VkResult res = Map(hAllocator, 1, &pData);
10161  if(res != VK_SUCCESS)
10162  {
10163  return res;
10164  }
10165 
10166  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10167  {
10168  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10169  }
10170  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10171  {
10172  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10173  }
10174 
10175  Unmap(hAllocator, 1);
10176 
10177  return VK_SUCCESS;
10178 }
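/*
These two helpers implement corruption detection: a magic pattern fills the
VMA_DEBUG_MARGIN bytes directly before and after every allocation and is
re-checked when the allocation is freed. A simplified sketch of the idea with
a hypothetical 4-byte constant (the real VmaWriteMagicValue /
VmaValidateMagicValue are defined earlier in this file):

    #include <cstdint>

    static const uint32_t MAGIC = 0xDEADC0DE; // hypothetical pattern
    static const uint64_t MARGIN = 16;        // stand-in for VMA_DEBUG_MARGIN

    void WriteMagic(void* pBlockData, uint64_t offset)
    {
        uint32_t* p = (uint32_t*)((char*)pBlockData + offset);
        for(uint64_t i = 0; i < MARGIN / sizeof(uint32_t); ++i, ++p)
        {
            *p = MAGIC;
        }
    }

    bool ValidateMagic(const void* pBlockData, uint64_t offset)
    {
        const uint32_t* p = (const uint32_t*)((const char*)pBlockData + offset);
        for(uint64_t i = 0; i < MARGIN / sizeof(uint32_t); ++i, ++p)
        {
            if(*p != MAGIC)
            {
                return false; // something wrote past an allocation boundary
            }
        }
        return true;
    }

This is also why the asserts above require VMA_DEBUG_MARGIN to be a positive
multiple of 4: the margin is filled and checked in uint32_t-sized steps.
*/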
10179 
10180 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10181  const VmaAllocator hAllocator,
10182  const VmaAllocation hAllocation,
10183  VkBuffer hBuffer)
10184 {
10185  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10186  hAllocation->GetBlock() == this);
10187  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10188  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10189  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10190  hAllocator->m_hDevice,
10191  hBuffer,
10192  m_hMemory,
10193  hAllocation->GetOffset());
10194 }
10195 
10196 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10197  const VmaAllocator hAllocator,
10198  const VmaAllocation hAllocation,
10199  VkImage hImage)
10200 {
10201  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10202  hAllocation->GetBlock() == this);
10203  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10204  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10205  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10206  hAllocator->m_hDevice,
10207  hImage,
10208  m_hMemory,
10209  hAllocation->GetOffset());
10210 }
10211 
10212 static void InitStatInfo(VmaStatInfo& outInfo)
10213 {
10214  memset(&outInfo, 0, sizeof(outInfo));
10215  outInfo.allocationSizeMin = UINT64_MAX;
10216  outInfo.unusedRangeSizeMin = UINT64_MAX;
10217 }
10218 
10219 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10220 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10221 {
10222  inoutInfo.blockCount += srcInfo.blockCount;
10223  inoutInfo.allocationCount += srcInfo.allocationCount;
10224  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10225  inoutInfo.usedBytes += srcInfo.usedBytes;
10226  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10227  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10228  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10229  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10230  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10231 }
10232 
10233 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10234 {
10235  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10236  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10237  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10238  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10239 }
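/*
Statistics are aggregated in two phases: VmaAddStatInfo merges counters and
min/max values pairwise, and VmaPostprocessCalcStatInfo derives averages once
at the end. Averaging per-block averages directly would weight small blocks
as heavily as large ones; summing first gives the correct weighted result.
A small worked example under assumed inputs:

    VmaStatInfo a, b;
    InitStatInfo(a);
    InitStatInfo(b);
    a.allocationCount = 2; a.usedBytes = 100; // average alone would be 50
    b.allocationCount = 8; b.usedBytes = 100; // average alone would be 12.5
    VmaAddStatInfo(a, b);            // a: 10 allocations, 200 bytes used
    VmaPostprocessCalcStatInfo(a);
    // a.allocationSizeAvg == 20 - not the misleading (50 + 12.5) / 2.
*/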
10240 
10241 VmaPool_T::VmaPool_T(
10242  VmaAllocator hAllocator,
10243  const VmaPoolCreateInfo& createInfo,
10244  VkDeviceSize preferredBlockSize) :
10245  m_BlockVector(
10246  hAllocator,
10247  createInfo.memoryTypeIndex,
10248  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10249  createInfo.minBlockCount,
10250  createInfo.maxBlockCount,
10251  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10252  createInfo.frameInUseCount,
10253  true, // isCustomPool
10254  createInfo.blockSize != 0, // explicitBlockSize
10255  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10256  m_Id(0)
10257 {
10258 }
10259 
10260 VmaPool_T::~VmaPool_T()
10261 {
10262 }
10263 
10264 #if VMA_STATS_STRING_ENABLED
10265 
10266 #endif // #if VMA_STATS_STRING_ENABLED
10267 
10268 VmaBlockVector::VmaBlockVector(
10269  VmaAllocator hAllocator,
10270  uint32_t memoryTypeIndex,
10271  VkDeviceSize preferredBlockSize,
10272  size_t minBlockCount,
10273  size_t maxBlockCount,
10274  VkDeviceSize bufferImageGranularity,
10275  uint32_t frameInUseCount,
10276  bool isCustomPool,
10277  bool explicitBlockSize,
10278  uint32_t algorithm) :
10279  m_hAllocator(hAllocator),
10280  m_MemoryTypeIndex(memoryTypeIndex),
10281  m_PreferredBlockSize(preferredBlockSize),
10282  m_MinBlockCount(minBlockCount),
10283  m_MaxBlockCount(maxBlockCount),
10284  m_BufferImageGranularity(bufferImageGranularity),
10285  m_FrameInUseCount(frameInUseCount),
10286  m_IsCustomPool(isCustomPool),
10287  m_ExplicitBlockSize(explicitBlockSize),
10288  m_Algorithm(algorithm),
10289  m_HasEmptyBlock(false),
10290  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10291  m_pDefragmentator(VMA_NULL),
10292  m_NextBlockId(0)
10293 {
10294 }
10295 
10296 VmaBlockVector::~VmaBlockVector()
10297 {
10298  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10299 
10300  for(size_t i = m_Blocks.size(); i--; )
10301  {
10302  m_Blocks[i]->Destroy(m_hAllocator);
10303  vma_delete(m_hAllocator, m_Blocks[i]);
10304  }
10305 }
10306 
10307 VkResult VmaBlockVector::CreateMinBlocks()
10308 {
10309  for(size_t i = 0; i < m_MinBlockCount; ++i)
10310  {
10311  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10312  if(res != VK_SUCCESS)
10313  {
10314  return res;
10315  }
10316  }
10317  return VK_SUCCESS;
10318 }
10319 
10320 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10321 {
10322  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10323 
10324  const size_t blockCount = m_Blocks.size();
10325 
10326  pStats->size = 0;
10327  pStats->unusedSize = 0;
10328  pStats->allocationCount = 0;
10329  pStats->unusedRangeCount = 0;
10330  pStats->unusedRangeSizeMax = 0;
10331  pStats->blockCount = blockCount;
10332 
10333  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10334  {
10335  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10336  VMA_ASSERT(pBlock);
10337  VMA_HEAVY_ASSERT(pBlock->Validate());
10338  pBlock->m_pMetadata->AddPoolStats(*pStats);
10339  }
10340 }
10341 
10342 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10343 {
10344  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10345  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10346  (VMA_DEBUG_MARGIN > 0) &&
10347  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10348 }
10349 
10350 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10351 
10352 VkResult VmaBlockVector::Allocate(
10353  VmaPool hCurrentPool,
10354  uint32_t currentFrameIndex,
10355  VkDeviceSize size,
10356  VkDeviceSize alignment,
10357  const VmaAllocationCreateInfo& createInfo,
10358  VmaSuballocationType suballocType,
10359  VmaAllocation* pAllocation)
10360 {
10361  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10362  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10363  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10364  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10365  const bool canCreateNewBlock =
10366  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10367  (m_Blocks.size() < m_MaxBlockCount);
10368  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10369 
10370  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
10371  // which in turn is available only when maxBlockCount = 1.
10372  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10373  {
10374  canMakeOtherLost = false;
10375  }
10376 
10377  // Upper address can only be used with linear allocator and within single memory block.
10378  if(isUpperAddress &&
10379  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10380  {
10381  return VK_ERROR_FEATURE_NOT_PRESENT;
10382  }
10383 
10384  // Validate strategy.
10385  switch(strategy)
10386  {
10387  case 0:
10388  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10389  break;
10390  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10391  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10392  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10393  break;
10394  default:
10395  return VK_ERROR_FEATURE_NOT_PRESENT;
10396  }
10397 
10398  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10399  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10400  {
10401  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10402  }
10403 
10404  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10405 
10406  /*
10407  Under certain conditions, this whole section can be skipped as an optimization, so
10408  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10409  e.g. for custom pools with the linear algorithm.
10410  */
10411  if(!canMakeOtherLost || canCreateNewBlock)
10412  {
10413  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10414  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10415  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10416 
10417  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10418  {
10419  // Use only last block.
10420  if(!m_Blocks.empty())
10421  {
10422  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10423  VMA_ASSERT(pCurrBlock);
10424  VkResult res = AllocateFromBlock(
10425  pCurrBlock,
10426  hCurrentPool,
10427  currentFrameIndex,
10428  size,
10429  alignment,
10430  allocFlagsCopy,
10431  createInfo.pUserData,
10432  suballocType,
10433  strategy,
10434  pAllocation);
10435  if(res == VK_SUCCESS)
10436  {
10437  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10438  return VK_SUCCESS;
10439  }
10440  }
10441  }
10442  else
10443  {
10444  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10445  {
10446  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10447  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10448  {
10449  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10450  VMA_ASSERT(pCurrBlock);
10451  VkResult res = AllocateFromBlock(
10452  pCurrBlock,
10453  hCurrentPool,
10454  currentFrameIndex,
10455  size,
10456  alignment,
10457  allocFlagsCopy,
10458  createInfo.pUserData,
10459  suballocType,
10460  strategy,
10461  pAllocation);
10462  if(res == VK_SUCCESS)
10463  {
10464  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10465  return VK_SUCCESS;
10466  }
10467  }
10468  }
10469  else // WORST_FIT, FIRST_FIT
10470  {
10471  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10472  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10473  {
10474  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10475  VMA_ASSERT(pCurrBlock);
10476  VkResult res = AllocateFromBlock(
10477  pCurrBlock,
10478  hCurrentPool,
10479  currentFrameIndex,
10480  size,
10481  alignment,
10482  allocFlagsCopy,
10483  createInfo.pUserData,
10484  suballocType,
10485  strategy,
10486  pAllocation);
10487  if(res == VK_SUCCESS)
10488  {
10489  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10490  return VK_SUCCESS;
10491  }
10492  }
10493  }
10494  }
10495 
10496  // 2. Try to create new block.
10497  if(canCreateNewBlock)
10498  {
10499  // Calculate optimal size for new block.
10500  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10501  uint32_t newBlockSizeShift = 0;
10502  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10503 
10504  if(!m_ExplicitBlockSize)
10505  {
10506  // Allocate 1/8, 1/4, 1/2 as first blocks.
10507  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10508  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10509  {
10510  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10511  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10512  {
10513  newBlockSize = smallerNewBlockSize;
10514  ++newBlockSizeShift;
10515  }
10516  else
10517  {
10518  break;
10519  }
10520  }
10521  }
10522 
10523  size_t newBlockIndex = 0;
10524  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10525  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10526  if(!m_ExplicitBlockSize)
10527  {
10528  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10529  {
10530  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10531  if(smallerNewBlockSize >= size)
10532  {
10533  newBlockSize = smallerNewBlockSize;
10534  ++newBlockSizeShift;
10535  res = CreateBlock(newBlockSize, &newBlockIndex);
10536  }
10537  else
10538  {
10539  break;
10540  }
10541  }
10542  }
10543 
10544  if(res == VK_SUCCESS)
10545  {
10546  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10547  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10548 
10549  res = AllocateFromBlock(
10550  pBlock,
10551  hCurrentPool,
10552  currentFrameIndex,
10553  size,
10554  alignment,
10555  allocFlagsCopy,
10556  createInfo.pUserData,
10557  suballocType,
10558  strategy,
10559  pAllocation);
10560  if(res == VK_SUCCESS)
10561  {
10562  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10563  return VK_SUCCESS;
10564  }
10565  else
10566  {
10567  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10568  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10569  }
10570  }
10571  }
10572  }
10573 
10574  // 3. Try to allocate from existing blocks with making other allocations lost.
10575  if(canMakeOtherLost)
10576  {
10577  uint32_t tryIndex = 0;
10578  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10579  {
10580  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10581  VmaAllocationRequest bestRequest = {};
10582  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10583 
10584  // 1. Search existing allocations.
10585  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10586  {
10587  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10588  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10589  {
10590  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10591  VMA_ASSERT(pCurrBlock);
10592  VmaAllocationRequest currRequest = {};
10593  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10594  currentFrameIndex,
10595  m_FrameInUseCount,
10596  m_BufferImageGranularity,
10597  size,
10598  alignment,
10599  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10600  suballocType,
10601  canMakeOtherLost,
10602  strategy,
10603  &currRequest))
10604  {
10605  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10606  if(pBestRequestBlock == VMA_NULL ||
10607  currRequestCost < bestRequestCost)
10608  {
10609  pBestRequestBlock = pCurrBlock;
10610  bestRequest = currRequest;
10611  bestRequestCost = currRequestCost;
10612 
10613  if(bestRequestCost == 0)
10614  {
10615  break;
10616  }
10617  }
10618  }
10619  }
10620  }
10621  else // WORST_FIT, FIRST_FIT
10622  {
10623  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10624  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10625  {
10626  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10627  VMA_ASSERT(pCurrBlock);
10628  VmaAllocationRequest currRequest = {};
10629  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10630  currentFrameIndex,
10631  m_FrameInUseCount,
10632  m_BufferImageGranularity,
10633  size,
10634  alignment,
10635  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10636  suballocType,
10637  canMakeOtherLost,
10638  strategy,
10639  &currRequest))
10640  {
10641  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10642  if(pBestRequestBlock == VMA_NULL ||
10643  currRequestCost < bestRequestCost ||
10644  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10645  {
10646  pBestRequestBlock = pCurrBlock;
10647  bestRequest = currRequest;
10648  bestRequestCost = currRequestCost;
10649 
10650  if(bestRequestCost == 0 ||
10651  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10652  {
10653  break;
10654  }
10655  }
10656  }
10657  }
10658  }
10659 
10660  if(pBestRequestBlock != VMA_NULL)
10661  {
10662  if(mapped)
10663  {
10664  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10665  if(res != VK_SUCCESS)
10666  {
10667  return res;
10668  }
10669  }
10670 
10671  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10672  currentFrameIndex,
10673  m_FrameInUseCount,
10674  &bestRequest))
10675  {
10676  // We no longer have an empty block.
10677  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10678  {
10679  m_HasEmptyBlock = false;
10680  }
10681  // Allocate from this pBlock.
10682  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10683  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10684  (*pAllocation)->InitBlockAllocation(
10685  hCurrentPool,
10686  pBestRequestBlock,
10687  bestRequest.offset,
10688  alignment,
10689  size,
10690  suballocType,
10691  mapped,
10692  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10693  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10694  VMA_DEBUG_LOG(" Returned from existing block");
10695  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10696  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10697  {
10698  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10699  }
10700  if(IsCorruptionDetectionEnabled())
10701  {
10702  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10703  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10704  }
10705  return VK_SUCCESS;
10706  }
10707  // else: Some allocations must have been touched while we are here. Next try.
10708  }
10709  else
10710  {
10711  // Could not find place in any of the blocks - break outer loop.
10712  break;
10713  }
10714  }
10715  /* Maximum number of tries exceeded - a very unlikely event when many other
10716  threads are simultaneously touching allocations, making it impossible to mark
10717  them as lost at the same time as we try to allocate. */
10718  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10719  {
10720  return VK_ERROR_TOO_MANY_OBJECTS;
10721  }
10722  }
10723 
10724  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10725 }
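/*
From the public API, the stages of Allocate() above are selected purely
through VmaAllocationCreateInfo::flags. A hedged usage sketch (allocator is a
previously created VmaAllocator; buffer parameters are placeholders):

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // BEST_FIT makes step 1 walk m_Blocks forward (least free space first):
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    // Adding this flag would enable step 3 (making other allocations lost):
    // allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo,
        &buf, &alloc, nullptr);
*/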
10726 
10727 void VmaBlockVector::Free(
10728  VmaAllocation hAllocation)
10729 {
10730  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10731 
10732  // Scope for lock.
10733  {
10734  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10735 
10736  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10737 
10738  if(IsCorruptionDetectionEnabled())
10739  {
10740  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10741  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10742  }
10743 
10744  if(hAllocation->IsPersistentMap())
10745  {
10746  pBlock->Unmap(m_hAllocator, 1);
10747  }
10748 
10749  pBlock->m_pMetadata->Free(hAllocation);
10750  VMA_HEAVY_ASSERT(pBlock->Validate());
10751 
10752  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10753 
10754  // pBlock became empty after this deallocation.
10755  if(pBlock->m_pMetadata->IsEmpty())
10756  {
10757  // Already have an empty block. We don't want to have two, so delete this one.
10758  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10759  {
10760  pBlockToDelete = pBlock;
10761  Remove(pBlock);
10762  }
10763  // We now have first empty block.
10764  else
10765  {
10766  m_HasEmptyBlock = true;
10767  }
10768  }
10769  // pBlock didn't become empty, but we have another empty block - find and free that one.
10770  // (This is optional - a heuristic.)
10771  else if(m_HasEmptyBlock)
10772  {
10773  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10774  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10775  {
10776  pBlockToDelete = pLastBlock;
10777  m_Blocks.pop_back();
10778  m_HasEmptyBlock = false;
10779  }
10780  }
10781 
10782  IncrementallySortBlocks();
10783  }
10784 
10785  // Destruction of a free block. Deferred until this point, outside of the mutex
10786  // lock, for performance reasons.
10787  if(pBlockToDelete != VMA_NULL)
10788  {
10789  VMA_DEBUG_LOG(" Deleted empty block");
10790  pBlockToDelete->Destroy(m_hAllocator);
10791  vma_delete(m_hAllocator, pBlockToDelete);
10792  }
10793 }
10794 
10795 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10796 {
10797  VkDeviceSize result = 0;
10798  for(size_t i = m_Blocks.size(); i--; )
10799  {
10800  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10801  if(result >= m_PreferredBlockSize)
10802  {
10803  break;
10804  }
10805  }
10806  return result;
10807 }
10808 
10809 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10810 {
10811  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10812  {
10813  if(m_Blocks[blockIndex] == pBlock)
10814  {
10815  VmaVectorRemove(m_Blocks, blockIndex);
10816  return;
10817  }
10818  }
10819  VMA_ASSERT(0);
10820 }
10821 
10822 void VmaBlockVector::IncrementallySortBlocks()
10823 {
10824  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10825  {
10826  // Bubble sort only until first swap.
10827  for(size_t i = 1; i < m_Blocks.size(); ++i)
10828  {
10829  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10830  {
10831  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10832  return;
10833  }
10834  }
10835  }
10836 }
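/*
IncrementallySortBlocks() does at most one swap per call - an amortized
bubble-sort step. Because it runs after every free, m_Blocks converges toward
being sorted by free space without ever paying for a full sort. The same
pattern standalone, over a plain vector:

    #include <vector>
    #include <utility>

    // One incremental pass: fix the first out-of-order neighbor pair, if any.
    void IncrementallySort(std::vector<int>& v)
    {
        for(size_t i = 1; i < v.size(); ++i)
        {
            if(v[i - 1] > v[i])
            {
                std::swap(v[i - 1], v[i]);
                return; // at most one swap per call, like the code above
            }
        }
    }
*/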
10837 
10838 VkResult VmaBlockVector::AllocateFromBlock(
10839  VmaDeviceMemoryBlock* pBlock,
10840  VmaPool hCurrentPool,
10841  uint32_t currentFrameIndex,
10842  VkDeviceSize size,
10843  VkDeviceSize alignment,
10844  VmaAllocationCreateFlags allocFlags,
10845  void* pUserData,
10846  VmaSuballocationType suballocType,
10847  uint32_t strategy,
10848  VmaAllocation* pAllocation)
10849 {
10850  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10851  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10852  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10853  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10854 
10855  VmaAllocationRequest currRequest = {};
10856  if(pBlock->m_pMetadata->CreateAllocationRequest(
10857  currentFrameIndex,
10858  m_FrameInUseCount,
10859  m_BufferImageGranularity,
10860  size,
10861  alignment,
10862  isUpperAddress,
10863  suballocType,
10864  false, // canMakeOtherLost
10865  strategy,
10866  &currRequest))
10867  {
10868  // Allocate from pCurrBlock.
10869  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10870 
10871  if(mapped)
10872  {
10873  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10874  if(res != VK_SUCCESS)
10875  {
10876  return res;
10877  }
10878  }
10879 
10880  // We no longer have an empty block.
10881  if(pBlock->m_pMetadata->IsEmpty())
10882  {
10883  m_HasEmptyBlock = false;
10884  }
10885 
10886  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10887  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10888  (*pAllocation)->InitBlockAllocation(
10889  hCurrentPool,
10890  pBlock,
10891  currRequest.offset,
10892  alignment,
10893  size,
10894  suballocType,
10895  mapped,
10896  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10897  VMA_HEAVY_ASSERT(pBlock->Validate());
10898  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10899  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10900  {
10901  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10902  }
10903  if(IsCorruptionDetectionEnabled())
10904  {
10905  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10906  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10907  }
10908  return VK_SUCCESS;
10909  }
10910  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10911 }
10912 
10913 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10914 {
10915  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10916  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10917  allocInfo.allocationSize = blockSize;
10918  VkDeviceMemory mem = VK_NULL_HANDLE;
10919  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10920  if(res < 0)
10921  {
10922  return res;
10923  }
10924 
10925  // New VkDeviceMemory successfully created.
10926 
10927  // Create a new block object for it.
10928  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10929  pBlock->Init(
10930  m_hAllocator,
10931  m_MemoryTypeIndex,
10932  mem,
10933  allocInfo.allocationSize,
10934  m_NextBlockId++,
10935  m_Algorithm);
10936 
10937  m_Blocks.push_back(pBlock);
10938  if(pNewBlockIndex != VMA_NULL)
10939  {
10940  *pNewBlockIndex = m_Blocks.size() - 1;
10941  }
10942 
10943  return VK_SUCCESS;
10944 }
10945 
10946 #if VMA_STATS_STRING_ENABLED
10947 
10948 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
10949 {
10950  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10951 
10952  json.BeginObject();
10953 
10954  if(m_IsCustomPool)
10955  {
10956  json.WriteString("MemoryTypeIndex");
10957  json.WriteNumber(m_MemoryTypeIndex);
10958 
10959  json.WriteString("BlockSize");
10960  json.WriteNumber(m_PreferredBlockSize);
10961 
10962  json.WriteString("BlockCount");
10963  json.BeginObject(true);
10964  if(m_MinBlockCount > 0)
10965  {
10966  json.WriteString("Min");
10967  json.WriteNumber((uint64_t)m_MinBlockCount);
10968  }
10969  if(m_MaxBlockCount < SIZE_MAX)
10970  {
10971  json.WriteString("Max");
10972  json.WriteNumber((uint64_t)m_MaxBlockCount);
10973  }
10974  json.WriteString("Cur");
10975  json.WriteNumber((uint64_t)m_Blocks.size());
10976  json.EndObject();
10977 
10978  if(m_FrameInUseCount > 0)
10979  {
10980  json.WriteString("FrameInUseCount");
10981  json.WriteNumber(m_FrameInUseCount);
10982  }
10983 
10984  if(m_Algorithm != 0)
10985  {
10986  json.WriteString("Algorithm");
10987  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
10988  }
10989  }
10990  else
10991  {
10992  json.WriteString("PreferredBlockSize");
10993  json.WriteNumber(m_PreferredBlockSize);
10994  }
10995 
10996  json.WriteString("Blocks");
10997  json.BeginObject();
10998  for(size_t i = 0; i < m_Blocks.size(); ++i)
10999  {
11000  json.BeginString();
11001  json.ContinueString(m_Blocks[i]->GetId());
11002  json.EndString();
11003 
11004  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11005  }
11006  json.EndObject();
11007 
11008  json.EndObject();
11009 }
11010 
11011 #endif // #if VMA_STATS_STRING_ENABLED
11012 
11013 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11014  VmaAllocator hAllocator,
11015  uint32_t currentFrameIndex)
11016 {
11017  if(m_pDefragmentator == VMA_NULL)
11018  {
11019  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11020  hAllocator,
11021  this,
11022  currentFrameIndex);
11023  }
11024 
11025  return m_pDefragmentator;
11026 }
11027 
11028 VkResult VmaBlockVector::Defragment(
11029  VmaDefragmentationStats* pDefragmentationStats,
11030  VkDeviceSize& maxBytesToMove,
11031  uint32_t& maxAllocationsToMove)
11032 {
11033  if(m_pDefragmentator == VMA_NULL)
11034  {
11035  return VK_SUCCESS;
11036  }
11037 
11038  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11039 
11040  // Defragment.
11041  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11042 
11043  // Accumulate statistics.
11044  if(pDefragmentationStats != VMA_NULL)
11045  {
11046  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11047  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11048  pDefragmentationStats->bytesMoved += bytesMoved;
11049  pDefragmentationStats->allocationsMoved += allocationsMoved;
11050  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11051  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11052  maxBytesToMove -= bytesMoved;
11053  maxAllocationsToMove -= allocationsMoved;
11054  }
11055 
11056  // Free empty blocks.
11057  m_HasEmptyBlock = false;
11058  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11059  {
11060  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11061  if(pBlock->m_pMetadata->IsEmpty())
11062  {
11063  if(m_Blocks.size() > m_MinBlockCount)
11064  {
11065  if(pDefragmentationStats != VMA_NULL)
11066  {
11067  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11068  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11069  }
11070 
11071  VmaVectorRemove(m_Blocks, blockIndex);
11072  pBlock->Destroy(m_hAllocator);
11073  vma_delete(m_hAllocator, pBlock);
11074  }
11075  else
11076  {
11077  m_HasEmptyBlock = true;
11078  }
11079  }
11080  }
11081 
11082  return result;
11083 }
11084 
11085 void VmaBlockVector::DestroyDefragmentator()
11086 {
11087  if(m_pDefragmentator != VMA_NULL)
11088  {
11089  vma_delete(m_hAllocator, m_pDefragmentator);
11090  m_pDefragmentator = VMA_NULL;
11091  }
11092 }
11093 
11094 void VmaBlockVector::MakePoolAllocationsLost(
11095  uint32_t currentFrameIndex,
11096  size_t* pLostAllocationCount)
11097 {
11098  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11099  size_t lostAllocationCount = 0;
11100  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11101  {
11102  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11103  VMA_ASSERT(pBlock);
11104  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11105  }
11106  if(pLostAllocationCount != VMA_NULL)
11107  {
11108  *pLostAllocationCount = lostAllocationCount;
11109  }
11110 }
11111 
11112 VkResult VmaBlockVector::CheckCorruption()
11113 {
11114  if(!IsCorruptionDetectionEnabled())
11115  {
11116  return VK_ERROR_FEATURE_NOT_PRESENT;
11117  }
11118 
11119  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11120  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11121  {
11122  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11123  VMA_ASSERT(pBlock);
11124  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11125  if(res != VK_SUCCESS)
11126  {
11127  return res;
11128  }
11129  }
11130  return VK_SUCCESS;
11131 }
11132 
11133 void VmaBlockVector::AddStats(VmaStats* pStats)
11134 {
11135  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11136  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11137 
11138  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11139 
11140  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11141  {
11142  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11143  VMA_ASSERT(pBlock);
11144  VMA_HEAVY_ASSERT(pBlock->Validate());
11145  VmaStatInfo allocationStatInfo;
11146  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11147  VmaAddStatInfo(pStats->total, allocationStatInfo);
11148  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11149  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11150  }
11151 }
11152 
11153 ////////////////////////////////////////////////////////////////////////////////
11154 // VmaDefragmentator members definition
11155 
11156 VmaDefragmentator::VmaDefragmentator(
11157  VmaAllocator hAllocator,
11158  VmaBlockVector* pBlockVector,
11159  uint32_t currentFrameIndex) :
11160  m_hAllocator(hAllocator),
11161  m_pBlockVector(pBlockVector),
11162  m_CurrentFrameIndex(currentFrameIndex),
11163  m_BytesMoved(0),
11164  m_AllocationsMoved(0),
11165  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11166  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11167 {
11168  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11169 }
11170 
11171 VmaDefragmentator::~VmaDefragmentator()
11172 {
11173  for(size_t i = m_Blocks.size(); i--; )
11174  {
11175  vma_delete(m_hAllocator, m_Blocks[i]);
11176  }
11177 }
11178 
11179 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11180 {
11181  AllocationInfo allocInfo;
11182  allocInfo.m_hAllocation = hAlloc;
11183  allocInfo.m_pChanged = pChanged;
11184  m_Allocations.push_back(allocInfo);
11185 }
11186 
11187 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11188 {
11189  // It has already been mapped for defragmentation.
11190  if(m_pMappedDataForDefragmentation)
11191  {
11192  *ppMappedData = m_pMappedDataForDefragmentation;
11193  return VK_SUCCESS;
11194  }
11195 
11196  // It is originally mapped.
11197  if(m_pBlock->GetMappedData())
11198  {
11199  *ppMappedData = m_pBlock->GetMappedData();
11200  return VK_SUCCESS;
11201  }
11202 
11203  // Map on first usage.
11204  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11205  *ppMappedData = m_pMappedDataForDefragmentation;
11206  return res;
11207 }
11208 
11209 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11210 {
11211  if(m_pMappedDataForDefragmentation != VMA_NULL)
11212  {
11213  m_pBlock->Unmap(hAllocator, 1);
11214  }
11215 }
11216 
11217 VkResult VmaDefragmentator::DefragmentRound(
11218  VkDeviceSize maxBytesToMove,
11219  uint32_t maxAllocationsToMove)
11220 {
11221  if(m_Blocks.empty())
11222  {
11223  return VK_SUCCESS;
11224  }
11225 
11226  size_t srcBlockIndex = m_Blocks.size() - 1;
11227  size_t srcAllocIndex = SIZE_MAX;
11228  for(;;)
11229  {
11230  // 1. Find next allocation to move.
11231  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11232  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11233  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11234  {
11235  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11236  {
11237  // Finished: no more allocations to process.
11238  if(srcBlockIndex == 0)
11239  {
11240  return VK_SUCCESS;
11241  }
11242  else
11243  {
11244  --srcBlockIndex;
11245  srcAllocIndex = SIZE_MAX;
11246  }
11247  }
11248  else
11249  {
11250  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11251  }
11252  }
11253 
11254  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11255  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11256 
11257  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11258  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11259  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11260  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11261 
11262  // 2. Try to find new place for this allocation in preceding or current block.
11263  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11264  {
11265  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11266  VmaAllocationRequest dstAllocRequest;
11267  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11268  m_CurrentFrameIndex,
11269  m_pBlockVector->GetFrameInUseCount(),
11270  m_pBlockVector->GetBufferImageGranularity(),
11271  size,
11272  alignment,
11273  false, // upperAddress
11274  suballocType,
11275  false, // canMakeOtherLost
11276  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
11277  &dstAllocRequest) &&
11278  MoveMakesSense(
11279  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11280  {
11281  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11282 
11283  // Reached limit on number of allocations or bytes to move.
11284  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11285  (m_BytesMoved + size > maxBytesToMove))
11286  {
11287  return VK_INCOMPLETE;
11288  }
11289 
11290  void* pDstMappedData = VMA_NULL;
11291  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11292  if(res != VK_SUCCESS)
11293  {
11294  return res;
11295  }
11296 
11297  void* pSrcMappedData = VMA_NULL;
11298  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11299  if(res != VK_SUCCESS)
11300  {
11301  return res;
11302  }
11303 
11304  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11305  memcpy(
11306  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11307  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11308  static_cast<size_t>(size));
11309 
11310  if(VMA_DEBUG_MARGIN > 0)
11311  {
11312  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11313  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11314  }
11315 
11316  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11317  dstAllocRequest,
11318  suballocType,
11319  size,
11320  false, // upperAddress
11321  allocInfo.m_hAllocation);
11322  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11323 
11324  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11325 
11326  if(allocInfo.m_pChanged != VMA_NULL)
11327  {
11328  *allocInfo.m_pChanged = VK_TRUE;
11329  }
11330 
11331  ++m_AllocationsMoved;
11332  m_BytesMoved += size;
11333 
11334  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11335 
11336  break;
11337  }
11338  }
11339 
11340  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11341 
11342  if(srcAllocIndex > 0)
11343  {
11344  --srcAllocIndex;
11345  }
11346  else
11347  {
11348  if(srcBlockIndex > 0)
11349  {
11350  --srcBlockIndex;
11351  srcAllocIndex = SIZE_MAX;
11352  }
11353  else
11354  {
11355  return VK_SUCCESS;
11356  }
11357  }
11358  }
11359 }
11360 
11361 VkResult VmaDefragmentator::Defragment(
11362  VkDeviceSize maxBytesToMove,
11363  uint32_t maxAllocationsToMove)
11364 {
11365  if(m_Allocations.empty())
11366  {
11367  return VK_SUCCESS;
11368  }
11369 
11370  // Create block info for each block.
11371  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11372  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11373  {
11374  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11375  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11376  m_Blocks.push_back(pBlockInfo);
11377  }
11378 
11379  // Sort them by m_pBlock pointer value.
11380  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11381 
11382  // Move each allocation info from m_Allocations to the m_Blocks[] entry that owns its block.
11383  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11384  {
11385  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11386  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
11387  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11388  {
11389  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11390  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11391  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11392  {
11393  (*it)->m_Allocations.push_back(allocInfo);
11394  }
11395  else
11396  {
11397  VMA_ASSERT(0);
11398  }
11399  }
11400  }
11401  m_Allocations.clear();
11402 
11403  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11404  {
11405  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11406  pBlockInfo->CalcHasNonMovableAllocations();
11407  pBlockInfo->SortAllocationsBySizeDescecnding();
11408  }
11409 
11410  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11411  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11412 
11413  // Execute defragmentation rounds (the main part).
11414  VkResult result = VK_SUCCESS;
11415  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11416  {
11417  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11418  }
11419 
11420  // Unmap blocks that were mapped for defragmentation.
11421  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11422  {
11423  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11424  }
11425 
11426  return result;
11427 }
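/*
This method is the engine behind the public vmaDefragment() entry point. A
hedged usage sketch (ALLOC_COUNT and allocations[] stand for handles the
caller already owns; buffers or images bound to moved allocations must be
recreated and rebound by the caller afterwards):

    VmaAllocation allocations[ALLOC_COUNT]; // previously created
    VkBool32 allocationsChanged[ALLOC_COUNT] = {};

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocations, ALLOC_COUNT,
        allocationsChanged, &defragInfo, &stats);
    // stats.bytesMoved, stats.allocationsMoved, stats.bytesFreed and
    // stats.deviceMemoryBlocksFreed summarize what the rounds above did.
*/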
11428 
11429 bool VmaDefragmentator::MoveMakesSense(
11430  size_t dstBlockIndex, VkDeviceSize dstOffset,
11431  size_t srcBlockIndex, VkDeviceSize srcOffset)
11432 {
11433  if(dstBlockIndex < srcBlockIndex)
11434  {
11435  return true;
11436  }
11437  if(dstBlockIndex > srcBlockIndex)
11438  {
11439  return false;
11440  }
11441  if(dstOffset < srcOffset)
11442  {
11443  return true;
11444  }
11445  return false;
11446 }
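/*
MoveMakesSense() is a lexicographic "strictly less" test on the pair
(blockIndex, offset): moving is only worthwhile if it packs the allocation
into an earlier block, or earlier within the same block. An equivalent
formulation using std::tie:

    #include <tuple>

    bool MoveMakesSenseTie(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset)
    {
        return std::tie(dstBlockIndex, dstOffset) <
            std::tie(srcBlockIndex, srcOffset);
    }
*/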
11447 
11448 ////////////////////////////////////////////////////////////////////////////////
11449 // VmaDefragmentationContext
11450 
11451 VmaDefragmentationContext_T::VmaDefragmentationContext_T()
11452 {
11453 }
11454 
11455 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
11456 {
11457 }
11458 
11459 ////////////////////////////////////////////////////////////////////////////////
11460 // VmaRecorder
11461 
11462 #if VMA_RECORDING_ENABLED
11463 
11464 VmaRecorder::VmaRecorder() :
11465  m_UseMutex(true),
11466  m_Flags(0),
11467  m_File(VMA_NULL),
11468  m_Freq(INT64_MAX),
11469  m_StartCounter(INT64_MAX)
11470 {
11471 }
11472 
11473 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11474 {
11475  m_UseMutex = useMutex;
11476  m_Flags = settings.flags;
11477 
11478  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11479  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11480 
11481  // Open file for writing.
11482  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11483  if(err != 0)
11484  {
11485  return VK_ERROR_INITIALIZATION_FAILED;
11486  }
11487 
11488  // Write header.
11489  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11490  fprintf(m_File, "%s\n", "1,3");
11491 
11492  return VK_SUCCESS;
11493 }
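/*
The recording file is plain CSV: the two header lines written above (file
signature and format version 1,3) are followed by one line per intercepted
call, shaped threadId,time,frameIndex,functionName,args... - see the fprintf
format strings in the Record* methods below. A hypothetical sample of such a
file's first lines (thread id, timestamps and handle values are illustrative
only):

    Vulkan Memory Allocator,Calls recording
    1,3
    12552,0.002,0,vmaCreateAllocator
    12552,0.095,0,vmaCreatePool,7,0,0,0,16,0,000001D85B8B1B00
*/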
11494 
11495 VmaRecorder::~VmaRecorder()
11496 {
11497  if(m_File != VMA_NULL)
11498  {
11499  fclose(m_File);
11500  }
11501 }
11502 
11503 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11504 {
11505  CallParams callParams;
11506  GetBasicParams(callParams);
11507 
11508  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11509  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11510  Flush();
11511 }
11512 
11513 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11514 {
11515  CallParams callParams;
11516  GetBasicParams(callParams);
11517 
11518  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11519  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11520  Flush();
11521 }
11522 
11523 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11524 {
11525  CallParams callParams;
11526  GetBasicParams(callParams);
11527 
11528  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11529  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11530  createInfo.memoryTypeIndex,
11531  createInfo.flags,
11532  createInfo.blockSize,
11533  (uint64_t)createInfo.minBlockCount,
11534  (uint64_t)createInfo.maxBlockCount,
11535  createInfo.frameInUseCount,
11536  pool);
11537  Flush();
11538 }
11539 
11540 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11541 {
11542  CallParams callParams;
11543  GetBasicParams(callParams);
11544 
11545  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11546  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11547  pool);
11548  Flush();
11549 }
11550 
11551 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11552  const VkMemoryRequirements& vkMemReq,
11553  const VmaAllocationCreateInfo& createInfo,
11554  VmaAllocation allocation)
11555 {
11556  CallParams callParams;
11557  GetBasicParams(callParams);
11558 
11559  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11560  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11561  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11562  vkMemReq.size,
11563  vkMemReq.alignment,
11564  vkMemReq.memoryTypeBits,
11565  createInfo.flags,
11566  createInfo.usage,
11567  createInfo.requiredFlags,
11568  createInfo.preferredFlags,
11569  createInfo.memoryTypeBits,
11570  createInfo.pool,
11571  allocation,
11572  userDataStr.GetString());
11573  Flush();
11574 }
11575 
11576 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11577  const VkMemoryRequirements& vkMemReq,
11578  bool requiresDedicatedAllocation,
11579  bool prefersDedicatedAllocation,
11580  const VmaAllocationCreateInfo& createInfo,
11581  VmaAllocation allocation)
11582 {
11583  CallParams callParams;
11584  GetBasicParams(callParams);
11585 
11586  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11587  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11588  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11589  vkMemReq.size,
11590  vkMemReq.alignment,
11591  vkMemReq.memoryTypeBits,
11592  requiresDedicatedAllocation ? 1 : 0,
11593  prefersDedicatedAllocation ? 1 : 0,
11594  createInfo.flags,
11595  createInfo.usage,
11596  createInfo.requiredFlags,
11597  createInfo.preferredFlags,
11598  createInfo.memoryTypeBits,
11599  createInfo.pool,
11600  allocation,
11601  userDataStr.GetString());
11602  Flush();
11603 }
11604 
11605 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11606  const VkMemoryRequirements& vkMemReq,
11607  bool requiresDedicatedAllocation,
11608  bool prefersDedicatedAllocation,
11609  const VmaAllocationCreateInfo& createInfo,
11610  VmaAllocation allocation)
11611 {
11612  CallParams callParams;
11613  GetBasicParams(callParams);
11614 
11615  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11616  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11617  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11618  vkMemReq.size,
11619  vkMemReq.alignment,
11620  vkMemReq.memoryTypeBits,
11621  requiresDedicatedAllocation ? 1 : 0,
11622  prefersDedicatedAllocation ? 1 : 0,
11623  createInfo.flags,
11624  createInfo.usage,
11625  createInfo.requiredFlags,
11626  createInfo.preferredFlags,
11627  createInfo.memoryTypeBits,
11628  createInfo.pool,
11629  allocation,
11630  userDataStr.GetString());
11631  Flush();
11632 }
11633 
11634 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11635  VmaAllocation allocation)
11636 {
11637  CallParams callParams;
11638  GetBasicParams(callParams);
11639 
11640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11641  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11642  allocation);
11643  Flush();
11644 }
11645 
11646 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11647  VmaAllocation allocation,
11648  const void* pUserData)
11649 {
11650  CallParams callParams;
11651  GetBasicParams(callParams);
11652 
11653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11654  UserDataString userDataStr(
11655  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11656  pUserData);
11657  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11658  allocation,
11659  userDataStr.GetString());
11660  Flush();
11661 }
11662 
11663 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11664  VmaAllocation allocation)
11665 {
11666  CallParams callParams;
11667  GetBasicParams(callParams);
11668 
11669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11670  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11671  allocation);
11672  Flush();
11673 }
11674 
11675 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11676  VmaAllocation allocation)
11677 {
11678  CallParams callParams;
11679  GetBasicParams(callParams);
11680 
11681  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11682  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11683  allocation);
11684  Flush();
11685 }
11686 
11687 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11688  VmaAllocation allocation)
11689 {
11690  CallParams callParams;
11691  GetBasicParams(callParams);
11692 
11693  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11694  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11695  allocation);
11696  Flush();
11697 }
11698 
11699 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11700  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11701 {
11702  CallParams callParams;
11703  GetBasicParams(callParams);
11704 
11705  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11706  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11707  allocation,
11708  offset,
11709  size);
11710  Flush();
11711 }
11712 
11713 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11714  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11715 {
11716  CallParams callParams;
11717  GetBasicParams(callParams);
11718 
11719  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11720  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11721  allocation,
11722  offset,
11723  size);
11724  Flush();
11725 }
11726 
11727 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11728  const VkBufferCreateInfo& bufCreateInfo,
11729  const VmaAllocationCreateInfo& allocCreateInfo,
11730  VmaAllocation allocation)
11731 {
11732  CallParams callParams;
11733  GetBasicParams(callParams);
11734 
11735  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11736  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11737  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11738  bufCreateInfo.flags,
11739  bufCreateInfo.size,
11740  bufCreateInfo.usage,
11741  bufCreateInfo.sharingMode,
11742  allocCreateInfo.flags,
11743  allocCreateInfo.usage,
11744  allocCreateInfo.requiredFlags,
11745  allocCreateInfo.preferredFlags,
11746  allocCreateInfo.memoryTypeBits,
11747  allocCreateInfo.pool,
11748  allocation,
11749  userDataStr.GetString());
11750  Flush();
11751 }
11752 
11753 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11754  const VkImageCreateInfo& imageCreateInfo,
11755  const VmaAllocationCreateInfo& allocCreateInfo,
11756  VmaAllocation allocation)
11757 {
11758  CallParams callParams;
11759  GetBasicParams(callParams);
11760 
11761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11762  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11763  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11764  imageCreateInfo.flags,
11765  imageCreateInfo.imageType,
11766  imageCreateInfo.format,
11767  imageCreateInfo.extent.width,
11768  imageCreateInfo.extent.height,
11769  imageCreateInfo.extent.depth,
11770  imageCreateInfo.mipLevels,
11771  imageCreateInfo.arrayLayers,
11772  imageCreateInfo.samples,
11773  imageCreateInfo.tiling,
11774  imageCreateInfo.usage,
11775  imageCreateInfo.sharingMode,
11776  imageCreateInfo.initialLayout,
11777  allocCreateInfo.flags,
11778  allocCreateInfo.usage,
11779  allocCreateInfo.requiredFlags,
11780  allocCreateInfo.preferredFlags,
11781  allocCreateInfo.memoryTypeBits,
11782  allocCreateInfo.pool,
11783  allocation,
11784  userDataStr.GetString());
11785  Flush();
11786 }
11787 
11788 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11789  VmaAllocation allocation)
11790 {
11791  CallParams callParams;
11792  GetBasicParams(callParams);
11793 
11794  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11795  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11796  allocation);
11797  Flush();
11798 }
11799 
11800 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11801  VmaAllocation allocation)
11802 {
11803  CallParams callParams;
11804  GetBasicParams(callParams);
11805 
11806  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11807  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11808  allocation);
11809  Flush();
11810 }
11811 
11812 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11813  VmaAllocation allocation)
11814 {
11815  CallParams callParams;
11816  GetBasicParams(callParams);
11817 
11818  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11819  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11820  allocation);
11821  Flush();
11822 }
11823 
11824 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11825  VmaAllocation allocation)
11826 {
11827  CallParams callParams;
11828  GetBasicParams(callParams);
11829 
11830  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11831  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11832  allocation);
11833  Flush();
11834 }
11835 
11836 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11837  VmaPool pool)
11838 {
11839  CallParams callParams;
11840  GetBasicParams(callParams);
11841 
11842  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11843  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11844  pool);
11845  Flush();
11846 }
11847 
11848 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11849 {
11850  if(pUserData != VMA_NULL)
11851  {
11852  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11853  {
11854  m_Str = (const char*)pUserData;
11855  }
11856  else
11857  {
11858  sprintf_s(m_PtrStr, "%p", pUserData);
11859  m_Str = m_PtrStr;
11860  }
11861  }
11862  else
11863  {
11864  m_Str = "";
11865  }
11866 }
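
// A standalone sketch of the same formatting rule (hypothetical helper, not
// part of the library): with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
// pUserData is recorded verbatim as a null-terminated string; otherwise only
// its pointer value is written.

static const char* FormatUserDataSketch(
    VmaAllocationCreateFlags allocFlags,
    const void* pUserData,
    char* pBuf, size_t bufLen)
{
    if(pUserData == VMA_NULL)
    {
        return "";
    }
    if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    {
        return (const char*)pUserData; // Treated as a null-terminated string.
    }
    snprintf(pBuf, bufLen, "%p", pUserData); // Recorded as a raw pointer.
    return pBuf;
}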
11867 
11868 void VmaRecorder::WriteConfiguration(
11869  const VkPhysicalDeviceProperties& devProps,
11870  const VkPhysicalDeviceMemoryProperties& memProps,
11871  bool dedicatedAllocationExtensionEnabled)
11872 {
11873  fprintf(m_File, "Config,Begin\n");
11874 
11875  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11876  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11877  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11878  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11879  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11880  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11881 
11882  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11883  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11884  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11885 
11886  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11887  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11888  {
11889  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11890  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11891  }
11892  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11893  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11894  {
11895  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11896  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11897  }
11898 
11899  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11900 
11901  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11902  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11903  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11904  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11905  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11906  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11907  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11908  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11909  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11910 
11911  fprintf(m_File, "Config,End\n");
11912 }
11913 
11914 void VmaRecorder::GetBasicParams(CallParams& outParams)
11915 {
11916  outParams.threadId = GetCurrentThreadId();
11917 
11918  LARGE_INTEGER counter;
11919  QueryPerformanceCounter(&counter);
11920  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11921 }
11922 
11923 void VmaRecorder::Flush()
11924 {
11925  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11926  {
11927  fflush(m_File);
11928  }
11929 }
11930 
11931 #endif // #if VMA_RECORDING_ENABLED
11932 
11933 ////////////////////////////////////////////////////////////////////////////////
11934 // VmaAllocator_T
11935 
11936 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11937  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11938  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11939  m_hDevice(pCreateInfo->device),
11940  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11941  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11942  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11943  m_PreferredLargeHeapBlockSize(0),
11944  m_PhysicalDevice(pCreateInfo->physicalDevice),
11945  m_CurrentFrameIndex(0),
11946  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11947  m_NextPoolId(0)
11948 #if VMA_RECORDING_ENABLED
11949  ,m_pRecorder(VMA_NULL)
11950 #endif
11951 {
11952  if(VMA_DEBUG_DETECT_CORRUPTION)
11953  {
11954  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11955  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11956  }
11957 
11958  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11959 
11960 #if !(VMA_DEDICATED_ALLOCATION)
11961  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
11962  {
11963  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11964  }
11965 #endif
11966 
11967  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
11968  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11969  memset(&m_MemProps, 0, sizeof(m_MemProps));
11970 
11971  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11972  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11973 
11974  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11975  {
11976  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11977  }
11978 
11979  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11980  {
11981  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11982  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11983  }
11984 
11985  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11986 
11987  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11988  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11989 
11990  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11991  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11992  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11993  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11994 
11995  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11996  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11997 
11998  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11999  {
12000  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12001  {
12002  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12003  if(limit != VK_WHOLE_SIZE)
12004  {
12005  m_HeapSizeLimit[heapIndex] = limit;
12006  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12007  {
12008  m_MemProps.memoryHeaps[heapIndex].size = limit;
12009  }
12010  }
12011  }
12012  }
12013 
12014  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12015  {
12016  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12017 
12018  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12019  this,
12020  memTypeIndex,
12021  preferredBlockSize,
12022  0,
12023  SIZE_MAX,
12024  GetBufferImageGranularity(),
12025  pCreateInfo->frameInUseCount,
12026  false, // isCustomPool
12027  false, // explicitBlockSize
12028  false); // linearAlgorithm
12029  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12030  // because minBlockCount is 0.
12031  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12032 
12033  }
12034 }
12035 
12036 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12037 {
12038  VkResult res = VK_SUCCESS;
12039 
12040  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12041  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12042  {
12043 #if VMA_RECORDING_ENABLED
12044  m_pRecorder = vma_new(this, VmaRecorder)();
12045  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12046  if(res != VK_SUCCESS)
12047  {
12048  return res;
12049  }
12050  m_pRecorder->WriteConfiguration(
12051  m_PhysicalDeviceProperties,
12052  m_MemProps,
12053  m_UseKhrDedicatedAllocation);
12054  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12055 #else
12056  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12057  return VK_ERROR_FEATURE_NOT_PRESENT;
12058 #endif
12059  }
12060 
12061  return res;
12062 }
12063 
12064 VmaAllocator_T::~VmaAllocator_T()
12065 {
12066 #if VMA_RECORDING_ENABLED
12067  if(m_pRecorder != VMA_NULL)
12068  {
12069  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12070  vma_delete(this, m_pRecorder);
12071  }
12072 #endif
12073 
12074  VMA_ASSERT(m_Pools.empty());
12075 
12076  for(size_t i = GetMemoryTypeCount(); i--; )
12077  {
12078  vma_delete(this, m_pDedicatedAllocations[i]);
12079  vma_delete(this, m_pBlockVectors[i]);
12080  }
12081 }
12082 
12083 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12084 {
12085 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12086  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12087  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12088  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12089  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12090  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12091  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12092  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12093  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12094  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12095  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12096  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12097  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12098  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12099  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12100  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12101  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12102 #if VMA_DEDICATED_ALLOCATION
12103  if(m_UseKhrDedicatedAllocation)
12104  {
12105  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12106  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12107  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12108  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12109  }
12110 #endif // #if VMA_DEDICATED_ALLOCATION
12111 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12112 
12113 #define VMA_COPY_IF_NOT_NULL(funcName) \
12114  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12115 
12116  if(pVulkanFunctions != VMA_NULL)
12117  {
12118  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12119  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12120  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12121  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12122  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12123  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12124  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12125  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12126  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12127  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12128  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12129  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12130  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12131  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12132  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12133  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12134 #if VMA_DEDICATED_ALLOCATION
12135  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12136  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12137 #endif
12138  }
12139 
12140 #undef VMA_COPY_IF_NOT_NULL
12141 
12142  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12143  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12144  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12145  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12146  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12147  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12148  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12149  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12150  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12151  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12152  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12153  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12154  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12155  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12156  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12157  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12158  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12159  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12160 #if VMA_DEDICATED_ALLOCATION
12161  if(m_UseKhrDedicatedAllocation)
12162  {
12163  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12164  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12165  }
12166 #endif
12167 }
12168 
12169 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12170 {
12171  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12172  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12173  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12174  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12175 }
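
// Worked example with the library defaults (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB) and
// preferredLargeHeapBlockSize left at 0 in VmaAllocatorCreateInfo:
//
//   256 MiB heap (small): preferred block size = 256 MiB / 8 = 32 MiB
//   8 GiB heap (large):   preferred block size = 256 MiB
//
// Small heaps are thus split into at least ~8 blocks instead of one block
// monopolizing the whole heap.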
12176 
12177 VkResult VmaAllocator_T::AllocateMemoryOfType(
12178  VkDeviceSize size,
12179  VkDeviceSize alignment,
12180  bool dedicatedAllocation,
12181  VkBuffer dedicatedBuffer,
12182  VkImage dedicatedImage,
12183  const VmaAllocationCreateInfo& createInfo,
12184  uint32_t memTypeIndex,
12185  VmaSuballocationType suballocType,
12186  VmaAllocation* pAllocation)
12187 {
12188  VMA_ASSERT(pAllocation != VMA_NULL);
12189  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12190 
12191  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12192 
12193  // If memory type is not HOST_VISIBLE, disable MAPPED.
12194  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12195  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12196  {
12197  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12198  }
12199 
12200  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12201  VMA_ASSERT(blockVector);
12202 
12203  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12204  bool preferDedicatedMemory =
12205  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12206  dedicatedAllocation ||
12207  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12208  size > preferredBlockSize / 2;
12209 
12210  if(preferDedicatedMemory &&
12211  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12212  finalCreateInfo.pool == VK_NULL_HANDLE)
12213  {
12214  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12215  }
12216 
12217  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12218  {
12219  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12220  {
12221  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12222  }
12223  else
12224  {
12225  return AllocateDedicatedMemory(
12226  size,
12227  suballocType,
12228  memTypeIndex,
12229  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12230  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12231  finalCreateInfo.pUserData,
12232  dedicatedBuffer,
12233  dedicatedImage,
12234  pAllocation);
12235  }
12236  }
12237  else
12238  {
12239  VkResult res = blockVector->Allocate(
12240  VK_NULL_HANDLE, // hCurrentPool
12241  m_CurrentFrameIndex.load(),
12242  size,
12243  alignment,
12244  finalCreateInfo,
12245  suballocType,
12246  pAllocation);
12247  if(res == VK_SUCCESS)
12248  {
12249  return res;
12250  }
12251 
12252  // Try dedicated memory.
12253  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12254  {
12255  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12256  }
12257  else
12258  {
12259  res = AllocateDedicatedMemory(
12260  size,
12261  suballocType,
12262  memTypeIndex,
12263  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12264  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12265  finalCreateInfo.pUserData,
12266  dedicatedBuffer,
12267  dedicatedImage,
12268  pAllocation);
12269  if(res == VK_SUCCESS)
12270  {
12271  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
12272  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12273  return VK_SUCCESS;
12274  }
12275  else
12276  {
12277  // Everything failed: Return error code.
12278  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12279  return res;
12280  }
12281  }
12282  }
12283 }
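
// The heuristic above can be steered from the public API. A usage sketch that
// forces a dedicated VkDeviceMemory block regardless of size (assumes a valid
// allocator and a prepared VkBufferCreateInfo):

static VkResult CreateDedicatedBufferSketch(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Bypass the half-of-preferred-block-size heuristic:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    return vmaCreateBuffer(allocator, pBufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}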
12284 
12285 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12286  VkDeviceSize size,
12287  VmaSuballocationType suballocType,
12288  uint32_t memTypeIndex,
12289  bool map,
12290  bool isUserDataString,
12291  void* pUserData,
12292  VkBuffer dedicatedBuffer,
12293  VkImage dedicatedImage,
12294  VmaAllocation* pAllocation)
12295 {
12296  VMA_ASSERT(pAllocation);
12297 
12298  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12299  allocInfo.memoryTypeIndex = memTypeIndex;
12300  allocInfo.allocationSize = size;
12301 
12302 #if VMA_DEDICATED_ALLOCATION
12303  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12304  if(m_UseKhrDedicatedAllocation)
12305  {
12306  if(dedicatedBuffer != VK_NULL_HANDLE)
12307  {
12308  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12309  dedicatedAllocInfo.buffer = dedicatedBuffer;
12310  allocInfo.pNext = &dedicatedAllocInfo;
12311  }
12312  else if(dedicatedImage != VK_NULL_HANDLE)
12313  {
12314  dedicatedAllocInfo.image = dedicatedImage;
12315  allocInfo.pNext = &dedicatedAllocInfo;
12316  }
12317  }
12318 #endif // #if VMA_DEDICATED_ALLOCATION
12319 
12320  // Allocate VkDeviceMemory.
12321  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12322  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12323  if(res < 0)
12324  {
12325  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12326  return res;
12327  }
12328 
12329  void* pMappedData = VMA_NULL;
12330  if(map)
12331  {
12332  res = (*m_VulkanFunctions.vkMapMemory)(
12333  m_hDevice,
12334  hMemory,
12335  0,
12336  VK_WHOLE_SIZE,
12337  0,
12338  &pMappedData);
12339  if(res < 0)
12340  {
12341  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12342  FreeVulkanMemory(memTypeIndex, size, hMemory);
12343  return res;
12344  }
12345  }
12346 
12347  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12348  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12349  (*pAllocation)->SetUserData(this, pUserData);
12350  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12351  {
12352  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12353  }
12354 
12355  // Register it in m_pDedicatedAllocations.
12356  {
12357  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12358  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12359  VMA_ASSERT(pDedicatedAllocations);
12360  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12361  }
12362 
12363  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12364 
12365  return VK_SUCCESS;
12366 }
12367 
12368 void VmaAllocator_T::GetBufferMemoryRequirements(
12369  VkBuffer hBuffer,
12370  VkMemoryRequirements& memReq,
12371  bool& requiresDedicatedAllocation,
12372  bool& prefersDedicatedAllocation) const
12373 {
12374 #if VMA_DEDICATED_ALLOCATION
12375  if(m_UseKhrDedicatedAllocation)
12376  {
12377  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12378  memReqInfo.buffer = hBuffer;
12379 
12380  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12381 
12382  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12383  memReq2.pNext = &memDedicatedReq;
12384 
12385  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12386 
12387  memReq = memReq2.memoryRequirements;
12388  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12389  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12390  }
12391  else
12392 #endif // #if VMA_DEDICATED_ALLOCATION
12393  {
12394  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12395  requiresDedicatedAllocation = false;
12396  prefersDedicatedAllocation = false;
12397  }
12398 }
12399 
12400 void VmaAllocator_T::GetImageMemoryRequirements(
12401  VkImage hImage,
12402  VkMemoryRequirements& memReq,
12403  bool& requiresDedicatedAllocation,
12404  bool& prefersDedicatedAllocation) const
12405 {
12406 #if VMA_DEDICATED_ALLOCATION
12407  if(m_UseKhrDedicatedAllocation)
12408  {
12409  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12410  memReqInfo.image = hImage;
12411 
12412  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12413 
12414  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12415  memReq2.pNext = &memDedicatedReq;
12416 
12417  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12418 
12419  memReq = memReq2.memoryRequirements;
12420  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12421  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12422  }
12423  else
12424 #endif // #if VMA_DEDICATED_ALLOCATION
12425  {
12426  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12427  requiresDedicatedAllocation = false;
12428  prefersDedicatedAllocation = false;
12429  }
12430 }
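
// Both Get*MemoryRequirements paths above take the KHR branch only when the
// allocator was created with VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT,
// which in turn assumes the VkDevice was created with the two extensions
// enabled. A setup sketch:
//
//   const char* deviceExtensions[] = {
//       VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
//       VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
//   };
//   // ...create VkDevice with deviceExtensions enabled, then:
//   allocatorCreateInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;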
12431 
12432 VkResult VmaAllocator_T::AllocateMemory(
12433  const VkMemoryRequirements& vkMemReq,
12434  bool requiresDedicatedAllocation,
12435  bool prefersDedicatedAllocation,
12436  VkBuffer dedicatedBuffer,
12437  VkImage dedicatedImage,
12438  const VmaAllocationCreateInfo& createInfo,
12439  VmaSuballocationType suballocType,
12440  VmaAllocation* pAllocation)
12441 {
12442  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12443 
12444  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12445  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12446  {
12447  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12449  }
12450  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12451  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12452  {
12453  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12455  }
12456  if(requiresDedicatedAllocation)
12457  {
12458  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12459  {
12460  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12461  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12462  }
12463  if(createInfo.pool != VK_NULL_HANDLE)
12464  {
12465  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12467  }
12468  }
12469  if((createInfo.pool != VK_NULL_HANDLE) &&
12470  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12471  {
12472  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12473  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12474  }
12475 
12476  if(createInfo.pool != VK_NULL_HANDLE)
12477  {
12478  const VkDeviceSize alignmentForPool = VMA_MAX(
12479  vkMemReq.alignment,
12480  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12481  return createInfo.pool->m_BlockVector.Allocate(
12482  createInfo.pool,
12483  m_CurrentFrameIndex.load(),
12484  vkMemReq.size,
12485  alignmentForPool,
12486  createInfo,
12487  suballocType,
12488  pAllocation);
12489  }
12490  else
12491  {
12492  // Bit mask of memory Vulkan types acceptable for this allocation.
12493  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12494  uint32_t memTypeIndex = UINT32_MAX;
12495  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12496  if(res == VK_SUCCESS)
12497  {
12498  VkDeviceSize alignmentForMemType = VMA_MAX(
12499  vkMemReq.alignment,
12500  GetMemoryTypeMinAlignment(memTypeIndex));
12501 
12502  res = AllocateMemoryOfType(
12503  vkMemReq.size,
12504  alignmentForMemType,
12505  requiresDedicatedAllocation || prefersDedicatedAllocation,
12506  dedicatedBuffer,
12507  dedicatedImage,
12508  createInfo,
12509  memTypeIndex,
12510  suballocType,
12511  pAllocation);
12512  // Succeeded on first try.
12513  if(res == VK_SUCCESS)
12514  {
12515  return res;
12516  }
12517  // Allocation from this memory type failed. Try other compatible memory types.
12518  else
12519  {
12520  for(;;)
12521  {
12522  // Remove old memTypeIndex from list of possibilities.
12523  memoryTypeBits &= ~(1u << memTypeIndex);
12524  // Find alternative memTypeIndex.
12525  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12526  if(res == VK_SUCCESS)
12527  {
12528  alignmentForMemType = VMA_MAX(
12529  vkMemReq.alignment,
12530  GetMemoryTypeMinAlignment(memTypeIndex));
12531 
12532  res = AllocateMemoryOfType(
12533  vkMemReq.size,
12534  alignmentForMemType,
12535  requiresDedicatedAllocation || prefersDedicatedAllocation,
12536  dedicatedBuffer,
12537  dedicatedImage,
12538  createInfo,
12539  memTypeIndex,
12540  suballocType,
12541  pAllocation);
12542  // Allocation from this alternative memory type succeeded.
12543  if(res == VK_SUCCESS)
12544  {
12545  return res;
12546  }
12547  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12548  }
12549  // No other matching memory type index could be found.
12550  else
12551  {
12552  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12553  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12554  }
12555  }
12556  }
12557  }
12558  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12559  else
12560  return res;
12561  }
12562 }
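
// The same memory-type search is available up front through the public API,
// e.g. to pick the memoryTypeIndex for a custom pool. A sketch (assumes a
// valid allocator):

static VkResult PickMemoryTypeSketch(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    // UINT32_MAX = no restriction. AllocateMemory above narrows this mask one
    // bit at a time after each failed attempt, using the same function.
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}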
12563 
12564 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12565 {
12566  VMA_ASSERT(allocation);
12567 
12568  if(TouchAllocation(allocation))
12569  {
12570  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12571  {
12572  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12573  }
12574 
12575  switch(allocation->GetType())
12576  {
12577  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12578  {
12579  VmaBlockVector* pBlockVector = VMA_NULL;
12580  VmaPool hPool = allocation->GetPool();
12581  if(hPool != VK_NULL_HANDLE)
12582  {
12583  pBlockVector = &hPool->m_BlockVector;
12584  }
12585  else
12586  {
12587  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12588  pBlockVector = m_pBlockVectors[memTypeIndex];
12589  }
12590  pBlockVector->Free(allocation);
12591  }
12592  break;
12593  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12594  FreeDedicatedMemory(allocation);
12595  break;
12596  default:
12597  VMA_ASSERT(0);
12598  }
12599  }
12600 
12601  allocation->SetUserData(this, VMA_NULL);
12602  vma_delete(this, allocation);
12603 }
12604 
12605 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12606 {
12607  // Initialize.
12608  InitStatInfo(pStats->total);
12609  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12610  InitStatInfo(pStats->memoryType[i]);
12611  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12612  InitStatInfo(pStats->memoryHeap[i]);
12613 
12614  // Process default pools.
12615  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12616  {
12617  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12618  VMA_ASSERT(pBlockVector);
12619  pBlockVector->AddStats(pStats);
12620  }
12621 
12622  // Process custom pools.
12623  {
12624  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12625  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12626  {
12627  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12628  }
12629  }
12630 
12631  // Process dedicated allocations.
12632  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12633  {
12634  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12635  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12636  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12637  VMA_ASSERT(pDedicatedAllocVector);
12638  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12639  {
12640  VmaStatInfo allocationStatInfo;
12641  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12642  VmaAddStatInfo(pStats->total, allocationStatInfo);
12643  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12644  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12645  }
12646  }
12647 
12648  // Postprocess.
12649  VmaPostprocessCalcStatInfo(pStats->total);
12650  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12651  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12652  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12653  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12654 }
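
// Public-API counterpart: vmaCalculateStats() aggregates default pools, custom
// pools, and dedicated allocations exactly as above. A usage sketch:

static VkDeviceSize QueryTotalUsedBytesSketch(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.memoryType[i] and stats.memoryHeap[i] hold per-type/per-heap data.
    return stats.total.usedBytes;
}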
12655 
12656 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12657 
12658 VkResult VmaAllocator_T::DefragmentationBegin(
12659  const VmaDefragmentationInfo2& info,
12660  VmaDefragmentationStats* pStats,
12661  VmaDefragmentationContext* pContext)
12662 {
12663  if(info.pAllocationsChanged != VMA_NULL)
12664  {
12665  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
12666  }
12667  if(pStats != VMA_NULL)
12668  {
12669  memset(pStats, 0, sizeof(VmaDefragmentationStats));
12670  }
12671 
12672  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12673 
12674  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12675 
12676  const size_t poolCount = m_Pools.size();
12677 
12678  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12679  for(size_t allocIndex = 0; allocIndex < info.allocationCount; ++allocIndex)
12680  {
12681  VmaAllocation hAlloc = info.pAllocations[allocIndex];
12682  VMA_ASSERT(hAlloc);
12683  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12684  // DedicatedAlloc cannot be defragmented.
12685  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12686  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12687  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12688  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12689  // Lost allocation cannot be defragmented.
12690  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12691  {
12692  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12693 
12694  const VmaPool hAllocPool = hAlloc->GetPool();
12695  // This allocation belongs to custom pool.
12696  if(hAllocPool != VK_NULL_HANDLE)
12697  {
12698  // Pools with linear or buddy algorithm are not defragmented.
12699  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12700  {
12701  pAllocBlockVector = &hAllocPool->m_BlockVector;
12702  }
12703  }
12704  // This allocation belongs to general pool.
12705  else
12706  {
12707  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12708  }
12709 
12710  if(pAllocBlockVector != VMA_NULL)
12711  {
12712  VmaDefragmentator* const pDefragmentator =
12713  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12714  VkBool32* const pChanged = (info.pAllocationsChanged != VMA_NULL) ?
12715  &info.pAllocationsChanged[allocIndex] : VMA_NULL;
12716  pDefragmentator->AddAllocation(hAlloc, pChanged);
12717  }
12718  }
12719  }
12720 
12721  VkResult result = VK_SUCCESS;
12722 
12723  // ======== Main processing.
12724 
12725  VkDeviceSize maxBytesToMove = info.maxCpuBytesToMove;
12726  uint32_t maxAllocationsToMove = info.maxCpuAllocationsToMove;
12727 
12728  // Process standard memory.
12729  for(uint32_t memTypeIndex = 0;
12730  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12731  ++memTypeIndex)
12732  {
12733  // Only HOST_VISIBLE memory types can be defragmented.
12734  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12735  {
12736  result = m_pBlockVectors[memTypeIndex]->Defragment(
12737  pStats,
12738  maxBytesToMove,
12739  maxAllocationsToMove);
12740  }
12741  }
12742 
12743  // Process custom pools.
12744  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12745  {
12746  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12747  pStats,
12748  maxBytesToMove,
12749  maxAllocationsToMove);
12750  }
12751 
12752  // ======== Destroy defragmentators.
12753 
12754  // Process custom pools.
12755  for(size_t poolIndex = poolCount; poolIndex--; )
12756  {
12757  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12758  }
12759 
12760  // Process standard memory.
12761  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12762  {
12763  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12764  {
12765  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12766  }
12767  }
12768 
12769  return result;
12770 }
12771 
12772 VkResult VmaAllocator_T::DefragmentationEnd(
12773  VmaDefragmentationContext context)
12774 {
12775  vma_delete(this, context);
12776  return VK_SUCCESS;
12777 }
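
// A usage sketch of the Begin/End pair above via the public entry points,
// under the assumption that allocations[] holds movable, HOST_VISIBLE block
// allocations and that no resources bound to them are in use:

static VkResult DefragmentCpuSketch(
    VmaAllocator allocator, VmaAllocation* allocations, uint32_t count)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = count;
    info.pAllocations = allocations;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;    // No byte limit.
    info.maxCpuAllocationsToMove = UINT32_MAX; // No move-count limit.

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext ctx = VMA_NULL;
    VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
    // Moved allocations get new offsets: the caller must re-create or re-bind
    // buffers/images that used them.
    vmaDefragmentationEnd(allocator, ctx);
    return res;
}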
12778 
12779 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12780 {
12781  if(hAllocation->CanBecomeLost())
12782  {
12783  /*
12784  Warning: This is a carefully designed algorithm.
12785  Do not modify unless you really know what you're doing :)
12786  */
12787  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12788  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12789  for(;;)
12790  {
12791  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12792  {
12793  pAllocationInfo->memoryType = UINT32_MAX;
12794  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12795  pAllocationInfo->offset = 0;
12796  pAllocationInfo->size = hAllocation->GetSize();
12797  pAllocationInfo->pMappedData = VMA_NULL;
12798  pAllocationInfo->pUserData = hAllocation->GetUserData();
12799  return;
12800  }
12801  else if(localLastUseFrameIndex == localCurrFrameIndex)
12802  {
12803  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12804  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12805  pAllocationInfo->offset = hAllocation->GetOffset();
12806  pAllocationInfo->size = hAllocation->GetSize();
12807  pAllocationInfo->pMappedData = VMA_NULL;
12808  pAllocationInfo->pUserData = hAllocation->GetUserData();
12809  return;
12810  }
12811  else // Last use time earlier than current time.
12812  {
12813  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12814  {
12815  localLastUseFrameIndex = localCurrFrameIndex;
12816  }
12817  }
12818  }
12819  }
12820  else
12821  {
12822 #if VMA_STATS_STRING_ENABLED
12823  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12824  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12825  for(;;)
12826  {
12827  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12828  if(localLastUseFrameIndex == localCurrFrameIndex)
12829  {
12830  break;
12831  }
12832  else // Last use time earlier than current time.
12833  {
12834  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12835  {
12836  localLastUseFrameIndex = localCurrFrameIndex;
12837  }
12838  }
12839  }
12840 #endif
12841 
12842  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12843  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12844  pAllocationInfo->offset = hAllocation->GetOffset();
12845  pAllocationInfo->size = hAllocation->GetSize();
12846  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12847  pAllocationInfo->pUserData = hAllocation->GetUserData();
12848  }
12849 }
12850 
12851 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12852 {
12853  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12854  if(hAllocation->CanBecomeLost())
12855  {
12856  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12857  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12858  for(;;)
12859  {
12860  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12861  {
12862  return false;
12863  }
12864  else if(localLastUseFrameIndex == localCurrFrameIndex)
12865  {
12866  return true;
12867  }
12868  else // Last use time earlier than current time.
12869  {
12870  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12871  {
12872  localLastUseFrameIndex = localCurrFrameIndex;
12873  }
12874  }
12875  }
12876  }
12877  else
12878  {
12879 #if VMA_STATS_STRING_ENABLED
12880  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12881  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12882  for(;;)
12883  {
12884  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12885  if(localLastUseFrameIndex == localCurrFrameIndex)
12886  {
12887  break;
12888  }
12889  else // Last use time earlier than current time.
12890  {
12891  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12892  {
12893  localLastUseFrameIndex = localCurrFrameIndex;
12894  }
12895  }
12896  }
12897 #endif
12898 
12899  return true;
12900  }
12901 }
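
// Exposed publicly as vmaTouchAllocation(). For allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT the typical per-frame pattern is:

static bool UseOrRecreateSketch(VmaAllocator allocator, VmaAllocation alloc)
{
    if(vmaTouchAllocation(allocator, alloc))
    {
        return true; // Still valid; its last-use frame index was just bumped.
    }
    // Lost: destroy the old resource + allocation and recreate them.
    return false;
}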
12902 
12903 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12904 {
12905  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12906 
12907  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12908 
12909  if(newCreateInfo.maxBlockCount == 0)
12910  {
12911  newCreateInfo.maxBlockCount = SIZE_MAX;
12912  }
12913  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12914  {
12915  return VK_ERROR_INITIALIZATION_FAILED;
12916  }
12917 
12918  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12919 
12920  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12921 
12922  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12923  if(res != VK_SUCCESS)
12924  {
12925  vma_delete(this, *pPool);
12926  *pPool = VMA_NULL;
12927  return res;
12928  }
12929 
12930  // Add to m_Pools.
12931  {
12932  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12933  (*pPool)->SetId(m_NextPoolId++);
12934  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12935  }
12936 
12937  return VK_SUCCESS;
12938 }
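
// Public-API sketch of creating a custom pool handled by the code above
// (memoryTypeIndex would normally come from vmaFindMemoryTypeIndex):

static VkResult CreatePoolSketch(
    VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 0;     // 0 = use the preferred block size.
    poolCreateInfo.minBlockCount = 0; // Nothing preallocated by CreateMinBlocks.
    poolCreateInfo.maxBlockCount = 0; // 0 = unlimited (becomes SIZE_MAX above).
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}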
12939 
12940 void VmaAllocator_T::DestroyPool(VmaPool pool)
12941 {
12942  // Remove from m_Pools.
12943  {
12944  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12945  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12946  VMA_ASSERT(success && "Pool not found in Allocator.");
12947  }
12948 
12949  vma_delete(this, pool);
12950 }
12951 
12952 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12953 {
12954  pool->m_BlockVector.GetPoolStats(pPoolStats);
12955 }
12956 
12957 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
12958 {
12959  m_CurrentFrameIndex.store(frameIndex);
12960 }
12961 
12962 void VmaAllocator_T::MakePoolAllocationsLost(
12963  VmaPool hPool,
12964  size_t* pLostAllocationCount)
12965 {
12966  hPool->m_BlockVector.MakePoolAllocationsLost(
12967  m_CurrentFrameIndex.load(),
12968  pLostAllocationCount);
12969 }
12970 
12971 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
12972 {
12973  return hPool->m_BlockVector.CheckCorruption();
12974 }
12975 
12976 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12977 {
12978  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12979 
12980  // Process default pools.
12981  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12982  {
12983  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12984  {
12985  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12986  VMA_ASSERT(pBlockVector);
12987  VkResult localRes = pBlockVector->CheckCorruption();
12988  switch(localRes)
12989  {
12990  case VK_ERROR_FEATURE_NOT_PRESENT:
12991  break;
12992  case VK_SUCCESS:
12993  finalRes = VK_SUCCESS;
12994  break;
12995  default:
12996  return localRes;
12997  }
12998  }
12999  }
13000 
13001  // Process custom pools.
13002  {
13003  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13004  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13005  {
13006  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13007  {
13008  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13009  switch(localRes)
13010  {
13011  case VK_ERROR_FEATURE_NOT_PRESENT:
13012  break;
13013  case VK_SUCCESS:
13014  finalRes = VK_SUCCESS;
13015  break;
13016  default:
13017  return localRes;
13018  }
13019  }
13020  }
13021  }
13022 
13023  return finalRes;
13024 }
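
// Exposed publicly as vmaCheckCorruption(). It is only meaningful when the
// library is compiled with VMA_DEBUG_DETECT_CORRUPTION and a nonzero
// VMA_DEBUG_MARGIN. A sketch that validates every memory type:

static VkResult CheckAllCorruptionSketch(VmaAllocator allocator)
{
    // VK_SUCCESS: margins validated OK. VK_ERROR_FEATURE_NOT_PRESENT: nothing
    // was eligible for validation. Any other error: corruption detected.
    return vmaCheckCorruption(allocator, UINT32_MAX);
}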
13025 
13026 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13027 {
13028  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13029  (*pAllocation)->InitLost();
13030 }
13031 
13032 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13033 {
13034  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13035 
13036  VkResult res;
13037  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13038  {
13039  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13040  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13041  {
13042  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13043  if(res == VK_SUCCESS)
13044  {
13045  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13046  }
13047  }
13048  else
13049  {
13050  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13051  }
13052  }
13053  else
13054  {
13055  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13056  }
13057 
13058  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13059  {
13060  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13061  }
13062 
13063  return res;
13064 }
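
// The pfnAllocate/pfnFree hooks invoked here are the public
// VmaDeviceMemoryCallbacks, useful e.g. for tracking the number of live
// VkDeviceMemory blocks against maxMemoryAllocationCount. A sketch (a plain
// counter for brevity; use an atomic if allocations happen on many threads):

static uint32_t g_DeviceMemoryBlockCount = 0;

static void VKAPI_PTR CountAllocateSketch(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    (void)allocator; (void)memoryType; (void)memory; (void)size;
    ++g_DeviceMemoryBlockCount; // Called after each successful vkAllocateMemory.
}

static void VKAPI_PTR CountFreeSketch(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    (void)allocator; (void)memoryType; (void)memory; (void)size;
    --g_DeviceMemoryBlockCount; // Called before each vkFreeMemory.
}

// Registration:
//   VmaDeviceMemoryCallbacks cb = { CountAllocateSketch, CountFreeSketch };
//   allocatorCreateInfo.pDeviceMemoryCallbacks = &cb;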
13065 
13066 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13067 {
13068  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13069  {
13070  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13071  }
13072 
13073  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13074 
13075  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13076  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13077  {
13078  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13079  m_HeapSizeLimit[heapIndex] += size;
13080  }
13081 }
13082 
13083 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13084 {
13085  if(hAllocation->CanBecomeLost())
13086  {
13087  return VK_ERROR_MEMORY_MAP_FAILED;
13088  }
13089 
13090  switch(hAllocation->GetType())
13091  {
13092  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13093  {
13094  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13095  char *pBytes = VMA_NULL;
13096  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13097  if(res == VK_SUCCESS)
13098  {
13099  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13100  hAllocation->BlockAllocMap();
13101  }
13102  return res;
13103  }
13104  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13105  return hAllocation->DedicatedAllocMap(this, ppData);
13106  default:
13107  VMA_ASSERT(0);
13108  return VK_ERROR_MEMORY_MAP_FAILED;
13109  }
13110 }
13111 
13112 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13113 {
13114  switch(hAllocation->GetType())
13115  {
13116  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13117  {
13118  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13119  hAllocation->BlockAllocUnmap();
13120  pBlock->Unmap(this, 1);
13121  }
13122  break;
13123  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13124  hAllocation->DedicatedAllocUnmap(this);
13125  break;
13126  default:
13127  VMA_ASSERT(0);
13128  }
13129 }
13130 
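// Note: VmaDeviceMemoryBlock::Map()/Unmap() take a count argument (1 in the
// calls above), so mappings of block allocations are reference-counted. This is
// what allows multiple vmaMapMemory()/vmaUnmapMemory() pairs on allocations
// living in the same VkDeviceMemory block to coexist safely.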
13131 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13132 {
13133  VkResult res = VK_SUCCESS;
13134  switch(hAllocation->GetType())
13135  {
13136  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13137  res = GetVulkanFunctions().vkBindBufferMemory(
13138  m_hDevice,
13139  hBuffer,
13140  hAllocation->GetMemory(),
13141  0); //memoryOffset
13142  break;
13143  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13144  {
13145  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13146  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13147  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13148  break;
13149  }
13150  default:
13151  VMA_ASSERT(0);
13152  }
13153  return res;
13154 }
13155 
13156 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13157 {
13158  VkResult res = VK_SUCCESS;
13159  switch(hAllocation->GetType())
13160  {
13161  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13162  res = GetVulkanFunctions().vkBindImageMemory(
13163  m_hDevice,
13164  hImage,
13165  hAllocation->GetMemory(),
13166  0); //memoryOffset
13167  break;
13168  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13169  {
13170  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13171  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13172  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13173  break;
13174  }
13175  default:
13176  VMA_ASSERT(0);
13177  }
13178  return res;
13179 }
13180 
13181 void VmaAllocator_T::FlushOrInvalidateAllocation(
13182  VmaAllocation hAllocation,
13183  VkDeviceSize offset, VkDeviceSize size,
13184  VMA_CACHE_OPERATION op)
13185 {
13186  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13187  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13188  {
13189  const VkDeviceSize allocationSize = hAllocation->GetSize();
13190  VMA_ASSERT(offset <= allocationSize);
13191 
13192  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13193 
13194  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13195  memRange.memory = hAllocation->GetMemory();
13196 
13197  switch(hAllocation->GetType())
13198  {
13199  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13200  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13201  if(size == VK_WHOLE_SIZE)
13202  {
13203  memRange.size = allocationSize - memRange.offset;
13204  }
13205  else
13206  {
13207  VMA_ASSERT(offset + size <= allocationSize);
13208  memRange.size = VMA_MIN(
13209  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13210  allocationSize - memRange.offset);
13211  }
13212  break;
13213 
13214  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13215  {
13216  // 1. Still within this allocation.
13217  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13218  if(size == VK_WHOLE_SIZE)
13219  {
13220  size = allocationSize - offset;
13221  }
13222  else
13223  {
13224  VMA_ASSERT(offset + size <= allocationSize);
13225  }
13226  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13227 
13228  // 2. Adjust to whole block.
13229  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13230  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13231  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13232  memRange.offset += allocationOffset;
13233  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13234 
13235  break;
13236  }
13237 
13238  default:
13239  VMA_ASSERT(0);
13240  }
13241 
13242  switch(op)
13243  {
13244  case VMA_CACHE_FLUSH:
13245  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13246  break;
13247  case VMA_CACHE_INVALIDATE:
13248  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13249  break;
13250  default:
13251  VMA_ASSERT(0);
13252  }
13253  }
13254  // else: Just ignore this call.
13255 }
13256 
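// Worked example for the alignment math above: with nonCoherentAtomSize = 64,
// offset = 100 and size = 200 (and a sufficiently large allocation),
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256
// so the flushed/invalidated range [64, 320) fully covers the requested [100, 300),
// as required for non-coherent memory.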
13257 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13258 {
13259  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13260 
13261  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13262  {
13263  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13264  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13265  VMA_ASSERT(pDedicatedAllocations);
13266  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13267  VMA_ASSERT(success);
13268  }
13269 
13270  VkDeviceMemory hMemory = allocation->GetMemory();
13271 
13272  /*
13273  There is no need to call this, because the Vulkan spec allows skipping
13274  vkUnmapMemory before vkFreeMemory.
13275 
13276  if(allocation->GetMappedData() != VMA_NULL)
13277  {
13278  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13279  }
13280  */
13281 
13282  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13283 
13284  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13285 }
13286 
13287 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13288 {
13289  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13290  !hAllocation->CanBecomeLost() &&
13291  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13292  {
13293  void* pData = VMA_NULL;
13294  VkResult res = Map(hAllocation, &pData);
13295  if(res == VK_SUCCESS)
13296  {
13297  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13298  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13299  Unmap(hAllocation);
13300  }
13301  else
13302  {
13303  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13304  }
13305  }
13306 }
13307 
13308 #if VMA_STATS_STRING_ENABLED
13309 
13310 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13311 {
13312  bool dedicatedAllocationsStarted = false;
13313  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13314  {
13315  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13316  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13317  VMA_ASSERT(pDedicatedAllocVector);
13318  if(pDedicatedAllocVector->empty() == false)
13319  {
13320  if(dedicatedAllocationsStarted == false)
13321  {
13322  dedicatedAllocationsStarted = true;
13323  json.WriteString("DedicatedAllocations");
13324  json.BeginObject();
13325  }
13326 
13327  json.BeginString("Type ");
13328  json.ContinueString(memTypeIndex);
13329  json.EndString();
13330 
13331  json.BeginArray();
13332 
13333  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13334  {
13335  json.BeginObject(true);
13336  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13337  hAlloc->PrintParameters(json);
13338  json.EndObject();
13339  }
13340 
13341  json.EndArray();
13342  }
13343  }
13344  if(dedicatedAllocationsStarted)
13345  {
13346  json.EndObject();
13347  }
13348 
13349  {
13350  bool allocationsStarted = false;
13351  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13352  {
13353  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13354  {
13355  if(allocationsStarted == false)
13356  {
13357  allocationsStarted = true;
13358  json.WriteString("DefaultPools");
13359  json.BeginObject();
13360  }
13361 
13362  json.BeginString("Type ");
13363  json.ContinueString(memTypeIndex);
13364  json.EndString();
13365 
13366  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13367  }
13368  }
13369  if(allocationsStarted)
13370  {
13371  json.EndObject();
13372  }
13373  }
13374 
13375  // Custom pools
13376  {
13377  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13378  const size_t poolCount = m_Pools.size();
13379  if(poolCount > 0)
13380  {
13381  json.WriteString("Pools");
13382  json.BeginObject();
13383  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13384  {
13385  json.BeginString();
13386  json.ContinueString(m_Pools[poolIndex]->GetId());
13387  json.EndString();
13388 
13389  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13390  }
13391  json.EndObject();
13392  }
13393  }
13394 }
13395 
13396 #endif // #if VMA_STATS_STRING_ENABLED
13397 
13398 //////////////////////////////////////////////////////////////////////////////
13399 // Public interface
13400 
13401 VkResult vmaCreateAllocator(
13402  const VmaAllocatorCreateInfo* pCreateInfo,
13403  VmaAllocator* pAllocator)
13404 {
13405  VMA_ASSERT(pCreateInfo && pAllocator);
13406  VMA_DEBUG_LOG("vmaCreateAllocator");
13407  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13408  return (*pAllocator)->Init(pCreateInfo);
13409 }
13410 
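// Illustrative usage sketch (not part of the original source), assuming
// `physicalDevice` and `device` are valid handles created elsewhere:
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator); // Also safe to call with VK_NULL_HANDLE.
*/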
13411 void vmaDestroyAllocator(
13412  VmaAllocator allocator)
13413 {
13414  if(allocator != VK_NULL_HANDLE)
13415  {
13416  VMA_DEBUG_LOG("vmaDestroyAllocator");
13417  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13418  vma_delete(&allocationCallbacks, allocator);
13419  }
13420 }
13421 
13422 void vmaGetPhysicalDeviceProperties(
13423  VmaAllocator allocator,
13424  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13425 {
13426  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13427  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13428 }
13429 
13430 void vmaGetMemoryProperties(
13431  VmaAllocator allocator,
13432  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13433 {
13434  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13435  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13436 }
13437 
13438 void vmaGetMemoryTypeProperties(
13439  VmaAllocator allocator,
13440  uint32_t memoryTypeIndex,
13441  VkMemoryPropertyFlags* pFlags)
13442 {
13443  VMA_ASSERT(allocator && pFlags);
13444  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13445  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13446 }
13447 
13448 void vmaSetCurrentFrameIndex(
13449  VmaAllocator allocator,
13450  uint32_t frameIndex)
13451 {
13452  VMA_ASSERT(allocator);
13453  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13454 
13455  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13456 
13457  allocator->SetCurrentFrameIndex(frameIndex);
13458 }
13459 
13460 void vmaCalculateStats(
13461  VmaAllocator allocator,
13462  VmaStats* pStats)
13463 {
13464  VMA_ASSERT(allocator && pStats);
13465  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13466  allocator->CalculateStats(pStats);
13467 }
13468 
13469 #if VMA_STATS_STRING_ENABLED
13470 
13471 void vmaBuildStatsString(
13472  VmaAllocator allocator,
13473  char** ppStatsString,
13474  VkBool32 detailedMap)
13475 {
13476  VMA_ASSERT(allocator && ppStatsString);
13477  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13478 
13479  VmaStringBuilder sb(allocator);
13480  {
13481  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13482  json.BeginObject();
13483 
13484  VmaStats stats;
13485  allocator->CalculateStats(&stats);
13486 
13487  json.WriteString("Total");
13488  VmaPrintStatInfo(json, stats.total);
13489 
13490  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13491  {
13492  json.BeginString("Heap ");
13493  json.ContinueString(heapIndex);
13494  json.EndString();
13495  json.BeginObject();
13496 
13497  json.WriteString("Size");
13498  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13499 
13500  json.WriteString("Flags");
13501  json.BeginArray(true);
13502  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13503  {
13504  json.WriteString("DEVICE_LOCAL");
13505  }
13506  json.EndArray();
13507 
13508  if(stats.memoryHeap[heapIndex].blockCount > 0)
13509  {
13510  json.WriteString("Stats");
13511  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13512  }
13513 
13514  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13515  {
13516  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13517  {
13518  json.BeginString("Type ");
13519  json.ContinueString(typeIndex);
13520  json.EndString();
13521 
13522  json.BeginObject();
13523 
13524  json.WriteString("Flags");
13525  json.BeginArray(true);
13526  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13527  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13528  {
13529  json.WriteString("DEVICE_LOCAL");
13530  }
13531  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13532  {
13533  json.WriteString("HOST_VISIBLE");
13534  }
13535  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13536  {
13537  json.WriteString("HOST_COHERENT");
13538  }
13539  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13540  {
13541  json.WriteString("HOST_CACHED");
13542  }
13543  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13544  {
13545  json.WriteString("LAZILY_ALLOCATED");
13546  }
13547  json.EndArray();
13548 
13549  if(stats.memoryType[typeIndex].blockCount > 0)
13550  {
13551  json.WriteString("Stats");
13552  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13553  }
13554 
13555  json.EndObject();
13556  }
13557  }
13558 
13559  json.EndObject();
13560  }
13561  if(detailedMap == VK_TRUE)
13562  {
13563  allocator->PrintDetailedMap(json);
13564  }
13565 
13566  json.EndObject();
13567  }
13568 
13569  const size_t len = sb.GetLength();
13570  char* const pChars = vma_new_array(allocator, char, len + 1);
13571  if(len > 0)
13572  {
13573  memcpy(pChars, sb.GetData(), len);
13574  }
13575  pChars[len] = '\0';
13576  *ppStatsString = pChars;
13577 }
13578 
13579 void vmaFreeStatsString(
13580  VmaAllocator allocator,
13581  char* pStatsString)
13582 {
13583  if(pStatsString != VMA_NULL)
13584  {
13585  VMA_ASSERT(allocator);
13586  size_t len = strlen(pStatsString);
13587  vma_delete_array(allocator, pStatsString, len + 1);
13588  }
13589 }
13590 
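// Illustrative usage sketch: a string returned by vmaBuildStatsString() must
// be released with vmaFreeStatsString() on the same allocator.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/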
13591 #endif // #if VMA_STATS_STRING_ENABLED
13592 
13593 /*
13594 This function is not protected by any mutex because it just reads immutable data.
13595 */
13596 VkResult vmaFindMemoryTypeIndex(
13597  VmaAllocator allocator,
13598  uint32_t memoryTypeBits,
13599  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13600  uint32_t* pMemoryTypeIndex)
13601 {
13602  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13603  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13604  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13605 
13606  if(pAllocationCreateInfo->memoryTypeBits != 0)
13607  {
13608  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13609  }
13610 
13611  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13612  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13613 
13614  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13615  if(mapped)
13616  {
13617  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13618  }
13619 
13620  // Convert usage to requiredFlags and preferredFlags.
13621  switch(pAllocationCreateInfo->usage)
13622  {
13623  case VMA_MEMORY_USAGE_UNKNOWN:
13624  break;
13625  case VMA_MEMORY_USAGE_GPU_ONLY:
13626  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13627  {
13628  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13629  }
13630  break;
13631  case VMA_MEMORY_USAGE_CPU_ONLY:
13632  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13633  break;
13634  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13635  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13636  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13637  {
13638  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13639  }
13640  break;
13641  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13642  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13643  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13644  break;
13645  default:
13646  break;
13647  }
13648 
13649  *pMemoryTypeIndex = UINT32_MAX;
13650  uint32_t minCost = UINT32_MAX;
13651  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13652  memTypeIndex < allocator->GetMemoryTypeCount();
13653  ++memTypeIndex, memTypeBit <<= 1)
13654  {
13655  // This memory type is acceptable according to the memoryTypeBits bitmask.
13656  if((memTypeBit & memoryTypeBits) != 0)
13657  {
13658  const VkMemoryPropertyFlags currFlags =
13659  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13660  // This memory type contains requiredFlags.
13661  if((requiredFlags & ~currFlags) == 0)
13662  {
13663  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13664  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13665  // Remember memory type with lowest cost.
13666  if(currCost < minCost)
13667  {
13668  *pMemoryTypeIndex = memTypeIndex;
13669  if(currCost == 0)
13670  {
13671  return VK_SUCCESS;
13672  }
13673  minCost = currCost;
13674  }
13675  }
13676  }
13677  }
13678  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13679 }
13680 
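// Illustrative usage sketch: finding a memory type for a staging buffer.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // Requires HOST_VISIBLE + HOST_COHERENT.

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(
    allocator,
    UINT32_MAX, // memoryTypeBits: accept any memory type.
    &allocCreateInfo,
    &memTypeIndex);
*/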
13681 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13682  VmaAllocator allocator,
13683  const VkBufferCreateInfo* pBufferCreateInfo,
13684  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13685  uint32_t* pMemoryTypeIndex)
13686 {
13687  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13688  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13689  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13690  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13691 
13692  const VkDevice hDev = allocator->m_hDevice;
13693  VkBuffer hBuffer = VK_NULL_HANDLE;
13694  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13695  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13696  if(res == VK_SUCCESS)
13697  {
13698  VkMemoryRequirements memReq = {};
13699  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13700  hDev, hBuffer, &memReq);
13701 
13702  res = vmaFindMemoryTypeIndex(
13703  allocator,
13704  memReq.memoryTypeBits,
13705  pAllocationCreateInfo,
13706  pMemoryTypeIndex);
13707 
13708  allocator->GetVulkanFunctions().vkDestroyBuffer(
13709  hDev, hBuffer, allocator->GetAllocationCallbacks());
13710  }
13711  return res;
13712 }
13713 
13714 VkResult vmaFindMemoryTypeIndexForImageInfo(
13715  VmaAllocator allocator,
13716  const VkImageCreateInfo* pImageCreateInfo,
13717  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13718  uint32_t* pMemoryTypeIndex)
13719 {
13720  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13721  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13722  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13723  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13724 
13725  const VkDevice hDev = allocator->m_hDevice;
13726  VkImage hImage = VK_NULL_HANDLE;
13727  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13728  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13729  if(res == VK_SUCCESS)
13730  {
13731  VkMemoryRequirements memReq = {};
13732  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13733  hDev, hImage, &memReq);
13734 
13735  res = vmaFindMemoryTypeIndex(
13736  allocator,
13737  memReq.memoryTypeBits,
13738  pAllocationCreateInfo,
13739  pMemoryTypeIndex);
13740 
13741  allocator->GetVulkanFunctions().vkDestroyImage(
13742  hDev, hImage, allocator->GetAllocationCallbacks());
13743  }
13744  return res;
13745 }
13746 
13747 VkResult vmaCreatePool(
13748  VmaAllocator allocator,
13749  const VmaPoolCreateInfo* pCreateInfo,
13750  VmaPool* pPool)
13751 {
13752  VMA_ASSERT(allocator && pCreateInfo && pPool);
13753 
13754  VMA_DEBUG_LOG("vmaCreatePool");
13755 
13756  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13757 
13758  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13759 
13760 #if VMA_RECORDING_ENABLED
13761  if(allocator->GetRecorder() != VMA_NULL)
13762  {
13763  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13764  }
13765 #endif
13766 
13767  return res;
13768 }
13769 
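// Illustrative usage sketch: creating a custom pool for a previously found
// memory type index (`memTypeIndex` assumed):
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block.
poolCreateInfo.minBlockCount = 1;

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate from it by setting VmaAllocationCreateInfo::pool ...
vmaDestroyPool(allocator, pool);
*/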
13770 void vmaDestroyPool(
13771  VmaAllocator allocator,
13772  VmaPool pool)
13773 {
13774  VMA_ASSERT(allocator);
13775 
13776  if(pool == VK_NULL_HANDLE)
13777  {
13778  return;
13779  }
13780 
13781  VMA_DEBUG_LOG("vmaDestroyPool");
13782 
13783  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13784 
13785 #if VMA_RECORDING_ENABLED
13786  if(allocator->GetRecorder() != VMA_NULL)
13787  {
13788  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13789  }
13790 #endif
13791 
13792  allocator->DestroyPool(pool);
13793 }
13794 
13795 void vmaGetPoolStats(
13796  VmaAllocator allocator,
13797  VmaPool pool,
13798  VmaPoolStats* pPoolStats)
13799 {
13800  VMA_ASSERT(allocator && pool && pPoolStats);
13801 
13802  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13803 
13804  allocator->GetPoolStats(pool, pPoolStats);
13805 }
13806 
13807 void vmaMakePoolAllocationsLost(
13808  VmaAllocator allocator,
13809  VmaPool pool,
13810  size_t* pLostAllocationCount)
13811 {
13812  VMA_ASSERT(allocator && pool);
13813 
13814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13815 
13816 #if VMA_RECORDING_ENABLED
13817  if(allocator->GetRecorder() != VMA_NULL)
13818  {
13819  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13820  }
13821 #endif
13822 
13823  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13824 }
13825 
13826 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13827 {
13828  VMA_ASSERT(allocator && pool);
13829 
13830  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13831 
13832  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13833 
13834  return allocator->CheckPoolCorruption(pool);
13835 }
13836 
13837 VkResult vmaAllocateMemory(
13838  VmaAllocator allocator,
13839  const VkMemoryRequirements* pVkMemoryRequirements,
13840  const VmaAllocationCreateInfo* pCreateInfo,
13841  VmaAllocation* pAllocation,
13842  VmaAllocationInfo* pAllocationInfo)
13843 {
13844  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13845 
13846  VMA_DEBUG_LOG("vmaAllocateMemory");
13847 
13848  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13849 
13850  VkResult result = allocator->AllocateMemory(
13851  *pVkMemoryRequirements,
13852  false, // requiresDedicatedAllocation
13853  false, // prefersDedicatedAllocation
13854  VK_NULL_HANDLE, // dedicatedBuffer
13855  VK_NULL_HANDLE, // dedicatedImage
13856  *pCreateInfo,
13857  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13858  pAllocation);
13859 
13860 #if VMA_RECORDING_ENABLED
13861  if(allocator->GetRecorder() != VMA_NULL)
13862  {
13863  allocator->GetRecorder()->RecordAllocateMemory(
13864  allocator->GetCurrentFrameIndex(),
13865  *pVkMemoryRequirements,
13866  *pCreateInfo,
13867  *pAllocation);
13868  }
13869 #endif
13870 
13871  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13872  {
13873  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13874  }
13875 
13876  return result;
13877 }
13878 
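// Illustrative usage sketch: raw allocation for VkMemoryRequirements obtained
// elsewhere (`memReq` assumed, e.g. from vkGetBufferMemoryRequirements):
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo = {};
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, &allocInfo);
// ...
vmaFreeMemory(allocator, allocation);
*/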
13879 VkResult vmaAllocateMemoryForBuffer(
13880  VmaAllocator allocator,
13881  VkBuffer buffer,
13882  const VmaAllocationCreateInfo* pCreateInfo,
13883  VmaAllocation* pAllocation,
13884  VmaAllocationInfo* pAllocationInfo)
13885 {
13886  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13887 
13888  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13889 
13890  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13891 
13892  VkMemoryRequirements vkMemReq = {};
13893  bool requiresDedicatedAllocation = false;
13894  bool prefersDedicatedAllocation = false;
13895  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13896  requiresDedicatedAllocation,
13897  prefersDedicatedAllocation);
13898 
13899  VkResult result = allocator->AllocateMemory(
13900  vkMemReq,
13901  requiresDedicatedAllocation,
13902  prefersDedicatedAllocation,
13903  buffer, // dedicatedBuffer
13904  VK_NULL_HANDLE, // dedicatedImage
13905  *pCreateInfo,
13906  VMA_SUBALLOCATION_TYPE_BUFFER,
13907  pAllocation);
13908 
13909 #if VMA_RECORDING_ENABLED
13910  if(allocator->GetRecorder() != VMA_NULL)
13911  {
13912  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13913  allocator->GetCurrentFrameIndex(),
13914  vkMemReq,
13915  requiresDedicatedAllocation,
13916  prefersDedicatedAllocation,
13917  *pCreateInfo,
13918  *pAllocation);
13919  }
13920 #endif
13921 
13922  if(pAllocationInfo && result == VK_SUCCESS)
13923  {
13924  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13925  }
13926 
13927  return result;
13928 }
13929 
13930 VkResult vmaAllocateMemoryForImage(
13931  VmaAllocator allocator,
13932  VkImage image,
13933  const VmaAllocationCreateInfo* pCreateInfo,
13934  VmaAllocation* pAllocation,
13935  VmaAllocationInfo* pAllocationInfo)
13936 {
13937  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13938 
13939  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13940 
13941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13942 
13943  VkMemoryRequirements vkMemReq = {};
13944  bool requiresDedicatedAllocation = false;
13945  bool prefersDedicatedAllocation = false;
13946  allocator->GetImageMemoryRequirements(image, vkMemReq,
13947  requiresDedicatedAllocation, prefersDedicatedAllocation);
13948 
13949  VkResult result = allocator->AllocateMemory(
13950  vkMemReq,
13951  requiresDedicatedAllocation,
13952  prefersDedicatedAllocation,
13953  VK_NULL_HANDLE, // dedicatedBuffer
13954  image, // dedicatedImage
13955  *pCreateInfo,
13956  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13957  pAllocation);
13958 
13959 #if VMA_RECORDING_ENABLED
13960  if(allocator->GetRecorder() != VMA_NULL)
13961  {
13962  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13963  allocator->GetCurrentFrameIndex(),
13964  vkMemReq,
13965  requiresDedicatedAllocation,
13966  prefersDedicatedAllocation,
13967  *pCreateInfo,
13968  *pAllocation);
13969  }
13970 #endif
13971 
13972  if(pAllocationInfo && result == VK_SUCCESS)
13973  {
13974  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13975  }
13976 
13977  return result;
13978 }
13979 
13980 void vmaFreeMemory(
13981  VmaAllocator allocator,
13982  VmaAllocation allocation)
13983 {
13984  VMA_ASSERT(allocator);
13985 
13986  if(allocation == VK_NULL_HANDLE)
13987  {
13988  return;
13989  }
13990 
13991  VMA_DEBUG_LOG("vmaFreeMemory");
13992 
13993  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13994 
13995 #if VMA_RECORDING_ENABLED
13996  if(allocator->GetRecorder() != VMA_NULL)
13997  {
13998  allocator->GetRecorder()->RecordFreeMemory(
13999  allocator->GetCurrentFrameIndex(),
14000  allocation);
14001  }
14002 #endif
14003 
14004  allocator->FreeMemory(allocation);
14005 }
14006 
14007 void vmaGetAllocationInfo(
14008  VmaAllocator allocator,
14009  VmaAllocation allocation,
14010  VmaAllocationInfo* pAllocationInfo)
14011 {
14012  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14013 
14014  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14015 
14016 #if VMA_RECORDING_ENABLED
14017  if(allocator->GetRecorder() != VMA_NULL)
14018  {
14019  allocator->GetRecorder()->RecordGetAllocationInfo(
14020  allocator->GetCurrentFrameIndex(),
14021  allocation);
14022  }
14023 #endif
14024 
14025  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14026 }
14027 
14028 VkBool32 vmaTouchAllocation(
14029  VmaAllocator allocator,
14030  VmaAllocation allocation)
14031 {
14032  VMA_ASSERT(allocator && allocation);
14033 
14034  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14035 
14036 #if VMA_RECORDING_ENABLED
14037  if(allocator->GetRecorder() != VMA_NULL)
14038  {
14039  allocator->GetRecorder()->RecordTouchAllocation(
14040  allocator->GetCurrentFrameIndex(),
14041  allocation);
14042  }
14043 #endif
14044 
14045  return allocator->TouchAllocation(allocation);
14046 }
14047 
14048 void vmaSetAllocationUserData(
14049  VmaAllocator allocator,
14050  VmaAllocation allocation,
14051  void* pUserData)
14052 {
14053  VMA_ASSERT(allocator && allocation);
14054 
14055  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14056 
14057  allocation->SetUserData(allocator, pUserData);
14058 
14059 #if VMA_RECORDING_ENABLED
14060  if(allocator->GetRecorder() != VMA_NULL)
14061  {
14062  allocator->GetRecorder()->RecordSetAllocationUserData(
14063  allocator->GetCurrentFrameIndex(),
14064  allocation,
14065  pUserData);
14066  }
14067 #endif
14068 }
14069 
14070 void vmaCreateLostAllocation(
14071  VmaAllocator allocator,
14072  VmaAllocation* pAllocation)
14073 {
14074  VMA_ASSERT(allocator && pAllocation);
14075 
14076  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14077 
14078  allocator->CreateLostAllocation(pAllocation);
14079 
14080 #if VMA_RECORDING_ENABLED
14081  if(allocator->GetRecorder() != VMA_NULL)
14082  {
14083  allocator->GetRecorder()->RecordCreateLostAllocation(
14084  allocator->GetCurrentFrameIndex(),
14085  *pAllocation);
14086  }
14087 #endif
14088 }
14089 
14090 VkResult vmaMapMemory(
14091  VmaAllocator allocator,
14092  VmaAllocation allocation,
14093  void** ppData)
14094 {
14095  VMA_ASSERT(allocator && allocation && ppData);
14096 
14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14098 
14099  VkResult res = allocator->Map(allocation, ppData);
14100 
14101 #if VMA_RECORDING_ENABLED
14102  if(allocator->GetRecorder() != VMA_NULL)
14103  {
14104  allocator->GetRecorder()->RecordMapMemory(
14105  allocator->GetCurrentFrameIndex(),
14106  allocation);
14107  }
14108 #endif
14109 
14110  return res;
14111 }
14112 
14113 void vmaUnmapMemory(
14114  VmaAllocator allocator,
14115  VmaAllocation allocation)
14116 {
14117  VMA_ASSERT(allocator && allocation);
14118 
14119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14120 
14121 #if VMA_RECORDING_ENABLED
14122  if(allocator->GetRecorder() != VMA_NULL)
14123  {
14124  allocator->GetRecorder()->RecordUnmapMemory(
14125  allocator->GetCurrentFrameIndex(),
14126  allocation);
14127  }
14128 #endif
14129 
14130  allocator->Unmap(allocation);
14131 }
14132 
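// Illustrative usage sketch: map, write, flush, unmap. The flush matters only
// for non-HOST_COHERENT memory; FlushOrInvalidateAllocation() above skips
// coherent types anyway. (`srcData`/`srcSize` assumed.)
/*
void* mappedData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, (size_t)srcSize);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
*/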
14133 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14134 {
14135  VMA_ASSERT(allocator && allocation);
14136 
14137  VMA_DEBUG_LOG("vmaFlushAllocation");
14138 
14139  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14140 
14141  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14142 
14143 #if VMA_RECORDING_ENABLED
14144  if(allocator->GetRecorder() != VMA_NULL)
14145  {
14146  allocator->GetRecorder()->RecordFlushAllocation(
14147  allocator->GetCurrentFrameIndex(),
14148  allocation, offset, size);
14149  }
14150 #endif
14151 }
14152 
14153 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14154 {
14155  VMA_ASSERT(allocator && allocation);
14156 
14157  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14158 
14159  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14160 
14161  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14162 
14163 #if VMA_RECORDING_ENABLED
14164  if(allocator->GetRecorder() != VMA_NULL)
14165  {
14166  allocator->GetRecorder()->RecordInvalidateAllocation(
14167  allocator->GetCurrentFrameIndex(),
14168  allocation, offset, size);
14169  }
14170 #endif
14171 }
14172 
14173 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14174 {
14175  VMA_ASSERT(allocator);
14176 
14177  VMA_DEBUG_LOG("vmaCheckCorruption");
14178 
14179  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14180 
14181  return allocator->CheckCorruption(memoryTypeBits);
14182 }
14183 
14184 VkResult vmaDefragment(
14185  VmaAllocator allocator,
14186  VmaAllocation* pAllocations,
14187  size_t allocationCount,
14188  VkBool32* pAllocationsChanged,
14189  const VmaDefragmentationInfo *pDefragmentationInfo,
14190  VmaDefragmentationStats* pDefragmentationStats)
14191 {
14192  // Deprecated interface, reimplemented on top of the new one.
14193 
14194  VmaDefragmentationInfo2 info2 = {};
14195  info2.allocationCount = allocationCount;
14196  info2.pAllocations = pAllocations;
14197  info2.pAllocationsChanged = pAllocationsChanged;
14198  if(pDefragmentationInfo != VMA_NULL)
14199  {
14200  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
14201  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
14202  }
14203  else
14204  {
14205  info2.maxCpuAllocationsToMove = UINT32_MAX;
14206  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
14207  }
14208  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
14209 
14210  VmaDefragmentationContext ctx;
14211  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
14212  if(res == VK_NOT_READY)
14213  {
14214  res = vmaDefragmentationEnd(allocator, ctx);
14215  }
14216  return res;
14217 }
14218 
14219 VkResult vmaDefragmentationBegin(
14220  VmaAllocator allocator,
14221  const VmaDefragmentationInfo2* pInfo,
14222  VmaDefragmentationStats* pStats,
14223  VmaDefragmentationContext *pContext)
14224 {
14225  VMA_ASSERT(allocator && pInfo && pContext);
14226 
14227  VMA_DEBUG_LOG("vmaDefragmentationBegin");
14228 
14229  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14230 
14231  return allocator->DefragmentationBegin(*pInfo, pStats, pContext);
14232 }
14233 
14234 VkResult vmaDefragmentationEnd(
14235  VmaAllocator allocator,
14236  VmaDefragmentationContext context)
14237 {
14238  VMA_ASSERT(allocator);
14239 
14240  VMA_DEBUG_LOG("vmaDefragmentationEnd");
14241 
14242  if(context != VK_NULL_HANDLE)
14243  {
14244  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14245 
14246  return allocator->DefragmentationEnd(context);
14247  }
14248  else
14249  {
14250  return VK_SUCCESS;
14251  }
14252 }
14253 
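// Illustrative usage sketch of the non-deprecated API, mirroring what
// vmaDefragment() above does internally (`allocs`/`allocCount` assumed):
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
*/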
14254 VkResult vmaBindBufferMemory(
14255  VmaAllocator allocator,
14256  VmaAllocation allocation,
14257  VkBuffer buffer)
14258 {
14259  VMA_ASSERT(allocator && allocation && buffer);
14260 
14261  VMA_DEBUG_LOG("vmaBindBufferMemory");
14262 
14263  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14264 
14265  return allocator->BindBufferMemory(allocation, buffer);
14266 }
14267 
14268 VkResult vmaBindImageMemory(
14269  VmaAllocator allocator,
14270  VmaAllocation allocation,
14271  VkImage image)
14272 {
14273  VMA_ASSERT(allocator && allocation && image);
14274 
14275  VMA_DEBUG_LOG("vmaBindImageMemory");
14276 
14277  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14278 
14279  return allocator->BindImageMemory(allocation, image);
14280 }
14281 
14282 VkResult vmaCreateBuffer(
14283  VmaAllocator allocator,
14284  const VkBufferCreateInfo* pBufferCreateInfo,
14285  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14286  VkBuffer* pBuffer,
14287  VmaAllocation* pAllocation,
14288  VmaAllocationInfo* pAllocationInfo)
14289 {
14290  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14291 
14292  VMA_DEBUG_LOG("vmaCreateBuffer");
14293 
14294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14295 
14296  *pBuffer = VK_NULL_HANDLE;
14297  *pAllocation = VK_NULL_HANDLE;
14298 
14299  // 1. Create VkBuffer.
14300  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14301  allocator->m_hDevice,
14302  pBufferCreateInfo,
14303  allocator->GetAllocationCallbacks(),
14304  pBuffer);
14305  if(res >= 0)
14306  {
14307  // 2. vkGetBufferMemoryRequirements.
14308  VkMemoryRequirements vkMemReq = {};
14309  bool requiresDedicatedAllocation = false;
14310  bool prefersDedicatedAllocation = false;
14311  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14312  requiresDedicatedAllocation, prefersDedicatedAllocation);
14313 
14314  // Make sure the alignment requirements for specific buffer usages reported
14315  // in Physical Device Properties are included in the alignment reported by memory requirements.
14316  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14317  {
14318  VMA_ASSERT(vkMemReq.alignment %
14319  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14320  }
14321  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14322  {
14323  VMA_ASSERT(vkMemReq.alignment %
14324  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14325  }
14326  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14327  {
14328  VMA_ASSERT(vkMemReq.alignment %
14329  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14330  }
14331 
14332  // 3. Allocate memory using allocator.
14333  res = allocator->AllocateMemory(
14334  vkMemReq,
14335  requiresDedicatedAllocation,
14336  prefersDedicatedAllocation,
14337  *pBuffer, // dedicatedBuffer
14338  VK_NULL_HANDLE, // dedicatedImage
14339  *pAllocationCreateInfo,
14340  VMA_SUBALLOCATION_TYPE_BUFFER,
14341  pAllocation);
14342 
14343 #if VMA_RECORDING_ENABLED
14344  if(allocator->GetRecorder() != VMA_NULL)
14345  {
14346  allocator->GetRecorder()->RecordCreateBuffer(
14347  allocator->GetCurrentFrameIndex(),
14348  *pBufferCreateInfo,
14349  *pAllocationCreateInfo,
14350  *pAllocation);
14351  }
14352 #endif
14353 
14354  if(res >= 0)
14355  {
14356  // 4. Bind buffer with memory.
14357  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14358  if(res >= 0)
14359  {
14360  // All steps succeeded.
14361  #if VMA_STATS_STRING_ENABLED
14362  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14363  #endif
14364  if(pAllocationInfo != VMA_NULL)
14365  {
14366  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14367  }
14368 
14369  return VK_SUCCESS;
14370  }
14371  allocator->FreeMemory(*pAllocation);
14372  *pAllocation = VK_NULL_HANDLE;
14373  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14374  *pBuffer = VK_NULL_HANDLE;
14375  return res;
14376  }
14377  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14378  *pBuffer = VK_NULL_HANDLE;
14379  return res;
14380  }
14381  return res;
14382 }
14383 
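// Illustrative usage sketch: creating a GPU-only vertex buffer in one call.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, VMA_NULL); // pAllocationInfo is optional.
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
*/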
14384 void vmaDestroyBuffer(
14385  VmaAllocator allocator,
14386  VkBuffer buffer,
14387  VmaAllocation allocation)
14388 {
14389  VMA_ASSERT(allocator);
14390 
14391  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14392  {
14393  return;
14394  }
14395 
14396  VMA_DEBUG_LOG("vmaDestroyBuffer");
14397 
14398  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14399 
14400 #if VMA_RECORDING_ENABLED
14401  if(allocator->GetRecorder() != VMA_NULL)
14402  {
14403  allocator->GetRecorder()->RecordDestroyBuffer(
14404  allocator->GetCurrentFrameIndex(),
14405  allocation);
14406  }
14407 #endif
14408 
14409  if(buffer != VK_NULL_HANDLE)
14410  {
14411  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14412  }
14413 
14414  if(allocation != VK_NULL_HANDLE)
14415  {
14416  allocator->FreeMemory(allocation);
14417  }
14418 }
14419 
14420 VkResult vmaCreateImage(
14421  VmaAllocator allocator,
14422  const VkImageCreateInfo* pImageCreateInfo,
14423  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14424  VkImage* pImage,
14425  VmaAllocation* pAllocation,
14426  VmaAllocationInfo* pAllocationInfo)
14427 {
14428  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14429 
14430  VMA_DEBUG_LOG("vmaCreateImage");
14431 
14432  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14433 
14434  *pImage = VK_NULL_HANDLE;
14435  *pAllocation = VK_NULL_HANDLE;
14436 
14437  // 1. Create VkImage.
14438  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14439  allocator->m_hDevice,
14440  pImageCreateInfo,
14441  allocator->GetAllocationCallbacks(),
14442  pImage);
14443  if(res >= 0)
14444  {
14445  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14446  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14447  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14448 
14449  // 2. Allocate memory using allocator.
14450  VkMemoryRequirements vkMemReq = {};
14451  bool requiresDedicatedAllocation = false;
14452  bool prefersDedicatedAllocation = false;
14453  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14454  requiresDedicatedAllocation, prefersDedicatedAllocation);
14455 
14456  res = allocator->AllocateMemory(
14457  vkMemReq,
14458  requiresDedicatedAllocation,
14459  prefersDedicatedAllocation,
14460  VK_NULL_HANDLE, // dedicatedBuffer
14461  *pImage, // dedicatedImage
14462  *pAllocationCreateInfo,
14463  suballocType,
14464  pAllocation);
14465 
14466 #if VMA_RECORDING_ENABLED
14467  if(allocator->GetRecorder() != VMA_NULL)
14468  {
14469  allocator->GetRecorder()->RecordCreateImage(
14470  allocator->GetCurrentFrameIndex(),
14471  *pImageCreateInfo,
14472  *pAllocationCreateInfo,
14473  *pAllocation);
14474  }
14475 #endif
14476 
14477  if(res >= 0)
14478  {
14479  // 3. Bind image with memory.
14480  res = allocator->BindImageMemory(*pAllocation, *pImage);
14481  if(res >= 0)
14482  {
14483  // All steps succeeded.
14484  #if VMA_STATS_STRING_ENABLED
14485  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14486  #endif
14487  if(pAllocationInfo != VMA_NULL)
14488  {
14489  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14490  }
14491 
14492  return VK_SUCCESS;
14493  }
14494  allocator->FreeMemory(*pAllocation);
14495  *pAllocation = VK_NULL_HANDLE;
14496  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14497  *pImage = VK_NULL_HANDLE;
14498  return res;
14499  }
14500  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14501  *pImage = VK_NULL_HANDLE;
14502  return res;
14503  }
14504  return res;
14505 }
14506 
14507 void vmaDestroyImage(
14508  VmaAllocator allocator,
14509  VkImage image,
14510  VmaAllocation allocation)
14511 {
14512  VMA_ASSERT(allocator);
14513 
14514  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14515  {
14516  return;
14517  }
14518 
14519  VMA_DEBUG_LOG("vmaDestroyImage");
14520 
14521  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14522 
14523 #if VMA_RECORDING_ENABLED
14524  if(allocator->GetRecorder() != VMA_NULL)
14525  {
14526  allocator->GetRecorder()->RecordDestroyImage(
14527  allocator->GetCurrentFrameIndex(),
14528  allocation);
14529  }
14530 #endif
14531 
14532  if(image != VK_NULL_HANDLE)
14533  {
14534  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14535  }
14536  if(allocation != VK_NULL_HANDLE)
14537  {
14538  allocator->FreeMemory(allocation);
14539  }
14540 }
14541 
14542 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1567
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1868
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1624
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side...
Definition: vk_mem_alloc.h:2592
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
uint32_t allocationsLost
Number of allocations that became lost in the process of defragmentation.
Definition: vk_mem_alloc.h:2633
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1598
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2190
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1579
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1825
Definition: vk_mem_alloc.h:1928
VmaDefragmentationFlags flags
Flags for defragmentation. Use VmaDefragmentationFlagBits enum.
Definition: vk_mem_alloc.h:2567
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1571
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2290
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1621
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2619
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2079
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1468
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2171
Definition: vk_mem_alloc.h:1905
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1560
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1978
Definition: vk_mem_alloc.h:1852
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1633
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2107
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1686
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1618
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1856
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1758
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1576
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1757
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2623
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1650
VmaStatInfo total
Definition: vk_mem_alloc.h:1767
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2631
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1962
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2614
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1577
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1502
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1627
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2121
Definition: vk_mem_alloc.h:2115
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1693
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2300
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1572
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1596
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1999
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2141
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2177
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1558
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2124
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2597
VmaMemoryUsage
Definition: vk_mem_alloc.h:1803
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2576
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2609
size_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2570
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2627
Definition: vk_mem_alloc.h:1842
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1986
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1575
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1763
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1508
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2558
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2556
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2582
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1529
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1600
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1534
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2629
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1973
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2187
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1568
Definition: vk_mem_alloc.h:2537
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1746
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2136
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1521
Definition: vk_mem_alloc.h:2111
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1912
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1759
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1525
Definition: vk_mem_alloc.h:1936
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2127
Definition: vk_mem_alloc.h:1851
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1574
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1968
Definition: vk_mem_alloc.h:1959
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1749
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1570
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2149
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1636
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2180
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1957
VkDeviceSize maxCpuBytesToMove
Maximum total number of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2587
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1992
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1674
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1765
Definition: vk_mem_alloc.h:2544
Set this flag to use memory that will be persistently mapped, and retrieve a pointer to it...
Definition: vk_mem_alloc.h:1892
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1758
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
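An illustrative sketch, assuming a valid allocator; the function creates a temporary dummy buffer internally, so the VkBufferCreateInfo need not correspond to a live buffer:
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024; // size is illustrative
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);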
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1581
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1606
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin().
Definition: vk_mem_alloc.h:2533
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1523
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1580
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
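A sketch of the map/write/unmap pattern, assuming the allocation lives in HOST_VISIBLE memory; myData and myDataSize are placeholders:
void* mappedData;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, myData, myDataSize);
    vmaUnmapMemory(allocator, allocation);
    // For non-HOST_COHERENT memory types, also flush the written range,
    // e.g. with vkFlushMappedMemoryRanges.
}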
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:2163
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1573
Definition: vk_mem_alloc.h:1923
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1614
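A hedged sketch of enabling recording at allocator creation; this requires VMA_RECORDING_ENABLED to be 1, and the output path is illustrative:
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after every call; useful if the app may crash
recordSettings.pFilePath = "vma_capture.csv"; // hypothetical output path

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings;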
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2314
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1630
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1758
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1755
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
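A sketch, assuming the library was compiled with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled; passing UINT32_MAX considers every candidate memory type:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_ERROR_VALIDATION_FAILED_EXT means a corrupted margin was detected;
// VK_ERROR_FEATURE_NOT_PRESENT means corruption detection was not enabled.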
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:2168
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2564
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:1932
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2295
Definition: vk_mem_alloc.h:1943
Definition: vk_mem_alloc.h:1955
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2625
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1566
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
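A minimal sketch, assuming physicalDevice and device were created earlier:
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// At application shutdown: vmaDestroyAllocator(allocator);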
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1753
Definition: vk_mem_alloc.h:1808
Definition: vk_mem_alloc.h:2549
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2117
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1603
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1751
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1578
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1582
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1879
A bit mask to extract only ALGORITHM bits from the entire set of flags.
Definition: vk_mem_alloc.h:2552
Definition: vk_mem_alloc.h:1950
Definition: vk_mem_alloc.h:1835
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2309
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1556
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1569
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2096
Parameters of a VmaAllocation object, which can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2276
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
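A sketch of this low-level path, assuming device, buffer, and allocator are valid; for buffers and images the higher-level helpers are usually preferable:
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
    &allocation, &allocInfo);
// Bind afterwards, e.g. vmaBindBufferMemory(allocator, allocation, buffer);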
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1940
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2061
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1759
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
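A hedged per-frame sketch combining vmaSetCurrentFrameIndex() with vmaTouchAllocation(), for an allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT; frameIndex is assumed to be maintained by the application:
vmaSetCurrentFrameIndex(allocator, frameIndex); // call once at the start of each frame
if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation became lost: destroy the old resource and recreate it.
}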
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1590
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1766
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2174
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1759
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
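A hedged sketch of CPU-side defragmentation using vmaDefragmentationBegin()/vmaDefragmentationEnd(); allocations and allocationCount are assumed to describe VmaAllocation handles whose resources are currently unused:
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocationCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;    // no byte limit
defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // no count limit
// commandBuffer left null: only CPU-side (host memcpy) moves are performed.

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to moved allocations must be recreated and rebound.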
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side...
Definition: vk_mem_alloc.h:2602
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2281