//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif
/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice physicalDevice;
    VkDevice device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* pHeapSizeLimit;
    const VmaVulkanFunctions* pVulkanFunctions;
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

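/*
Illustrative usage sketch (not part of the original header): creating and
destroying an allocator. `physicalDevice` and `device` are assumed to be a
valid VkPhysicalDevice and VkDevice already created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create and use resources through the allocator ...
    vmaDestroyAllocator(allocator);
*/
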
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

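/*
Illustrative sketch (assumes a valid `allocator`): dumping statistics as a
JSON string and releasing it. The string must be freed with
vmaFreeStatsString(), not with free().

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
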
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

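/*
Illustrative sketch (assumes a valid `allocator`): finding a memory type for a
host-visible staging allocation. `memoryTypeBits` would normally come from the
VkMemoryRequirements of a real resource; UINT32_MAX accepts any type here.

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &createInfo, &memTypeIndex);
*/
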
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

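/*
Illustrative sketch (assumes a valid `allocator` and a previously found
`memTypeIndex`): creating a custom pool, querying its statistics, and
destroying it. Allocations are routed into the pool by setting
VmaAllocationCreateInfo::pool.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB blocks
    poolInfo.minBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);

    VmaPoolStats poolStats;
    vmaGetPoolStats(allocator, pool, &poolStats);
    // ... allocate from the pool, then free everything ...
    vmaDestroyPool(allocator, pool);
*/
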
VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

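/*
Illustrative sketch (assumes `allocation` was created in a HOST_VISIBLE memory
type, and `srcData`/`srcSize` describe data to upload): mapping, writing, and
unmapping. Map/unmap calls can be nested; the memory stays mapped until the
matching number of vmaUnmapMemory() calls.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize);
        // For non-HOST_COHERENT memory, flush the written range while mapped:
        vmaFlushAllocation(allocator, allocation, 0, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
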
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

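/*
Illustrative sketch (assumes `allocations` is an array of `allocCount`
VmaAllocation handles that are safe to move): basic defragmentation with
default limits. Buffers/images bound to moved allocations must be recreated
and rebound by the caller afterwards.

    VmaDefragmentationStats defragStats;
    VkResult res = vmaDefragment(
        allocator,
        allocations, allocCount,
        nullptr,    // optional per-allocation "changed" flags
        nullptr,    // null means default VmaDefragmentationInfo (no limits)
        &defragStats);
*/
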
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

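/*
Illustrative sketch (assumes a valid `allocator`): creating a device-local
vertex buffer together with its memory in one call, then destroying both.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
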
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio>  // for snprintf, used by the statistics-string helpers below
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

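/*
Illustrative sketch of such overrides (not part of the original header; any
definitions must appear before the #include that defines VMA_IMPLEMENTATION;
MyOwnAssert is a hypothetical user-provided function):

    #define VMA_DEBUG_MARGIN 16                 // guard bytes around allocations
    #define VMA_DEBUG_DETECT_CORRUPTION 1       // validate the guard margins
    #define VMA_ASSERT(expr) MyOwnAssert(expr)  // custom assert handler
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
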
/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif
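
/*
Sketch of the dynamic-linking alternative (illustrative): define
VMA_STATIC_VULKAN_FUNCTIONS to 0 and pass entry points fetched at runtime
(e.g. via volk or vkGetInstanceProcAddr/vkGetDeviceProcAddr) through
VmaAllocatorCreateInfo::pVulkanFunctions.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory =
        (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ... fill the remaining members the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/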
2793 
2794 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2795 //#define VMA_USE_STL_CONTAINERS 1
2796 
2797 /* Set this macro to 1 to make the library including and using STL containers:
2798 std::pair, std::vector, std::list, std::unordered_map.
2799 
2800 Set it to 0 or undefined to make the library using its own implementation of
2801 the containers.
2802 */
2803 #if VMA_USE_STL_CONTAINERS
2804  #define VMA_USE_STL_VECTOR 1
2805  #define VMA_USE_STL_UNORDERED_MAP 1
2806  #define VMA_USE_STL_LIST 1
2807 #endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert>   // for assert
#include <algorithm> // for min, max
#include <mutex>     // for std::mutex
#include <atomic>    // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
#include <malloc.h> // for memalign
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer with an always nonnegative value.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns the smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}
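// Worked examples: VmaNextPow2(5) == 8, VmaNextPow2(16) == 16, VmaNextPow2(17) == 32.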

// Returns the largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must lie at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
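// Worked example: with pageSize = 4096, a resource occupying bytes [0, 100)
// ends on page 0; a second resource starting at offset 1000 also starts on
// page 0, so the function returns true, while one starting at offset 4096
// starts on page 1 and yields false.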

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other one is an optimal image. If a type is unknown,
behave conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs a binary search and returns the iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
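
/*
Usage sketch (illustrative, not from the original source): finding the
insertion position of a value in a sorted array of uint32_t.

    uint32_t arr[] = { 1, 3, 5, 7 };
    // Returns a pointer to the first element >= 4, i.e. &arr[2].
    uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 4u, [](uint32_t a, uint32_t b) { return a < b; });
*/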

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use the first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in the address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Set up a singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computation just to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with a subset of the interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

4456 
4457 class VmaDeviceMemoryBlock;
4458 
4459 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4460 
4461 struct VmaAllocation_T
4462 {
4463  VMA_CLASS_NO_COPY(VmaAllocation_T)
4464 private:
4465  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4466 
4467  enum FLAGS
4468  {
4469  FLAG_USER_DATA_STRING = 0x01,
4470  };
4471 
4472 public:
4473  enum ALLOCATION_TYPE
4474  {
4475  ALLOCATION_TYPE_NONE,
4476  ALLOCATION_TYPE_BLOCK,
4477  ALLOCATION_TYPE_DEDICATED,
4478  };
4479 
4480  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4481  m_Alignment(1),
4482  m_Size(0),
4483  m_pUserData(VMA_NULL),
4484  m_LastUseFrameIndex(currentFrameIndex),
4485  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4486  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4487  m_MapCount(0),
4488  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4489  {
4490 #if VMA_STATS_STRING_ENABLED
4491  m_CreationFrameIndex = currentFrameIndex;
4492  m_BufferImageUsage = 0;
4493 #endif
4494  }
4495 
4496  ~VmaAllocation_T()
4497  {
4498  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4499 
4500  // Check if owned string was freed.
4501  VMA_ASSERT(m_pUserData == VMA_NULL);
4502  }
4503 
4504  void InitBlockAllocation(
4505  VmaPool hPool,
4506  VmaDeviceMemoryBlock* block,
4507  VkDeviceSize offset,
4508  VkDeviceSize alignment,
4509  VkDeviceSize size,
4510  VmaSuballocationType suballocationType,
4511  bool mapped,
4512  bool canBecomeLost)
4513  {
4514  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4515  VMA_ASSERT(block != VMA_NULL);
4516  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4517  m_Alignment = alignment;
4518  m_Size = size;
4519  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4520  m_SuballocationType = (uint8_t)suballocationType;
4521  m_BlockAllocation.m_hPool = hPool;
4522  m_BlockAllocation.m_Block = block;
4523  m_BlockAllocation.m_Offset = offset;
4524  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4525  }
4526 
4527  void InitLost()
4528  {
4529  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4530  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4531  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4532  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4533  m_BlockAllocation.m_Block = VMA_NULL;
4534  m_BlockAllocation.m_Offset = 0;
4535  m_BlockAllocation.m_CanBecomeLost = true;
4536  }
4537 
4538  void ChangeBlockAllocation(
4539  VmaAllocator hAllocator,
4540  VmaDeviceMemoryBlock* block,
4541  VkDeviceSize offset);
4542 
4543  void ChangeSize(VkDeviceSize newSize);
4544 
4545  // Non-null pMappedData means the allocation was created with the MAPPED flag.
4546  void InitDedicatedAllocation(
4547  uint32_t memoryTypeIndex,
4548  VkDeviceMemory hMemory,
4549  VmaSuballocationType suballocationType,
4550  void* pMappedData,
4551  VkDeviceSize size)
4552  {
4553  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4554  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4555  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4556  m_Alignment = 0;
4557  m_Size = size;
4558  m_SuballocationType = (uint8_t)suballocationType;
4559  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4560  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4561  m_DedicatedAllocation.m_hMemory = hMemory;
4562  m_DedicatedAllocation.m_pMappedData = pMappedData;
4563  }
4564 
4565  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4566  VkDeviceSize GetAlignment() const { return m_Alignment; }
4567  VkDeviceSize GetSize() const { return m_Size; }
4568  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4569  void* GetUserData() const { return m_pUserData; }
4570  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4571  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4572 
4573  VmaDeviceMemoryBlock* GetBlock() const
4574  {
4575  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4576  return m_BlockAllocation.m_Block;
4577  }
4578  VkDeviceSize GetOffset() const;
4579  VkDeviceMemory GetMemory() const;
4580  uint32_t GetMemoryTypeIndex() const;
4581  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4582  void* GetMappedData() const;
4583  bool CanBecomeLost() const;
4584  VmaPool GetPool() const;
4585 
4586  uint32_t GetLastUseFrameIndex() const
4587  {
4588  return m_LastUseFrameIndex.load();
4589  }
4590  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4591  {
4592  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4593  }
4594  /*
4595  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4596  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4597  - Else, returns false.
4598 
4599  If hAllocation is already lost, this asserts - you should not call it then.
4600  If hAllocation was not created with CAN_BECOME_LOST_BIT, this asserts as well.
4601  */
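 // Example: with frameInUseCount = 2, an allocation last used in frame 10 can be
 // made lost starting from frame 13 (10 + 2 < 13).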
4602  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4603 
4604  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4605  {
4606  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4607  outInfo.blockCount = 1;
4608  outInfo.allocationCount = 1;
4609  outInfo.unusedRangeCount = 0;
4610  outInfo.usedBytes = m_Size;
4611  outInfo.unusedBytes = 0;
4612  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4613  outInfo.unusedRangeSizeMin = UINT64_MAX;
4614  outInfo.unusedRangeSizeMax = 0;
4615  }
4616 
4617  void BlockAllocMap();
4618  void BlockAllocUnmap();
4619  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4620  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4621 
4622 #if VMA_STATS_STRING_ENABLED
4623  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4624  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4625 
4626  void InitBufferImageUsage(uint32_t bufferImageUsage)
4627  {
4628  VMA_ASSERT(m_BufferImageUsage == 0);
4629  m_BufferImageUsage = bufferImageUsage;
4630  }
4631 
4632  void PrintParameters(class VmaJsonWriter& json) const;
4633 #endif
4634 
4635 private:
4636  VkDeviceSize m_Alignment;
4637  VkDeviceSize m_Size;
4638  void* m_pUserData;
4639  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4640  uint8_t m_Type; // ALLOCATION_TYPE
4641  uint8_t m_SuballocationType; // VmaSuballocationType
4642  // Bit 0x80 is set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4643  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory(); see the illustrative sketch after this struct.
4644  uint8_t m_MapCount;
4645  uint8_t m_Flags; // enum FLAGS
4646 
4647  // Allocation out of VmaDeviceMemoryBlock.
4648  struct BlockAllocation
4649  {
4650  VmaPool m_hPool; // Null if the allocation belongs to general (non-pool) memory.
4651  VmaDeviceMemoryBlock* m_Block;
4652  VkDeviceSize m_Offset;
4653  bool m_CanBecomeLost;
4654  };
4655 
4656  // Allocation for an object that has its own private VkDeviceMemory.
4657  struct DedicatedAllocation
4658  {
4659  uint32_t m_MemoryTypeIndex;
4660  VkDeviceMemory m_hMemory;
4661  void* m_pMappedData; // Not null means memory is mapped.
4662  };
4663 
4664  union
4665  {
4666  // Allocation out of VmaDeviceMemoryBlock.
4667  BlockAllocation m_BlockAllocation;
4668  // Allocation for an object that has its own private VkDeviceMemory.
4669  DedicatedAllocation m_DedicatedAllocation;
4670  };
4671 
4672 #if VMA_STATS_STRING_ENABLED
4673  uint32_t m_CreationFrameIndex;
4674  uint32_t m_BufferImageUsage; // 0 if unknown.
4675 #endif
4676 
4677  void FreeUserDataString(VmaAllocator hAllocator);
4678 };
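// Illustrative sketch (not part of the library) of the m_MapCount packing used by
// VmaAllocation_T above: bit 0x80 is the persistent-map flag, the low 7 bits count
// vmaMapMemory()/vmaUnmapMemory() calls. Names below are hypothetical.
#if 0
static bool ToyIsPersistentlyMapped(uint8_t mapCount)
{
 return (mapCount & 0x80) != 0; // MAP_COUNT_FLAG_PERSISTENT_MAP
}
static uint8_t ToyIncrementMapCount(uint8_t mapCount)
{
 VMA_ASSERT((mapCount & 0x7F) < 0x7F && "Map reference counter overflow.");
 return (uint8_t)(mapCount + 1); // Flag bit stays intact; only the low 7 bits change.
}
#endif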
4679 
4680 /*
4681 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
4682 allocation (and returned as an allocated memory block) or free.
4683 */
4684 struct VmaSuballocation
4685 {
4686  VkDeviceSize offset;
4687  VkDeviceSize size;
4688  VmaAllocation hAllocation;
4689  VmaSuballocationType type;
4690 };
4691 
4692 // Comparator for offsets.
4693 struct VmaSuballocationOffsetLess
4694 {
4695  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4696  {
4697  return lhs.offset < rhs.offset;
4698  }
4699 };
4700 struct VmaSuballocationOffsetGreater
4701 {
4702  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4703  {
4704  return lhs.offset > rhs.offset;
4705  }
4706 };
4707 
4708 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4709 
4710 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
4711 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4712 
4713 /*
4714 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4715 
4716 If canMakeOtherLost was false:
4717 - item points to a FREE suballocation.
4718 - itemsToMakeLostCount is 0.
4719 
4720 If canMakeOtherLost was true:
4721 - item points to first of sequence of suballocations, which are either FREE,
4722  or point to VmaAllocations that can become lost.
4723 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4724  the requested allocation to succeed.
4725 */
4726 struct VmaAllocationRequest
4727 {
4728  VkDeviceSize offset;
4729  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4730  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4731  VmaSuballocationList::iterator item;
4732  size_t itemsToMakeLostCount;
4733  void* customData;
4734 
4735  VkDeviceSize CalcCost() const
4736  {
4737  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4738  }
4739 };
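// Worked example: itemsToMakeLostCount = 2 with sumItemSize = 300000 bytes gives
// CalcCost() = 300000 + 2 * 1048576 = 2397152 bytes-equivalent, while a request
// that makes nothing lost has cost 0.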
4740 
4741 /*
4742 Data structure used for bookkeeping of allocations and unused ranges of memory
4743 in a single VkDeviceMemory block.
4744 */
4745 class VmaBlockMetadata
4746 {
4747 public:
4748  VmaBlockMetadata(VmaAllocator hAllocator);
4749  virtual ~VmaBlockMetadata() { }
4750  virtual void Init(VkDeviceSize size) { m_Size = size; }
4751 
4752  // Validates all data structures inside this object. If not valid, returns false.
4753  virtual bool Validate() const = 0;
4754  VkDeviceSize GetSize() const { return m_Size; }
4755  virtual size_t GetAllocationCount() const = 0;
4756  virtual VkDeviceSize GetSumFreeSize() const = 0;
4757  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4758  // Returns true if this block is empty - contains only a single free suballocation.
4759  virtual bool IsEmpty() const = 0;
4760 
4761  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4762  // Shouldn't modify blockCount.
4763  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4764 
4765 #if VMA_STATS_STRING_ENABLED
4766  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4767 #endif
4768 
4769  // Tries to find a place for a suballocation with the given parameters inside this block.
4770  // If successful, fills pAllocationRequest and returns true.
4771  // Otherwise returns false.
4772  virtual bool CreateAllocationRequest(
4773  uint32_t currentFrameIndex,
4774  uint32_t frameInUseCount,
4775  VkDeviceSize bufferImageGranularity,
4776  VkDeviceSize allocSize,
4777  VkDeviceSize allocAlignment,
4778  bool upperAddress,
4779  VmaSuballocationType allocType,
4780  bool canMakeOtherLost,
4781  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4782  VmaAllocationRequest* pAllocationRequest) = 0;
4783 
4784  virtual bool MakeRequestedAllocationsLost(
4785  uint32_t currentFrameIndex,
4786  uint32_t frameInUseCount,
4787  VmaAllocationRequest* pAllocationRequest) = 0;
4788 
4789  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4790 
4791  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4792 
4793  // Makes actual allocation based on request. Request must already be checked and valid.
4794  virtual void Alloc(
4795  const VmaAllocationRequest& request,
4796  VmaSuballocationType type,
4797  VkDeviceSize allocSize,
4798  bool upperAddress,
4799  VmaAllocation hAllocation) = 0;
4800 
4801  // Frees the suballocation assigned to the given memory region.
4802  virtual void Free(const VmaAllocation allocation) = 0;
4803  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4804 
4805  // Tries to resize (grow or shrink) space for the given allocation, in place.
4806  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
4807 
4808 protected:
4809  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4810 
4811 #if VMA_STATS_STRING_ENABLED
4812  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4813  VkDeviceSize unusedBytes,
4814  size_t allocationCount,
4815  size_t unusedRangeCount) const;
4816  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4817  VkDeviceSize offset,
4818  VmaAllocation hAllocation) const;
4819  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4820  VkDeviceSize offset,
4821  VkDeviceSize size) const;
4822  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4823 #endif
4824 
4825 private:
4826  VkDeviceSize m_Size;
4827  const VkAllocationCallbacks* m_pAllocationCallbacks;
4828 };
4829 
4830 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4831  VMA_ASSERT(0 && "Validation failed: " #cond); \
4832  return false; \
4833  } } while(false)
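// Minimal usage sketch (illustrative, not part of the library): when the condition
// fails, VMA_VALIDATE triggers VMA_ASSERT and makes the enclosing function return
// false instead of continuing with corrupt data.
#if 0
static bool ToyValidate(VkDeviceSize size, VkDeviceSize sumFreeSize)
{
 VMA_VALIDATE(sumFreeSize <= size);
 return true;
}
#endif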
4834 
4835 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4836 {
4837  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4838 public:
4839  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4840  virtual ~VmaBlockMetadata_Generic();
4841  virtual void Init(VkDeviceSize size);
4842 
4843  virtual bool Validate() const;
4844  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4845  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4846  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4847  virtual bool IsEmpty() const;
4848 
4849  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4850  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4851 
4852 #if VMA_STATS_STRING_ENABLED
4853  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4854 #endif
4855 
4856  virtual bool CreateAllocationRequest(
4857  uint32_t currentFrameIndex,
4858  uint32_t frameInUseCount,
4859  VkDeviceSize bufferImageGranularity,
4860  VkDeviceSize allocSize,
4861  VkDeviceSize allocAlignment,
4862  bool upperAddress,
4863  VmaSuballocationType allocType,
4864  bool canMakeOtherLost,
4865  uint32_t strategy,
4866  VmaAllocationRequest* pAllocationRequest);
4867 
4868  virtual bool MakeRequestedAllocationsLost(
4869  uint32_t currentFrameIndex,
4870  uint32_t frameInUseCount,
4871  VmaAllocationRequest* pAllocationRequest);
4872 
4873  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4874 
4875  virtual VkResult CheckCorruption(const void* pBlockData);
4876 
4877  virtual void Alloc(
4878  const VmaAllocationRequest& request,
4879  VmaSuballocationType type,
4880  VkDeviceSize allocSize,
4881  bool upperAddress,
4882  VmaAllocation hAllocation);
4883 
4884  virtual void Free(const VmaAllocation allocation);
4885  virtual void FreeAtOffset(VkDeviceSize offset);
4886 
4887  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
4888 
4889 private:
4890  uint32_t m_FreeCount;
4891  VkDeviceSize m_SumFreeSize;
4892  VmaSuballocationList m_Suballocations;
4893  // Suballocations that are free and have size greater than a certain threshold.
4894  // Sorted by size, ascending.
4895  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4896 
4897  bool ValidateFreeSuballocationList() const;
4898 
4899  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
4900  // If yes, fills pOffset and returns true. If not, returns false.
4901  bool CheckAllocation(
4902  uint32_t currentFrameIndex,
4903  uint32_t frameInUseCount,
4904  VkDeviceSize bufferImageGranularity,
4905  VkDeviceSize allocSize,
4906  VkDeviceSize allocAlignment,
4907  VmaSuballocationType allocType,
4908  VmaSuballocationList::const_iterator suballocItem,
4909  bool canMakeOtherLost,
4910  VkDeviceSize* pOffset,
4911  size_t* itemsToMakeLostCount,
4912  VkDeviceSize* pSumFreeSize,
4913  VkDeviceSize* pSumItemSize) const;
4914  // Given a free suballocation, merges it with the following one, which must also be free.
4915  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4916  // Releases the given suballocation, making it free.
4917  // Merges it with adjacent free suballocations if applicable.
4918  // Returns an iterator to the new free suballocation at this place.
4919  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4920  // Given a free suballocation, inserts it into the sorted list
4921  // m_FreeSuballocationsBySize if it is suitable.
4922  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4923  // Given a free suballocation, removes it from the sorted list
4924  // m_FreeSuballocationsBySize if it is suitable.
4925  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4926 };
4927 
4928 /*
4929 Allocations and their references in internal data structure look like this:
4930 
4931 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4932 
4933  0 +-------+
4934  | |
4935  | |
4936  | |
4937  +-------+
4938  | Alloc | 1st[m_1stNullItemsBeginCount]
4939  +-------+
4940  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4941  +-------+
4942  | ... |
4943  +-------+
4944  | Alloc | 1st[1st.size() - 1]
4945  +-------+
4946  | |
4947  | |
4948  | |
4949 GetSize() +-------+
4950 
4951 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4952 
4953  0 +-------+
4954  | Alloc | 2nd[0]
4955  +-------+
4956  | Alloc | 2nd[1]
4957  +-------+
4958  | ... |
4959  +-------+
4960  | Alloc | 2nd[2nd.size() - 1]
4961  +-------+
4962  | |
4963  | |
4964  | |
4965  +-------+
4966  | Alloc | 1st[m_1stNullItemsBeginCount]
4967  +-------+
4968  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4969  +-------+
4970  | ... |
4971  +-------+
4972  | Alloc | 1st[1st.size() - 1]
4973  +-------+
4974  | |
4975 GetSize() +-------+
4976 
4977 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4978 
4979  0 +-------+
4980  | |
4981  | |
4982  | |
4983  +-------+
4984  | Alloc | 1st[m_1stNullItemsBeginCount]
4985  +-------+
4986  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4987  +-------+
4988  | ... |
4989  +-------+
4990  | Alloc | 1st[1st.size() - 1]
4991  +-------+
4992  | |
4993  | |
4994  | |
4995  +-------+
4996  | Alloc | 2nd[2nd.size() - 1]
4997  +-------+
4998  | ... |
4999  +-------+
5000  | Alloc | 2nd[1]
5001  +-------+
5002  | Alloc | 2nd[0]
5003 GetSize() +-------+
5004 
5005 */
5006 class VmaBlockMetadata_Linear : public VmaBlockMetadata
5007 {
5008  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5009 public:
5010  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5011  virtual ~VmaBlockMetadata_Linear();
5012  virtual void Init(VkDeviceSize size);
5013 
5014  virtual bool Validate() const;
5015  virtual size_t GetAllocationCount() const;
5016  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5017  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5018  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5019 
5020  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5021  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5022 
5023 #if VMA_STATS_STRING_ENABLED
5024  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5025 #endif
5026 
5027  virtual bool CreateAllocationRequest(
5028  uint32_t currentFrameIndex,
5029  uint32_t frameInUseCount,
5030  VkDeviceSize bufferImageGranularity,
5031  VkDeviceSize allocSize,
5032  VkDeviceSize allocAlignment,
5033  bool upperAddress,
5034  VmaSuballocationType allocType,
5035  bool canMakeOtherLost,
5036  uint32_t strategy,
5037  VmaAllocationRequest* pAllocationRequest);
5038 
5039  virtual bool MakeRequestedAllocationsLost(
5040  uint32_t currentFrameIndex,
5041  uint32_t frameInUseCount,
5042  VmaAllocationRequest* pAllocationRequest);
5043 
5044  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5045 
5046  virtual VkResult CheckCorruption(const void* pBlockData);
5047 
5048  virtual void Alloc(
5049  const VmaAllocationRequest& request,
5050  VmaSuballocationType type,
5051  VkDeviceSize allocSize,
5052  bool upperAddress,
5053  VmaAllocation hAllocation);
5054 
5055  virtual void Free(const VmaAllocation allocation);
5056  virtual void FreeAtOffset(VkDeviceSize offset);
5057 
5058 private:
5059  /*
5060  There are two suballocation vectors, used in ping-pong way.
5061  The one with index m_1stVectorIndex is called 1st.
5062  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5063  2nd can be non-empty only when 1st is not empty.
5064  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5065  */
5066  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5067 
5068  enum SECOND_VECTOR_MODE
5069  {
5070  SECOND_VECTOR_EMPTY,
5071  /*
5072  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
5073  all have smaller offsets.
5074  */
5075  SECOND_VECTOR_RING_BUFFER,
5076  /*
5077  Suballocations in 2nd vector are upper side of double stack.
5078  They all have offsets higher than those in 1st vector.
5079  Top of this stack means smaller offsets, but higher indices in this vector.
5080  */
5081  SECOND_VECTOR_DOUBLE_STACK,
5082  };
5083 
5084  VkDeviceSize m_SumFreeSize;
5085  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5086  uint32_t m_1stVectorIndex;
5087  SECOND_VECTOR_MODE m_2ndVectorMode;
5088 
5089  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5090  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5091  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5092  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5093 
5094  // Number of items in 1st vector with hAllocation = null at the beginning.
5095  size_t m_1stNullItemsBeginCount;
5096  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5097  size_t m_1stNullItemsMiddleCount;
5098  // Number of items in 2nd vector with hAllocation = null.
5099  size_t m_2ndNullItemsCount;
5100 
5101  bool ShouldCompact1st() const;
5102  void CleanupAfterFree();
5103 };
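// Illustrative sketch (not part of the library), modeling only the ring-buffer
// behavior pictured in the diagram above this class: "1st" grows toward the end of
// the block; once a request no longer fits there, allocations wrap to "2nd", which
// fills the space from offset 0 up to the beginning of "1st". All names and the
// simplified bookkeeping are hypothetical; frees and vector swapping are omitted.
#if 0
#include <vector>
#include <utility>

struct ToyLinearRingBlock
{
 VkDeviceSize size;
 std::vector<std::pair<VkDeviceSize, VkDeviceSize> > first;  // (offset, size), ascending offsets.
 std::vector<std::pair<VkDeviceSize, VkDeviceSize> > second; // Wrapped part, below 1st.

 bool Alloc(VkDeviceSize allocSize, VkDeviceSize* pOffset)
 {
  // While not wrapped yet, append after the last suballocation of 1st.
  const VkDeviceSize firstEnd = first.empty() ? 0 : first.back().first + first.back().second;
  if(second.empty() && firstEnd + allocSize <= size)
  {
   first.push_back(std::make_pair(firstEnd, allocSize));
   *pOffset = firstEnd;
   return true;
  }
  // Ring-buffer mode: fill the gap from offset 0 up to the beginning of 1st.
  const VkDeviceSize secondEnd = second.empty() ? 0 : second.back().first + second.back().second;
  if(!first.empty() && secondEnd + allocSize <= first.front().first)
  {
   second.push_back(std::make_pair(secondEnd, allocSize));
   *pOffset = secondEnd;
   return true;
  }
  return false; // Out of space in this toy model.
 }
};
#endif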
5104 
5105 /*
5106 - GetSize() is the original size of allocated memory block.
5107 - m_UsableSize is this size aligned down to a power of two.
5108  All allocations and calculations happen relative to m_UsableSize.
5109 - GetUnusableSize() is the difference between them.
5110  It is reported as a separate, unused range, not available for allocations.
5111 
5112 Node at level 0 has size = m_UsableSize.
5113 Each next level contains nodes with size 2 times smaller than current level.
5114 m_LevelCount is the maximum number of levels to use in the current object.
5115 */
5116 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5117 {
5118  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5119 public:
5120  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5121  virtual ~VmaBlockMetadata_Buddy();
5122  virtual void Init(VkDeviceSize size);
5123 
5124  virtual bool Validate() const;
5125  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5126  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5127  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5128  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5129 
5130  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5131  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5132 
5133 #if VMA_STATS_STRING_ENABLED
5134  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5135 #endif
5136 
5137  virtual bool CreateAllocationRequest(
5138  uint32_t currentFrameIndex,
5139  uint32_t frameInUseCount,
5140  VkDeviceSize bufferImageGranularity,
5141  VkDeviceSize allocSize,
5142  VkDeviceSize allocAlignment,
5143  bool upperAddress,
5144  VmaSuballocationType allocType,
5145  bool canMakeOtherLost,
5146  uint32_t strategy,
5147  VmaAllocationRequest* pAllocationRequest);
5148 
5149  virtual bool MakeRequestedAllocationsLost(
5150  uint32_t currentFrameIndex,
5151  uint32_t frameInUseCount,
5152  VmaAllocationRequest* pAllocationRequest);
5153 
5154  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5155 
5156  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5157 
5158  virtual void Alloc(
5159  const VmaAllocationRequest& request,
5160  VmaSuballocationType type,
5161  VkDeviceSize allocSize,
5162  bool upperAddress,
5163  VmaAllocation hAllocation);
5164 
5165  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5166  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5167 
5168 private:
5169  static const VkDeviceSize MIN_NODE_SIZE = 32;
5170  static const size_t MAX_LEVELS = 30;
5171 
5172  struct ValidationContext
5173  {
5174  size_t calculatedAllocationCount;
5175  size_t calculatedFreeCount;
5176  VkDeviceSize calculatedSumFreeSize;
5177 
5178  ValidationContext() :
5179  calculatedAllocationCount(0),
5180  calculatedFreeCount(0),
5181  calculatedSumFreeSize(0) { }
5182  };
5183 
5184  struct Node
5185  {
5186  VkDeviceSize offset;
5187  enum TYPE
5188  {
5189  TYPE_FREE,
5190  TYPE_ALLOCATION,
5191  TYPE_SPLIT,
5192  TYPE_COUNT
5193  } type;
5194  Node* parent;
5195  Node* buddy;
5196 
5197  union
5198  {
5199  struct
5200  {
5201  Node* prev;
5202  Node* next;
5203  } free;
5204  struct
5205  {
5206  VmaAllocation alloc;
5207  } allocation;
5208  struct
5209  {
5210  Node* leftChild;
5211  } split;
5212  };
5213  };
5214 
5215  // Size of the memory block aligned down to a power of two.
5216  VkDeviceSize m_UsableSize;
5217  uint32_t m_LevelCount;
5218 
5219  Node* m_Root;
5220  struct {
5221  Node* front;
5222  Node* back;
5223  } m_FreeList[MAX_LEVELS];
5224  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5225  size_t m_AllocationCount;
5226  // Number of nodes in the tree with type == TYPE_FREE.
5227  size_t m_FreeCount;
5228  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5229  VkDeviceSize m_SumFreeSize;
5230 
5231  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5232  void DeleteNode(Node* node);
5233  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5234  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5235  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5236  // Alloc passed just for validation. Can be null.
5237  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5238  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5239  // Adds node to the front of FreeList at given level.
5240  // node->type must be FREE.
5241  // node->free.prev, next can be undefined.
5242  void AddToFreeListFront(uint32_t level, Node* node);
5243  // Removes node from FreeList at given level.
5244  // node->type must be FREE.
5245  // node->free.prev, next stay untouched.
5246  void RemoveFromFreeList(uint32_t level, Node* node);
5247 
5248 #if VMA_STATS_STRING_ENABLED
5249  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5250 #endif
5251 };
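// Illustrative sketch (not part of the library): choosing a buddy-tree level for a
// requested size, following the level scheme described above the class (level 0
// spans m_UsableSize, each deeper level halves the node size, so
// LevelToNodeSize(level) = usableSize >> level). The function name is hypothetical.
#if 0
static uint32_t ToyAllocSizeToLevel(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
 uint32_t level = 0;
 // Descend while the next (half-size) level's node still fits the request.
 while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
 {
  ++level;
 }
 return level;
}
// Example: usableSize = 1024, levelCount = 6, allocSize = 100 -> level 3 (node size 128).
#endif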
5252 
5253 /*
5254 Represents a single block of device memory (`VkDeviceMemory`) with all the
5255 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5256 
5257 Thread-safety: This class must be externally synchronized.
5258 */
5259 class VmaDeviceMemoryBlock
5260 {
5261  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5262 public:
5263  VmaBlockMetadata* m_pMetadata;
5264 
5265  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5266 
5267  ~VmaDeviceMemoryBlock()
5268  {
5269  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5270  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5271  }
5272 
5273  // Always call after construction.
5274  void Init(
5275  VmaAllocator hAllocator,
5276  uint32_t newMemoryTypeIndex,
5277  VkDeviceMemory newMemory,
5278  VkDeviceSize newSize,
5279  uint32_t id,
5280  uint32_t algorithm);
5281  // Always call before destruction.
5282  void Destroy(VmaAllocator allocator);
5283 
5284  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5285  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5286  uint32_t GetId() const { return m_Id; }
5287  void* GetMappedData() const { return m_pMappedData; }
5288 
5289  // Validates all data structures inside this object. If not valid, returns false.
5290  bool Validate() const;
5291 
5292  VkResult CheckCorruption(VmaAllocator hAllocator);
5293 
5294  // ppData can be null.
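 // `count` makes mapping reference-counted; presumably the underlying
 // vkMapMemory/vkUnmapMemory are issued only on the first map and the last
 // unmap (tracked in m_MapCount below).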
5295  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5296  void Unmap(VmaAllocator hAllocator, uint32_t count);
5297 
5298  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5299  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5300 
5301  VkResult BindBufferMemory(
5302  const VmaAllocator hAllocator,
5303  const VmaAllocation hAllocation,
5304  VkBuffer hBuffer);
5305  VkResult BindImageMemory(
5306  const VmaAllocator hAllocator,
5307  const VmaAllocation hAllocation,
5308  VkImage hImage);
5309 
5310 private:
5311  uint32_t m_MemoryTypeIndex;
5312  uint32_t m_Id;
5313  VkDeviceMemory m_hMemory;
5314 
5315  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5316  // Also protects m_MapCount, m_pMappedData.
5317  VMA_MUTEX m_Mutex;
5318  uint32_t m_MapCount;
5319  void* m_pMappedData;
5320 };
5321 
5322 struct VmaPointerLess
5323 {
5324  bool operator()(const void* lhs, const void* rhs) const
5325  {
5326  return lhs < rhs;
5327  }
5328 };
5329 
5330 class VmaDefragmentator;
5331 
5332 /*
5333 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5334 Vulkan memory type.
5335 
5336 Synchronized internally with a mutex.
5337 */
5338 struct VmaBlockVector
5339 {
5340  VMA_CLASS_NO_COPY(VmaBlockVector)
5341 public:
5342  VmaBlockVector(
5343  VmaAllocator hAllocator,
5344  uint32_t memoryTypeIndex,
5345  VkDeviceSize preferredBlockSize,
5346  size_t minBlockCount,
5347  size_t maxBlockCount,
5348  VkDeviceSize bufferImageGranularity,
5349  uint32_t frameInUseCount,
5350  bool isCustomPool,
5351  bool explicitBlockSize,
5352  uint32_t algorithm);
5353  ~VmaBlockVector();
5354 
5355  VkResult CreateMinBlocks();
5356 
5357  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5358  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5359  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5360  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5361  uint32_t GetAlgorithm() const { return m_Algorithm; }
5362 
5363  void GetPoolStats(VmaPoolStats* pStats);
5364 
5365  bool IsEmpty() const { return m_Blocks.empty(); }
5366  bool IsCorruptionDetectionEnabled() const;
5367 
5368  VkResult Allocate(
5369  VmaPool hCurrentPool,
5370  uint32_t currentFrameIndex,
5371  VkDeviceSize size,
5372  VkDeviceSize alignment,
5373  const VmaAllocationCreateInfo& createInfo,
5374  VmaSuballocationType suballocType,
5375  VmaAllocation* pAllocation);
5376 
5377  void Free(
5378  VmaAllocation hAllocation);
5379 
5380  // Adds statistics of this BlockVector to pStats.
5381  void AddStats(VmaStats* pStats);
5382 
5383 #if VMA_STATS_STRING_ENABLED
5384  void PrintDetailedMap(class VmaJsonWriter& json);
5385 #endif
5386 
5387  void MakePoolAllocationsLost(
5388  uint32_t currentFrameIndex,
5389  size_t* pLostAllocationCount);
5390  VkResult CheckCorruption();
5391 
5392  VmaDefragmentator* EnsureDefragmentator(
5393  VmaAllocator hAllocator,
5394  uint32_t currentFrameIndex);
5395 
5396  VkResult Defragment(
5397  VmaDefragmentationStats* pDefragmentationStats,
5398  VkDeviceSize& maxBytesToMove,
5399  uint32_t& maxAllocationsToMove);
5400 
5401  void DestroyDefragmentator();
5402 
5403 private:
5404  friend class VmaDefragmentator;
5405 
5406  const VmaAllocator m_hAllocator;
5407  const uint32_t m_MemoryTypeIndex;
5408  const VkDeviceSize m_PreferredBlockSize;
5409  const size_t m_MinBlockCount;
5410  const size_t m_MaxBlockCount;
5411  const VkDeviceSize m_BufferImageGranularity;
5412  const uint32_t m_FrameInUseCount;
5413  const bool m_IsCustomPool;
5414  const bool m_ExplicitBlockSize;
5415  const uint32_t m_Algorithm;
5416  bool m_HasEmptyBlock;
5417  VMA_MUTEX m_Mutex;
5418  // Incrementally sorted by sumFreeSize, ascending.
5419  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5420  /* There can be at most one block that is completely empty - a
5421  hysteresis to avoid the pessimistic case of alternating creation and destruction
5422  of a VkDeviceMemory. */
5423  VmaDefragmentator* m_pDefragmentator;
5424  uint32_t m_NextBlockId;
5425 
5426  VkDeviceSize CalcMaxBlockSize() const;
5427 
5428  // Finds and removes given block from vector.
5429  void Remove(VmaDeviceMemoryBlock* pBlock);
5430 
5431  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5432  // after this call.
5433  void IncrementallySortBlocks();
5434 
5435  // To be used only without CAN_MAKE_OTHER_LOST flag.
5436  VkResult AllocateFromBlock(
5437  VmaDeviceMemoryBlock* pBlock,
5438  VmaPool hCurrentPool,
5439  uint32_t currentFrameIndex,
5440  VkDeviceSize size,
5441  VkDeviceSize alignment,
5442  VmaAllocationCreateFlags allocFlags,
5443  void* pUserData,
5444  VmaSuballocationType suballocType,
5445  uint32_t strategy,
5446  VmaAllocation* pAllocation);
5447 
5448  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5449 };
5450 
5451 struct VmaPool_T
5452 {
5453  VMA_CLASS_NO_COPY(VmaPool_T)
5454 public:
5455  VmaBlockVector m_BlockVector;
5456 
5457  VmaPool_T(
5458  VmaAllocator hAllocator,
5459  const VmaPoolCreateInfo& createInfo,
5460  VkDeviceSize preferredBlockSize);
5461  ~VmaPool_T();
5462 
5463  uint32_t GetId() const { return m_Id; }
5464  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5465 
5466 #if VMA_STATS_STRING_ENABLED
5467  //void PrintDetailedMap(class VmaStringBuilder& sb);
5468 #endif
5469 
5470 private:
5471  uint32_t m_Id;
5472 };
5473 
5474 class VmaDefragmentator
5475 {
5476  VMA_CLASS_NO_COPY(VmaDefragmentator)
5477 private:
5478  const VmaAllocator m_hAllocator;
5479  VmaBlockVector* const m_pBlockVector;
5480  uint32_t m_CurrentFrameIndex;
5481  VkDeviceSize m_BytesMoved;
5482  uint32_t m_AllocationsMoved;
5483 
5484  struct AllocationInfo
5485  {
5486  VmaAllocation m_hAllocation;
5487  VkBool32* m_pChanged;
5488 
5489  AllocationInfo() :
5490  m_hAllocation(VK_NULL_HANDLE),
5491  m_pChanged(VMA_NULL)
5492  {
5493  }
5494  };
5495 
5496  struct AllocationInfoSizeGreater
5497  {
5498  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5499  {
5500  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5501  }
5502  };
5503 
5504  // Used between AddAllocation and Defragment.
5505  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5506 
5507  struct BlockInfo
5508  {
5509  VmaDeviceMemoryBlock* m_pBlock;
5510  bool m_HasNonMovableAllocations;
5511  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5512 
5513  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5514  m_pBlock(VMA_NULL),
5515  m_HasNonMovableAllocations(true),
5516  m_Allocations(pAllocationCallbacks),
5517  m_pMappedDataForDefragmentation(VMA_NULL)
5518  {
5519  }
5520 
5521  void CalcHasNonMovableAllocations()
5522  {
5523  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5524  const size_t defragmentAllocCount = m_Allocations.size();
5525  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5526  }
5527 
5528  void SortAllocationsBySizeDescecnding()
5529  {
5530  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5531  }
5532 
5533  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5534  void Unmap(VmaAllocator hAllocator);
5535 
5536  private:
5537  // Not null if mapped for defragmentation only, not originally mapped.
5538  void* m_pMappedDataForDefragmentation;
5539  };
5540 
5541  struct BlockPointerLess
5542  {
5543  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5544  {
5545  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5546  }
5547  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5548  {
5549  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5550  }
5551  };
5552 
5553  // 1. Blocks with some non-movable allocations go first.
5554  // 2. Blocks with smaller sumFreeSize go first.
5555  struct BlockInfoCompareMoveDestination
5556  {
5557  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5558  {
5559  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5560  {
5561  return true;
5562  }
5563  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5564  {
5565  return false;
5566  }
5567  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5568  {
5569  return true;
5570  }
5571  return false;
5572  }
5573  };
5574 
5575  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5576  BlockInfoVector m_Blocks;
5577 
5578  VkResult DefragmentRound(
5579  VkDeviceSize maxBytesToMove,
5580  uint32_t maxAllocationsToMove);
5581 
5582  static bool MoveMakesSense(
5583  size_t dstBlockIndex, VkDeviceSize dstOffset,
5584  size_t srcBlockIndex, VkDeviceSize srcOffset);
5585 
5586 public:
5587  VmaDefragmentator(
5588  VmaAllocator hAllocator,
5589  VmaBlockVector* pBlockVector,
5590  uint32_t currentFrameIndex);
5591 
5592  ~VmaDefragmentator();
5593 
5594  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5595  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5596 
5597  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5598 
5599  VkResult Defragment(
5600  VkDeviceSize maxBytesToMove,
5601  uint32_t maxAllocationsToMove);
5602 };
5603 
5604 #if VMA_RECORDING_ENABLED
5605 
5606 class VmaRecorder
5607 {
5608 public:
5609  VmaRecorder();
5610  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5611  void WriteConfiguration(
5612  const VkPhysicalDeviceProperties& devProps,
5613  const VkPhysicalDeviceMemoryProperties& memProps,
5614  bool dedicatedAllocationExtensionEnabled);
5615  ~VmaRecorder();
5616 
5617  void RecordCreateAllocator(uint32_t frameIndex);
5618  void RecordDestroyAllocator(uint32_t frameIndex);
5619  void RecordCreatePool(uint32_t frameIndex,
5620  const VmaPoolCreateInfo& createInfo,
5621  VmaPool pool);
5622  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5623  void RecordAllocateMemory(uint32_t frameIndex,
5624  const VkMemoryRequirements& vkMemReq,
5625  const VmaAllocationCreateInfo& createInfo,
5626  VmaAllocation allocation);
5627  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5628  const VkMemoryRequirements& vkMemReq,
5629  bool requiresDedicatedAllocation,
5630  bool prefersDedicatedAllocation,
5631  const VmaAllocationCreateInfo& createInfo,
5632  VmaAllocation allocation);
5633  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5634  const VkMemoryRequirements& vkMemReq,
5635  bool requiresDedicatedAllocation,
5636  bool prefersDedicatedAllocation,
5637  const VmaAllocationCreateInfo& createInfo,
5638  VmaAllocation allocation);
5639  void RecordFreeMemory(uint32_t frameIndex,
5640  VmaAllocation allocation);
5641  void RecordResizeAllocation(
5642  uint32_t frameIndex,
5643  VmaAllocation allocation,
5644  VkDeviceSize newSize);
5645  void RecordSetAllocationUserData(uint32_t frameIndex,
5646  VmaAllocation allocation,
5647  const void* pUserData);
5648  void RecordCreateLostAllocation(uint32_t frameIndex,
5649  VmaAllocation allocation);
5650  void RecordMapMemory(uint32_t frameIndex,
5651  VmaAllocation allocation);
5652  void RecordUnmapMemory(uint32_t frameIndex,
5653  VmaAllocation allocation);
5654  void RecordFlushAllocation(uint32_t frameIndex,
5655  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5656  void RecordInvalidateAllocation(uint32_t frameIndex,
5657  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5658  void RecordCreateBuffer(uint32_t frameIndex,
5659  const VkBufferCreateInfo& bufCreateInfo,
5660  const VmaAllocationCreateInfo& allocCreateInfo,
5661  VmaAllocation allocation);
5662  void RecordCreateImage(uint32_t frameIndex,
5663  const VkImageCreateInfo& imageCreateInfo,
5664  const VmaAllocationCreateInfo& allocCreateInfo,
5665  VmaAllocation allocation);
5666  void RecordDestroyBuffer(uint32_t frameIndex,
5667  VmaAllocation allocation);
5668  void RecordDestroyImage(uint32_t frameIndex,
5669  VmaAllocation allocation);
5670  void RecordTouchAllocation(uint32_t frameIndex,
5671  VmaAllocation allocation);
5672  void RecordGetAllocationInfo(uint32_t frameIndex,
5673  VmaAllocation allocation);
5674  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5675  VmaPool pool);
5676 
5677 private:
5678  struct CallParams
5679  {
5680  uint32_t threadId;
5681  double time;
5682  };
5683 
5684  class UserDataString
5685  {
5686  public:
5687  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5688  const char* GetString() const { return m_Str; }
5689 
5690  private:
5691  char m_PtrStr[17];
5692  const char* m_Str;
5693  };
5694 
5695  bool m_UseMutex;
5696  VmaRecordFlags m_Flags;
5697  FILE* m_File;
5698  VMA_MUTEX m_FileMutex;
5699  int64_t m_Freq;
5700  int64_t m_StartCounter;
5701 
5702  void GetBasicParams(CallParams& outParams);
5703  void Flush();
5704 };
5705 
5706 #endif // #if VMA_RECORDING_ENABLED
5707 
5708 // Main allocator object.
5709 struct VmaAllocator_T
5710 {
5711  VMA_CLASS_NO_COPY(VmaAllocator_T)
5712 public:
5713  bool m_UseMutex;
5714  bool m_UseKhrDedicatedAllocation;
5715  VkDevice m_hDevice;
5716  bool m_AllocationCallbacksSpecified;
5717  VkAllocationCallbacks m_AllocationCallbacks;
5718  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5719 
5720  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5721  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5722  VMA_MUTEX m_HeapSizeLimitMutex;
5723 
5724  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5725  VkPhysicalDeviceMemoryProperties m_MemProps;
5726 
5727  // Default pools.
5728  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5729 
5730  // Each vector is sorted by memory (handle value).
5731  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5732  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5733  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5734 
5735  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5736  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5737  ~VmaAllocator_T();
5738 
5739  const VkAllocationCallbacks* GetAllocationCallbacks() const
5740  {
5741  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5742  }
5743  const VmaVulkanFunctions& GetVulkanFunctions() const
5744  {
5745  return m_VulkanFunctions;
5746  }
5747 
5748  VkDeviceSize GetBufferImageGranularity() const
5749  {
5750  return VMA_MAX(
5751  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5752  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5753  }
5754 
5755  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5756  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5757 
5758  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5759  {
5760  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5761  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5762  }
5763  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5764  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5765  {
5766  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5767  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5768  }
5769  // Minimum alignment for all allocations in specific memory type.
5770  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5771  {
5772  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5773  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5774  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5775  }
5776 
5777  bool IsIntegratedGpu() const
5778  {
5779  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5780  }
5781 
5782 #if VMA_RECORDING_ENABLED
5783  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5784 #endif
5785 
5786  void GetBufferMemoryRequirements(
5787  VkBuffer hBuffer,
5788  VkMemoryRequirements& memReq,
5789  bool& requiresDedicatedAllocation,
5790  bool& prefersDedicatedAllocation) const;
5791  void GetImageMemoryRequirements(
5792  VkImage hImage,
5793  VkMemoryRequirements& memReq,
5794  bool& requiresDedicatedAllocation,
5795  bool& prefersDedicatedAllocation) const;
5796 
5797  // Main allocation function.
5798  VkResult AllocateMemory(
5799  const VkMemoryRequirements& vkMemReq,
5800  bool requiresDedicatedAllocation,
5801  bool prefersDedicatedAllocation,
5802  VkBuffer dedicatedBuffer,
5803  VkImage dedicatedImage,
5804  const VmaAllocationCreateInfo& createInfo,
5805  VmaSuballocationType suballocType,
5806  VmaAllocation* pAllocation);
5807 
5808  // Main deallocation function.
5809  void FreeMemory(const VmaAllocation allocation);
5810 
5811  VkResult ResizeAllocation(
5812  const VmaAllocation alloc,
5813  VkDeviceSize newSize);
5814 
5815  void CalculateStats(VmaStats* pStats);
5816 
5817 #if VMA_STATS_STRING_ENABLED
5818  void PrintDetailedMap(class VmaJsonWriter& json);
5819 #endif
5820 
5821  VkResult Defragment(
5822  VmaAllocation* pAllocations,
5823  size_t allocationCount,
5824  VkBool32* pAllocationsChanged,
5825  const VmaDefragmentationInfo* pDefragmentationInfo,
5826  VmaDefragmentationStats* pDefragmentationStats);
5827 
5828  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5829  bool TouchAllocation(VmaAllocation hAllocation);
5830 
5831  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5832  void DestroyPool(VmaPool pool);
5833  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5834 
5835  void SetCurrentFrameIndex(uint32_t frameIndex);
5836  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5837 
5838  void MakePoolAllocationsLost(
5839  VmaPool hPool,
5840  size_t* pLostAllocationCount);
5841  VkResult CheckPoolCorruption(VmaPool hPool);
5842  VkResult CheckCorruption(uint32_t memoryTypeBits);
5843 
5844  void CreateLostAllocation(VmaAllocation* pAllocation);
5845 
5846  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5847  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5848 
5849  VkResult Map(VmaAllocation hAllocation, void** ppData);
5850  void Unmap(VmaAllocation hAllocation);
5851 
5852  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5853  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5854 
5855  void FlushOrInvalidateAllocation(
5856  VmaAllocation hAllocation,
5857  VkDeviceSize offset, VkDeviceSize size,
5858  VMA_CACHE_OPERATION op);
5859 
5860  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5861 
5862 private:
5863  VkDeviceSize m_PreferredLargeHeapBlockSize;
5864 
5865  VkPhysicalDevice m_PhysicalDevice;
5866  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5867 
5868  VMA_MUTEX m_PoolsMutex;
5869  // Protected by m_PoolsMutex. Sorted by pointer value.
5870  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5871  uint32_t m_NextPoolId;
5872 
5873  VmaVulkanFunctions m_VulkanFunctions;
5874 
5875 #if VMA_RECORDING_ENABLED
5876  VmaRecorder* m_pRecorder;
5877 #endif
5878 
5879  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5880 
5881  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5882 
5883  VkResult AllocateMemoryOfType(
5884  VkDeviceSize size,
5885  VkDeviceSize alignment,
5886  bool dedicatedAllocation,
5887  VkBuffer dedicatedBuffer,
5888  VkImage dedicatedImage,
5889  const VmaAllocationCreateInfo& createInfo,
5890  uint32_t memTypeIndex,
5891  VmaSuballocationType suballocType,
5892  VmaAllocation* pAllocation);
5893 
5894  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
5895  VkResult AllocateDedicatedMemory(
5896  VkDeviceSize size,
5897  VmaSuballocationType suballocType,
5898  uint32_t memTypeIndex,
5899  bool map,
5900  bool isUserDataString,
5901  void* pUserData,
5902  VkBuffer dedicatedBuffer,
5903  VkImage dedicatedImage,
5904  VmaAllocation* pAllocation);
5905 
5906  // Frees the given allocation as dedicated memory: releases its VkDeviceMemory and unregisters it.
5907  void FreeDedicatedMemory(VmaAllocation allocation);
5908 };
5909 
5911 // Memory allocation #2 after VmaAllocator_T definition
5912 
5913 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5914 {
5915  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5916 }
5917 
5918 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5919 {
5920  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5921 }
5922 
5923 template<typename T>
5924 static T* VmaAllocate(VmaAllocator hAllocator)
5925 {
5926  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5927 }
5928 
5929 template<typename T>
5930 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5931 {
5932  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5933 }
5934 
5935 template<typename T>
5936 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5937 {
5938  if(ptr != VMA_NULL)
5939  {
5940  ptr->~T();
5941  VmaFree(hAllocator, ptr);
5942  }
5943 }
5944 
5945 template<typename T>
5946 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5947 {
5948  if(ptr != VMA_NULL)
5949  {
5950  for(size_t i = count; i--; )
5951  ptr[i].~T();
5952  VmaFree(hAllocator, ptr);
5953  }
5954 }
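// Illustrative usage sketch (not part of the library): these helpers separate raw
// allocation from object lifetime, so construction is done with placement new and
// destruction through vma_delete (compare vma_new_array used later in
// SetUserData). Names below are hypothetical.
#if 0
#include <new>

struct ToyPayload
{
 int value;
 explicit ToyPayload(int v) : value(v) { }
};

static void ToyAllocateExample(VmaAllocator hAllocator)
{
 // Placement-new into storage obtained through the allocator's callbacks.
 ToyPayload* p = new(VmaAllocate<ToyPayload>(hAllocator)) ToyPayload(42);
 // Runs ~ToyPayload() and returns the memory through VmaFree.
 vma_delete(hAllocator, p);
}
#endif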
5955 
5957 // VmaStringBuilder
5958 
5959 #if VMA_STATS_STRING_ENABLED
5960 
5961 class VmaStringBuilder
5962 {
5963 public:
5964  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5965  size_t GetLength() const { return m_Data.size(); }
5966  const char* GetData() const { return m_Data.data(); }
5967 
5968  void Add(char ch) { m_Data.push_back(ch); }
5969  void Add(const char* pStr);
5970  void AddNewLine() { Add('\n'); }
5971  void AddNumber(uint32_t num);
5972  void AddNumber(uint64_t num);
5973  void AddPointer(const void* ptr);
5974 
5975 private:
5976  VmaVector< char, VmaStlAllocator<char> > m_Data;
5977 };
5978 
5979 void VmaStringBuilder::Add(const char* pStr)
5980 {
5981  const size_t strLen = strlen(pStr);
5982  if(strLen > 0)
5983  {
5984  const size_t oldCount = m_Data.size();
5985  m_Data.resize(oldCount + strLen);
5986  memcpy(m_Data.data() + oldCount, pStr, strLen);
5987  }
5988 }
5989 
5990 void VmaStringBuilder::AddNumber(uint32_t num)
5991 {
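 // Max uint32_t is 4294967295: 10 digits + terminating null = 11 chars.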
5992  char buf[11];
5993  VmaUint32ToStr(buf, sizeof(buf), num);
5994  Add(buf);
5995 }
5996 
5997 void VmaStringBuilder::AddNumber(uint64_t num)
5998 {
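 // Max uint64_t is 18446744073709551615: 20 digits + terminating null = 21 chars.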
5999  char buf[21];
6000  VmaUint64ToStr(buf, sizeof(buf), num);
6001  Add(buf);
6002 }
6003 
6004 void VmaStringBuilder::AddPointer(const void* ptr)
6005 {
6006  char buf[21];
6007  VmaPtrToStr(buf, sizeof(buf), ptr);
6008  Add(buf);
6009 }
6010 
6011 #endif // #if VMA_STATS_STRING_ENABLED
6012 
6014 // VmaJsonWriter
6015 
6016 #if VMA_STATS_STRING_ENABLED
6017 
6018 class VmaJsonWriter
6019 {
6020  VMA_CLASS_NO_COPY(VmaJsonWriter)
6021 public:
6022  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6023  ~VmaJsonWriter();
6024 
6025  void BeginObject(bool singleLine = false);
6026  void EndObject();
6027 
6028  void BeginArray(bool singleLine = false);
6029  void EndArray();
6030 
6031  void WriteString(const char* pStr);
6032  void BeginString(const char* pStr = VMA_NULL);
6033  void ContinueString(const char* pStr);
6034  void ContinueString(uint32_t n);
6035  void ContinueString(uint64_t n);
6036  void ContinueString_Pointer(const void* ptr);
6037  void EndString(const char* pStr = VMA_NULL);
6038 
6039  void WriteNumber(uint32_t n);
6040  void WriteNumber(uint64_t n);
6041  void WriteBool(bool b);
6042  void WriteNull();
6043 
6044 private:
6045  static const char* const INDENT;
6046 
6047  enum COLLECTION_TYPE
6048  {
6049  COLLECTION_TYPE_OBJECT,
6050  COLLECTION_TYPE_ARRAY,
6051  };
6052  struct StackItem
6053  {
6054  COLLECTION_TYPE type;
6055  uint32_t valueCount;
6056  bool singleLineMode;
6057  };
6058 
6059  VmaStringBuilder& m_SB;
6060  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6061  bool m_InsideString;
6062 
6063  void BeginValue(bool isString);
6064  void WriteIndent(bool oneLess = false);
6065 };
6066 
6067 const char* const VmaJsonWriter::INDENT = " ";
6068 
6069 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6070  m_SB(sb),
6071  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6072  m_InsideString(false)
6073 {
6074 }
6075 
6076 VmaJsonWriter::~VmaJsonWriter()
6077 {
6078  VMA_ASSERT(!m_InsideString);
6079  VMA_ASSERT(m_Stack.empty());
6080 }
6081 
6082 void VmaJsonWriter::BeginObject(bool singleLine)
6083 {
6084  VMA_ASSERT(!m_InsideString);
6085 
6086  BeginValue(false);
6087  m_SB.Add('{');
6088 
6089  StackItem item;
6090  item.type = COLLECTION_TYPE_OBJECT;
6091  item.valueCount = 0;
6092  item.singleLineMode = singleLine;
6093  m_Stack.push_back(item);
6094 }
6095 
6096 void VmaJsonWriter::EndObject()
6097 {
6098  VMA_ASSERT(!m_InsideString);
6099 
6100  WriteIndent(true);
6101  m_SB.Add('}');
6102 
6103  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6104  m_Stack.pop_back();
6105 }
6106 
6107 void VmaJsonWriter::BeginArray(bool singleLine)
6108 {
6109  VMA_ASSERT(!m_InsideString);
6110 
6111  BeginValue(false);
6112  m_SB.Add('[');
6113 
6114  StackItem item;
6115  item.type = COLLECTION_TYPE_ARRAY;
6116  item.valueCount = 0;
6117  item.singleLineMode = singleLine;
6118  m_Stack.push_back(item);
6119 }
6120 
6121 void VmaJsonWriter::EndArray()
6122 {
6123  VMA_ASSERT(!m_InsideString);
6124 
6125  WriteIndent(true);
6126  m_SB.Add(']');
6127 
6128  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6129  m_Stack.pop_back();
6130 }
6131 
6132 void VmaJsonWriter::WriteString(const char* pStr)
6133 {
6134  BeginString(pStr);
6135  EndString();
6136 }
6137 
6138 void VmaJsonWriter::BeginString(const char* pStr)
6139 {
6140  VMA_ASSERT(!m_InsideString);
6141 
6142  BeginValue(true);
6143  m_SB.Add('"');
6144  m_InsideString = true;
6145  if(pStr != VMA_NULL && pStr[0] != '\0')
6146  {
6147  ContinueString(pStr);
6148  }
6149 }
6150 
6151 void VmaJsonWriter::ContinueString(const char* pStr)
6152 {
6153  VMA_ASSERT(m_InsideString);
6154 
6155  const size_t strLen = strlen(pStr);
6156  for(size_t i = 0; i < strLen; ++i)
6157  {
6158  char ch = pStr[i];
6159  if(ch == '\\')
6160  {
6161  m_SB.Add("\\\\");
6162  }
6163  else if(ch == '"')
6164  {
6165  m_SB.Add("\\\"");
6166  }
6167  else if(ch >= 32)
6168  {
6169  m_SB.Add(ch);
6170  }
6171  else switch(ch)
6172  {
6173  case '\b':
6174  m_SB.Add("\\b");
6175  break;
6176  case '\f':
6177  m_SB.Add("\\f");
6178  break;
6179  case '\n':
6180  m_SB.Add("\\n");
6181  break;
6182  case '\r':
6183  m_SB.Add("\\r");
6184  break;
6185  case '\t':
6186  m_SB.Add("\\t");
6187  break;
6188  default:
6189  VMA_ASSERT(0 && "Character not currently supported.");
6190  break;
6191  }
6192  }
6193 }
6194 
6195 void VmaJsonWriter::ContinueString(uint32_t n)
6196 {
6197  VMA_ASSERT(m_InsideString);
6198  m_SB.AddNumber(n);
6199 }
6200 
6201 void VmaJsonWriter::ContinueString(uint64_t n)
6202 {
6203  VMA_ASSERT(m_InsideString);
6204  m_SB.AddNumber(n);
6205 }
6206 
6207 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6208 {
6209  VMA_ASSERT(m_InsideString);
6210  m_SB.AddPointer(ptr);
6211 }
6212 
6213 void VmaJsonWriter::EndString(const char* pStr)
6214 {
6215  VMA_ASSERT(m_InsideString);
6216  if(pStr != VMA_NULL && pStr[0] != '\0')
6217  {
6218  ContinueString(pStr);
6219  }
6220  m_SB.Add('"');
6221  m_InsideString = false;
6222 }
6223 
6224 void VmaJsonWriter::WriteNumber(uint32_t n)
6225 {
6226  VMA_ASSERT(!m_InsideString);
6227  BeginValue(false);
6228  m_SB.AddNumber(n);
6229 }
6230 
6231 void VmaJsonWriter::WriteNumber(uint64_t n)
6232 {
6233  VMA_ASSERT(!m_InsideString);
6234  BeginValue(false);
6235  m_SB.AddNumber(n);
6236 }
6237 
6238 void VmaJsonWriter::WriteBool(bool b)
6239 {
6240  VMA_ASSERT(!m_InsideString);
6241  BeginValue(false);
6242  m_SB.Add(b ? "true" : "false");
6243 }
6244 
6245 void VmaJsonWriter::WriteNull()
6246 {
6247  VMA_ASSERT(!m_InsideString);
6248  BeginValue(false);
6249  m_SB.Add("null");
6250 }
6251 
6252 void VmaJsonWriter::BeginValue(bool isString)
6253 {
6254  if(!m_Stack.empty())
6255  {
6256  StackItem& currItem = m_Stack.back();
6257  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6258  currItem.valueCount % 2 == 0) // Even position inside an object is a key - must be a string.
6259  {
6260  VMA_ASSERT(isString);
6261  }
6262 
6263  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6264  currItem.valueCount % 2 != 0) // Odd position is the value following its key.
6265  {
6266  m_SB.Add(": ");
6267  }
6268  else if(currItem.valueCount > 0)
6269  {
6270  m_SB.Add(", ");
6271  WriteIndent();
6272  }
6273  else
6274  {
6275  WriteIndent();
6276  }
6277  ++currItem.valueCount;
6278  }
6279 }
6280 
6281 void VmaJsonWriter::WriteIndent(bool oneLess)
6282 {
6283  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6284  {
6285  m_SB.AddNewLine();
6286 
6287  size_t count = m_Stack.size();
6288  if(count > 0 && oneLess)
6289  {
6290  --count;
6291  }
6292  for(size_t i = 0; i < count; ++i)
6293  {
6294  m_SB.Add(INDENT);
6295  }
6296  }
6297 }
6298 
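// Illustrative usage sketch of the writer above - not part of the library and
// excluded from compilation. Inside an object, every even-numbered value is a
// key and must be a string; Begin*/End* calls must balance or the destructor
// asserts. The function name and the idea of receiving an already constructed
// VmaStringBuilder are assumptions made for this example only.
#if 0
static void VmaJsonWriterUsageSketch(
    const VkAllocationCallbacks* pAllocationCallbacks,
    VmaStringBuilder& sb)
{
    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Name");    // Key (even position).
    json.WriteString("Example"); // Value (odd position) - ": " was emitted before it.
    json.WriteString("Flags");
    json.BeginArray(true);       // Single-line array.
    json.WriteNumber(1u);
    json.WriteBool(true);
    json.WriteNull();
    json.EndArray();
    json.EndObject();
    // sb now contains:
    // {
    //   "Name": "Example",
    //   "Flags": [1, true, null]
    // }
}
#endif
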
6299 #endif // #if VMA_STATS_STRING_ENABLED
6300 
6301 ////////////////////////////////////////////////////////////////////////////////
6302 
6303 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6304 {
6305  if(IsUserDataString())
6306  {
6307  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6308 
6309  FreeUserDataString(hAllocator);
6310 
6311  if(pUserData != VMA_NULL)
6312  {
6313  const char* const newStrSrc = (char*)pUserData;
6314  const size_t newStrLen = strlen(newStrSrc);
6315  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6316  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6317  m_pUserData = newStrDst;
6318  }
6319  }
6320  else
6321  {
6322  m_pUserData = pUserData;
6323  }
6324 }
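
// Illustrative sketch of the two paths above as seen from the public API -
// excluded from compilation; `allocator` and `alloc` stand for a valid
// VmaAllocator and VmaAllocation and are assumptions of this example.
#if 0
// Allocation created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT:
// VMA stores its own copy of the string, so a short-lived buffer is fine.
char name[] = "Character mesh";
vmaSetAllocationUserData(allocator, alloc, name);

// Allocation created without that flag: only the raw pointer is stored,
// so the pointee must outlive the allocation.
vmaSetAllocationUserData(allocator, alloc, &myObject); // myObject: hypothetical.
#endif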
6325 
6326 void VmaAllocation_T::ChangeBlockAllocation(
6327  VmaAllocator hAllocator,
6328  VmaDeviceMemoryBlock* block,
6329  VkDeviceSize offset)
6330 {
6331  VMA_ASSERT(block != VMA_NULL);
6332  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6333 
6334  // Move mapping reference counter from old block to new block.
6335  if(block != m_BlockAllocation.m_Block)
6336  {
6337  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6338  if(IsPersistentMap())
6339  ++mapRefCount;
6340  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6341  block->Map(hAllocator, mapRefCount, VMA_NULL);
6342  }
6343 
6344  m_BlockAllocation.m_Block = block;
6345  m_BlockAllocation.m_Offset = offset;
6346 }
6347 
6348 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
6349 {
6350  VMA_ASSERT(newSize > 0);
6351  m_Size = newSize;
6352 }
6353 
6354 VkDeviceSize VmaAllocation_T::GetOffset() const
6355 {
6356  switch(m_Type)
6357  {
6358  case ALLOCATION_TYPE_BLOCK:
6359  return m_BlockAllocation.m_Offset;
6360  case ALLOCATION_TYPE_DEDICATED:
6361  return 0;
6362  default:
6363  VMA_ASSERT(0);
6364  return 0;
6365  }
6366 }
6367 
6368 VkDeviceMemory VmaAllocation_T::GetMemory() const
6369 {
6370  switch(m_Type)
6371  {
6372  case ALLOCATION_TYPE_BLOCK:
6373  return m_BlockAllocation.m_Block->GetDeviceMemory();
6374  case ALLOCATION_TYPE_DEDICATED:
6375  return m_DedicatedAllocation.m_hMemory;
6376  default:
6377  VMA_ASSERT(0);
6378  return VK_NULL_HANDLE;
6379  }
6380 }
6381 
6382 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6383 {
6384  switch(m_Type)
6385  {
6386  case ALLOCATION_TYPE_BLOCK:
6387  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6388  case ALLOCATION_TYPE_DEDICATED:
6389  return m_DedicatedAllocation.m_MemoryTypeIndex;
6390  default:
6391  VMA_ASSERT(0);
6392  return UINT32_MAX;
6393  }
6394 }
6395 
6396 void* VmaAllocation_T::GetMappedData() const
6397 {
6398  switch(m_Type)
6399  {
6400  case ALLOCATION_TYPE_BLOCK:
6401  if(m_MapCount != 0)
6402  {
6403  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6404  VMA_ASSERT(pBlockData != VMA_NULL);
6405  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6406  }
6407  else
6408  {
6409  return VMA_NULL;
6410  }
6411  break;
6412  case ALLOCATION_TYPE_DEDICATED:
6413  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6414  return m_DedicatedAllocation.m_pMappedData;
6415  default:
6416  VMA_ASSERT(0);
6417  return VMA_NULL;
6418  }
6419 }
6420 
6421 bool VmaAllocation_T::CanBecomeLost() const
6422 {
6423  switch(m_Type)
6424  {
6425  case ALLOCATION_TYPE_BLOCK:
6426  return m_BlockAllocation.m_CanBecomeLost;
6427  case ALLOCATION_TYPE_DEDICATED:
6428  return false;
6429  default:
6430  VMA_ASSERT(0);
6431  return false;
6432  }
6433 }
6434 
6435 VmaPool VmaAllocation_T::GetPool() const
6436 {
6437  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6438  return m_BlockAllocation.m_hPool;
6439 }
6440 
6441 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6442 {
6443  VMA_ASSERT(CanBecomeLost());
6444 
6445  /*
6446  Warning: This is a carefully designed algorithm.
6447  Do not modify unless you really know what you're doing :)
6448  */
6449  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6450  for(;;)
6451  {
6452  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6453  {
6454  VMA_ASSERT(0);
6455  return false;
6456  }
6457  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6458  {
6459  return false;
6460  }
6461  else // Last use time earlier than current time.
6462  {
6463  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6464  {
6465  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6466  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6467  return true;
6468  }
6469  }
6470  }
6471 }
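
// The loop above is the classic lock-free compare-and-swap retry pattern.
// A minimal standalone sketch of the same technique with std::atomic -
// not library code, excluded from compilation:
#if 0
#include <atomic>
#include <cstdint>

static const uint32_t FRAME_INDEX_LOST_SKETCH = UINT32_MAX; // Hypothetical sentinel.

// Returns true if this call transitioned lastUseFrame to "lost". On CAS
// failure, compare_exchange_weak reloads `observed` and the loop retries.
static bool TryMakeLostSketch(
    std::atomic<uint32_t>& lastUseFrame,
    uint32_t currentFrame,
    uint32_t framesInUse)
{
    uint32_t observed = lastUseFrame.load();
    for(;;)
    {
        if(observed == FRAME_INDEX_LOST_SKETCH)
        {
            return false; // Another thread already marked it lost.
        }
        if(observed + framesInUse >= currentFrame)
        {
            return false; // Still potentially in use by in-flight frames.
        }
        if(lastUseFrame.compare_exchange_weak(observed, FRAME_INDEX_LOST_SKETCH))
        {
            return true;
        }
    }
}
#endif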
6472 
6473 #if VMA_STATS_STRING_ENABLED
6474 
6475 // Entries correspond to the values of enum VmaSuballocationType.
6476 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6477  "FREE",
6478  "UNKNOWN",
6479  "BUFFER",
6480  "IMAGE_UNKNOWN",
6481  "IMAGE_LINEAR",
6482  "IMAGE_OPTIMAL",
6483 };
6484 
6485 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6486 {
6487  json.WriteString("Type");
6488  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6489 
6490  json.WriteString("Size");
6491  json.WriteNumber(m_Size);
6492 
6493  if(m_pUserData != VMA_NULL)
6494  {
6495  json.WriteString("UserData");
6496  if(IsUserDataString())
6497  {
6498  json.WriteString((const char*)m_pUserData);
6499  }
6500  else
6501  {
6502  json.BeginString();
6503  json.ContinueString_Pointer(m_pUserData);
6504  json.EndString();
6505  }
6506  }
6507 
6508  json.WriteString("CreationFrameIndex");
6509  json.WriteNumber(m_CreationFrameIndex);
6510 
6511  json.WriteString("LastUseFrameIndex");
6512  json.WriteNumber(GetLastUseFrameIndex());
6513 
6514  if(m_BufferImageUsage != 0)
6515  {
6516  json.WriteString("Usage");
6517  json.WriteNumber(m_BufferImageUsage);
6518  }
6519 }
6520 
6521 #endif
6522 
6523 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6524 {
6525  VMA_ASSERT(IsUserDataString());
6526  if(m_pUserData != VMA_NULL)
6527  {
6528  char* const oldStr = (char*)m_pUserData;
6529  const size_t oldStrLen = strlen(oldStr);
6530  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6531  m_pUserData = VMA_NULL;
6532  }
6533 }
6534 
6535 void VmaAllocation_T::BlockAllocMap()
6536 {
6537  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6538 
6539  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6540  {
6541  ++m_MapCount;
6542  }
6543  else
6544  {
6545  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6546  }
6547 }
6548 
6549 void VmaAllocation_T::BlockAllocUnmap()
6550 {
6551  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6552 
6553  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6554  {
6555  --m_MapCount;
6556  }
6557  else
6558  {
6559  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6560  }
6561 }
6562 
6563 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6564 {
6565  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6566 
6567  if(m_MapCount != 0)
6568  {
6569  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6570  {
6571  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6572  *ppData = m_DedicatedAllocation.m_pMappedData;
6573  ++m_MapCount;
6574  return VK_SUCCESS;
6575  }
6576  else
6577  {
6578  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6579  return VK_ERROR_MEMORY_MAP_FAILED;
6580  }
6581  }
6582  else
6583  {
6584  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6585  hAllocator->m_hDevice,
6586  m_DedicatedAllocation.m_hMemory,
6587  0, // offset
6588  VK_WHOLE_SIZE,
6589  0, // flags
6590  ppData);
6591  if(result == VK_SUCCESS)
6592  {
6593  m_DedicatedAllocation.m_pMappedData = *ppData;
6594  m_MapCount = 1;
6595  }
6596  return result;
6597  }
6598 }
6599 
6600 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6601 {
6602  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6603 
6604  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6605  {
6606  --m_MapCount;
6607  if(m_MapCount == 0)
6608  {
6609  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6610  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6611  hAllocator->m_hDevice,
6612  m_DedicatedAllocation.m_hMemory);
6613  }
6614  }
6615  else
6616  {
6617  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6618  }
6619 }
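
// Because map/unmap above maintain a reference count, nested mapping of the
// same allocation is legal. Illustrative sketch using the public API -
// excluded from compilation; `allocator` and `alloc` are assumed valid and
// error handling is omitted:
#if 0
void* pData1 = VMA_NULL;
void* pData2 = VMA_NULL;
vmaMapMemory(allocator, alloc, &pData1); // Refcount 0 -> 1: vkMapMemory is called.
vmaMapMemory(allocator, alloc, &pData2); // Refcount 1 -> 2: same pointer returned.
((uint32_t*)pData1)[0] = 42u;
vmaUnmapMemory(allocator, alloc);        // Refcount 2 -> 1.
vmaUnmapMemory(allocator, alloc);        // Refcount 1 -> 0: vkUnmapMemory is called.
#endif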
6620 
6621 #if VMA_STATS_STRING_ENABLED
6622 
6623 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6624 {
6625  json.BeginObject();
6626 
6627  json.WriteString("Blocks");
6628  json.WriteNumber(stat.blockCount);
6629 
6630  json.WriteString("Allocations");
6631  json.WriteNumber(stat.allocationCount);
6632 
6633  json.WriteString("UnusedRanges");
6634  json.WriteNumber(stat.unusedRangeCount);
6635 
6636  json.WriteString("UsedBytes");
6637  json.WriteNumber(stat.usedBytes);
6638 
6639  json.WriteString("UnusedBytes");
6640  json.WriteNumber(stat.unusedBytes);
6641 
6642  if(stat.allocationCount > 1)
6643  {
6644  json.WriteString("AllocationSize");
6645  json.BeginObject(true);
6646  json.WriteString("Min");
6647  json.WriteNumber(stat.allocationSizeMin);
6648  json.WriteString("Avg");
6649  json.WriteNumber(stat.allocationSizeAvg);
6650  json.WriteString("Max");
6651  json.WriteNumber(stat.allocationSizeMax);
6652  json.EndObject();
6653  }
6654 
6655  if(stat.unusedRangeCount > 1)
6656  {
6657  json.WriteString("UnusedRangeSize");
6658  json.BeginObject(true);
6659  json.WriteString("Min");
6660  json.WriteNumber(stat.unusedRangeSizeMin);
6661  json.WriteString("Avg");
6662  json.WriteNumber(stat.unusedRangeSizeAvg);
6663  json.WriteString("Max");
6664  json.WriteNumber(stat.unusedRangeSizeMax);
6665  json.EndObject();
6666  }
6667 
6668  json.EndObject();
6669 }
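
// For reference, the object written above has this shape (numbers are
// placeholders; the single-line "AllocationSize"/"UnusedRangeSize" sub-objects
// appear only when the corresponding count is greater than 1):
//
// {
//   "Blocks": 2,
//   "Allocations": 10,
//   "UnusedRanges": 3,
//   "UsedBytes": 1048576,
//   "UnusedBytes": 65536,
//   "AllocationSize": {"Min": 256, "Avg": 104857, "Max": 524288},
//   "UnusedRangeSize": {"Min": 4096, "Avg": 21845, "Max": 32768}
// }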
6670 
6671 #endif // #if VMA_STATS_STRING_ENABLED
6672 
6673 struct VmaSuballocationItemSizeLess
6674 {
6675  bool operator()(
6676  const VmaSuballocationList::iterator lhs,
6677  const VmaSuballocationList::iterator rhs) const
6678  {
6679  return lhs->size < rhs->size;
6680  }
6681  bool operator()(
6682  const VmaSuballocationList::iterator lhs,
6683  VkDeviceSize rhsSize) const
6684  {
6685  return lhs->size < rhsSize;
6686  }
6687 };
6688 
6689 
6690 ////////////////////////////////////////////////////////////////////////////////
6691 // class VmaBlockMetadata
6692 
6693 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6694  m_Size(0),
6695  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6696 {
6697 }
6698 
6699 #if VMA_STATS_STRING_ENABLED
6700 
6701 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6702  VkDeviceSize unusedBytes,
6703  size_t allocationCount,
6704  size_t unusedRangeCount) const
6705 {
6706  json.BeginObject();
6707 
6708  json.WriteString("TotalBytes");
6709  json.WriteNumber(GetSize());
6710 
6711  json.WriteString("UnusedBytes");
6712  json.WriteNumber(unusedBytes);
6713 
6714  json.WriteString("Allocations");
6715  json.WriteNumber((uint64_t)allocationCount);
6716 
6717  json.WriteString("UnusedRanges");
6718  json.WriteNumber((uint64_t)unusedRangeCount);
6719 
6720  json.WriteString("Suballocations");
6721  json.BeginArray();
6722 }
6723 
6724 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6725  VkDeviceSize offset,
6726  VmaAllocation hAllocation) const
6727 {
6728  json.BeginObject(true);
6729 
6730  json.WriteString("Offset");
6731  json.WriteNumber(offset);
6732 
6733  hAllocation->PrintParameters(json);
6734 
6735  json.EndObject();
6736 }
6737 
6738 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6739  VkDeviceSize offset,
6740  VkDeviceSize size) const
6741 {
6742  json.BeginObject(true);
6743 
6744  json.WriteString("Offset");
6745  json.WriteNumber(offset);
6746 
6747  json.WriteString("Type");
6748  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6749 
6750  json.WriteString("Size");
6751  json.WriteNumber(size);
6752 
6753  json.EndObject();
6754 }
6755 
6756 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6757 {
6758  json.EndArray();
6759  json.EndObject();
6760 }
6761 
6762 #endif // #if VMA_STATS_STRING_ENABLED
6763 
6764 ////////////////////////////////////////////////////////////////////////////////
6765 // class VmaBlockMetadata_Generic
6766 
6767 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6768  VmaBlockMetadata(hAllocator),
6769  m_FreeCount(0),
6770  m_SumFreeSize(0),
6771  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6772  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6773 {
6774 }
6775 
6776 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6777 {
6778 }
6779 
6780 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6781 {
6782  VmaBlockMetadata::Init(size);
6783 
6784  m_FreeCount = 1;
6785  m_SumFreeSize = size;
6786 
6787  VmaSuballocation suballoc = {};
6788  suballoc.offset = 0;
6789  suballoc.size = size;
6790  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6791  suballoc.hAllocation = VK_NULL_HANDLE;
6792 
6793  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6794  m_Suballocations.push_back(suballoc);
6795  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6796  --suballocItem;
6797  m_FreeSuballocationsBySize.push_back(suballocItem);
6798 }
6799 
6800 bool VmaBlockMetadata_Generic::Validate() const
6801 {
6802  VMA_VALIDATE(!m_Suballocations.empty());
6803 
6804  // Expected offset of new suballocation as calculated from previous ones.
6805  VkDeviceSize calculatedOffset = 0;
6806  // Expected number of free suballocations as calculated from traversing their list.
6807  uint32_t calculatedFreeCount = 0;
6808  // Expected sum size of free suballocations as calculated from traversing their list.
6809  VkDeviceSize calculatedSumFreeSize = 0;
6810  // Expected number of free suballocations that should be registered in
6811  // m_FreeSuballocationsBySize calculated from traversing their list.
6812  size_t freeSuballocationsToRegister = 0;
6813  // True if previous visited suballocation was free.
6814  bool prevFree = false;
6815 
6816  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6817  suballocItem != m_Suballocations.cend();
6818  ++suballocItem)
6819  {
6820  const VmaSuballocation& subAlloc = *suballocItem;
6821 
6822  // Actual offset of this suballocation doesn't match expected one.
6823  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6824 
6825  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6826  // Two adjacent free suballocations are invalid. They should be merged.
6827  VMA_VALIDATE(!prevFree || !currFree);
6828 
6829  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6830 
6831  if(currFree)
6832  {
6833  calculatedSumFreeSize += subAlloc.size;
6834  ++calculatedFreeCount;
6835  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6836  {
6837  ++freeSuballocationsToRegister;
6838  }
6839 
6840  // Margin required between allocations - every free space must be at least that large.
6841  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6842  }
6843  else
6844  {
6845  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6846  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6847 
6848  // Margin required between allocations - previous allocation must be free.
6849  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6850  }
6851 
6852  calculatedOffset += subAlloc.size;
6853  prevFree = currFree;
6854  }
6855 
6856  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6857  // match expected one.
6858  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6859 
6860  VkDeviceSize lastSize = 0;
6861  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6862  {
6863  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6864 
6865  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6866  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6867  // They must be sorted by size ascending.
6868  VMA_VALIDATE(suballocItem->size >= lastSize);
6869 
6870  lastSize = suballocItem->size;
6871  }
6872 
6873  // Check if totals match calculated values.
6874  VMA_VALIDATE(ValidateFreeSuballocationList());
6875  VMA_VALIDATE(calculatedOffset == GetSize());
6876  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6877  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6878 
6879  return true;
6880 }
6881 
6882 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6883 {
6884  if(!m_FreeSuballocationsBySize.empty())
6885  {
6886  return m_FreeSuballocationsBySize.back()->size;
6887  }
6888  else
6889  {
6890  return 0;
6891  }
6892 }
6893 
6894 bool VmaBlockMetadata_Generic::IsEmpty() const
6895 {
6896  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6897 }
6898 
6899 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6900 {
6901  outInfo.blockCount = 1;
6902 
6903  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6904  outInfo.allocationCount = rangeCount - m_FreeCount;
6905  outInfo.unusedRangeCount = m_FreeCount;
6906 
6907  outInfo.unusedBytes = m_SumFreeSize;
6908  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6909 
6910  outInfo.allocationSizeMin = UINT64_MAX;
6911  outInfo.allocationSizeMax = 0;
6912  outInfo.unusedRangeSizeMin = UINT64_MAX;
6913  outInfo.unusedRangeSizeMax = 0;
6914 
6915  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6916  suballocItem != m_Suballocations.cend();
6917  ++suballocItem)
6918  {
6919  const VmaSuballocation& suballoc = *suballocItem;
6920  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6921  {
6922  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6923  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6924  }
6925  else
6926  {
6927  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6928  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6929  }
6930  }
6931 }
6932 
6933 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6934 {
6935  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6936 
6937  inoutStats.size += GetSize();
6938  inoutStats.unusedSize += m_SumFreeSize;
6939  inoutStats.allocationCount += rangeCount - m_FreeCount;
6940  inoutStats.unusedRangeCount += m_FreeCount;
6941  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6942 }
6943 
6944 #if VMA_STATS_STRING_ENABLED
6945 
6946 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6947 {
6948  PrintDetailedMap_Begin(json,
6949  m_SumFreeSize, // unusedBytes
6950  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6951  m_FreeCount); // unusedRangeCount
6952 
6953  size_t i = 0;
6954  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6955  suballocItem != m_Suballocations.cend();
6956  ++suballocItem, ++i)
6957  {
6958  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6959  {
6960  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6961  }
6962  else
6963  {
6964  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6965  }
6966  }
6967 
6968  PrintDetailedMap_End(json);
6969 }
6970 
6971 #endif // #if VMA_STATS_STRING_ENABLED
6972 
6973 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6974  uint32_t currentFrameIndex,
6975  uint32_t frameInUseCount,
6976  VkDeviceSize bufferImageGranularity,
6977  VkDeviceSize allocSize,
6978  VkDeviceSize allocAlignment,
6979  bool upperAddress,
6980  VmaSuballocationType allocType,
6981  bool canMakeOtherLost,
6982  uint32_t strategy,
6983  VmaAllocationRequest* pAllocationRequest)
6984 {
6985  VMA_ASSERT(allocSize > 0);
6986  VMA_ASSERT(!upperAddress);
6987  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6988  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6989  VMA_HEAVY_ASSERT(Validate());
6990 
6991  // There is not enough total free space in this block to fulfill the request: Early return.
6992  if(canMakeOtherLost == false &&
6993  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6994  {
6995  return false;
6996  }
6997 
6998  // New algorithm, efficiently searching freeSuballocationsBySize.
6999  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
7000  if(freeSuballocCount > 0)
7001  {
7002  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
7003  {
7004  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
7005  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7006  m_FreeSuballocationsBySize.data(),
7007  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7008  allocSize + 2 * VMA_DEBUG_MARGIN,
7009  VmaSuballocationItemSizeLess());
7010  size_t index = it - m_FreeSuballocationsBySize.data();
7011  for(; index < freeSuballocCount; ++index)
7012  {
7013  if(CheckAllocation(
7014  currentFrameIndex,
7015  frameInUseCount,
7016  bufferImageGranularity,
7017  allocSize,
7018  allocAlignment,
7019  allocType,
7020  m_FreeSuballocationsBySize[index],
7021  false, // canMakeOtherLost
7022  &pAllocationRequest->offset,
7023  &pAllocationRequest->itemsToMakeLostCount,
7024  &pAllocationRequest->sumFreeSize,
7025  &pAllocationRequest->sumItemSize))
7026  {
7027  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7028  return true;
7029  }
7030  }
7031  }
7032  else // WORST_FIT, FIRST_FIT
7033  {
7034  // Search starting from the biggest suballocations.
7035  for(size_t index = freeSuballocCount; index--; )
7036  {
7037  if(CheckAllocation(
7038  currentFrameIndex,
7039  frameInUseCount,
7040  bufferImageGranularity,
7041  allocSize,
7042  allocAlignment,
7043  allocType,
7044  m_FreeSuballocationsBySize[index],
7045  false, // canMakeOtherLost
7046  &pAllocationRequest->offset,
7047  &pAllocationRequest->itemsToMakeLostCount,
7048  &pAllocationRequest->sumFreeSize,
7049  &pAllocationRequest->sumItemSize))
7050  {
7051  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7052  return true;
7053  }
7054  }
7055  }
7056  }
7057 
7058  if(canMakeOtherLost)
7059  {
7060  // Brute-force algorithm. TODO: Come up with something better.
7061 
7062  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7063  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7064 
7065  VmaAllocationRequest tmpAllocRequest = {};
7066  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7067  suballocIt != m_Suballocations.end();
7068  ++suballocIt)
7069  {
7070  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7071  suballocIt->hAllocation->CanBecomeLost())
7072  {
7073  if(CheckAllocation(
7074  currentFrameIndex,
7075  frameInUseCount,
7076  bufferImageGranularity,
7077  allocSize,
7078  allocAlignment,
7079  allocType,
7080  suballocIt,
7081  canMakeOtherLost,
7082  &tmpAllocRequest.offset,
7083  &tmpAllocRequest.itemsToMakeLostCount,
7084  &tmpAllocRequest.sumFreeSize,
7085  &tmpAllocRequest.sumItemSize))
7086  {
7087  tmpAllocRequest.item = suballocIt;
7088 
7089  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7090  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7091  {
7092  *pAllocationRequest = tmpAllocRequest;
7093  }
7094  }
7095  }
7096  }
7097 
7098  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7099  {
7100  return true;
7101  }
7102  }
7103 
7104  return false;
7105 }
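
// The best-fit path above depends on m_FreeSuballocationsBySize being sorted
// by size ascending, so the smallest sufficient block is found in O(log n).
// A minimal standalone sketch of the same technique using the standard
// library - not library code, excluded from compilation:
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

// Returns the index of the smallest free block able to hold `requiredSize`,
// or SIZE_MAX if none fits. `freeSizes` must be sorted ascending.
static size_t FindBestFitSketch(const std::vector<uint64_t>& freeSizes, uint64_t requiredSize)
{
    // lower_bound: first element not less than requiredSize == the best fit.
    std::vector<uint64_t>::const_iterator it =
        std::lower_bound(freeSizes.begin(), freeSizes.end(), requiredSize);
    return (it != freeSizes.end()) ? (size_t)(it - freeSizes.begin()) : SIZE_MAX;
}
#endif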
7106 
7107 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7108  uint32_t currentFrameIndex,
7109  uint32_t frameInUseCount,
7110  VmaAllocationRequest* pAllocationRequest)
7111 {
7112  while(pAllocationRequest->itemsToMakeLostCount > 0)
7113  {
7114  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7115  {
7116  ++pAllocationRequest->item;
7117  }
7118  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7119  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7120  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7121  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7122  {
7123  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7124  --pAllocationRequest->itemsToMakeLostCount;
7125  }
7126  else
7127  {
7128  return false;
7129  }
7130  }
7131 
7132  VMA_HEAVY_ASSERT(Validate());
7133  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7134  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7135 
7136  return true;
7137 }
7138 
7139 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7140 {
7141  uint32_t lostAllocationCount = 0;
7142  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7143  it != m_Suballocations.end();
7144  ++it)
7145  {
7146  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7147  it->hAllocation->CanBecomeLost() &&
7148  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7149  {
7150  it = FreeSuballocation(it);
7151  ++lostAllocationCount;
7152  }
7153  }
7154  return lostAllocationCount;
7155 }
7156 
7157 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7158 {
7159  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7160  it != m_Suballocations.end();
7161  ++it)
7162  {
7163  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7164  {
7165  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7166  {
7167  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7168  return VK_ERROR_VALIDATION_FAILED_EXT;
7169  }
7170  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7171  {
7172  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7173  return VK_ERROR_VALIDATION_FAILED_EXT;
7174  }
7175  }
7176  }
7177 
7178  return VK_SUCCESS;
7179 }
7180 
7181 void VmaBlockMetadata_Generic::Alloc(
7182  const VmaAllocationRequest& request,
7183  VmaSuballocationType type,
7184  VkDeviceSize allocSize,
7185  bool upperAddress,
7186  VmaAllocation hAllocation)
7187 {
7188  VMA_ASSERT(!upperAddress);
7189  VMA_ASSERT(request.item != m_Suballocations.end());
7190  VmaSuballocation& suballoc = *request.item;
7191  // Given suballocation is a free block.
7192  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7193  // Given offset is inside this suballocation.
7194  VMA_ASSERT(request.offset >= suballoc.offset);
7195  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7196  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7197  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7198 
7199  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7200  // it to become used.
7201  UnregisterFreeSuballocation(request.item);
7202 
7203  suballoc.offset = request.offset;
7204  suballoc.size = allocSize;
7205  suballoc.type = type;
7206  suballoc.hAllocation = hAllocation;
7207 
7208  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7209  if(paddingEnd)
7210  {
7211  VmaSuballocation paddingSuballoc = {};
7212  paddingSuballoc.offset = request.offset + allocSize;
7213  paddingSuballoc.size = paddingEnd;
7214  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7215  VmaSuballocationList::iterator next = request.item;
7216  ++next;
7217  const VmaSuballocationList::iterator paddingEndItem =
7218  m_Suballocations.insert(next, paddingSuballoc);
7219  RegisterFreeSuballocation(paddingEndItem);
7220  }
7221 
7222  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7223  if(paddingBegin)
7224  {
7225  VmaSuballocation paddingSuballoc = {};
7226  paddingSuballoc.offset = request.offset - paddingBegin;
7227  paddingSuballoc.size = paddingBegin;
7228  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7229  const VmaSuballocationList::iterator paddingBeginItem =
7230  m_Suballocations.insert(request.item, paddingSuballoc);
7231  RegisterFreeSuballocation(paddingBeginItem);
7232  }
7233 
7234  // Update totals.
7235  m_FreeCount = m_FreeCount - 1;
7236  if(paddingBegin > 0)
7237  {
7238  ++m_FreeCount;
7239  }
7240  if(paddingEnd > 0)
7241  {
7242  ++m_FreeCount;
7243  }
7244  m_SumFreeSize -= allocSize;
7245 }
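
// Worked example for Alloc() with hypothetical numbers: a free suballocation
// at offset 0 with size 256 and a request placed at offset 64 with
// allocSize 128 gives paddingBegin = 64 and paddingEnd = 256 - 64 - 128 = 64.
// The single free range becomes [free 0..64)[used 64..192)[free 192..256),
// so m_FreeCount changes by -1 (consumed) +2 (paddings) and m_SumFreeSize
// drops by exactly allocSize = 128.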
7246 
7247 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7248 {
7249  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7250  suballocItem != m_Suballocations.end();
7251  ++suballocItem)
7252  {
7253  VmaSuballocation& suballoc = *suballocItem;
7254  if(suballoc.hAllocation == allocation)
7255  {
7256  FreeSuballocation(suballocItem);
7257  VMA_HEAVY_ASSERT(Validate());
7258  return;
7259  }
7260  }
7261  VMA_ASSERT(0 && "Not found!");
7262 }
7263 
7264 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7265 {
7266  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7267  suballocItem != m_Suballocations.end();
7268  ++suballocItem)
7269  {
7270  VmaSuballocation& suballoc = *suballocItem;
7271  if(suballoc.offset == offset)
7272  {
7273  FreeSuballocation(suballocItem);
7274  return;
7275  }
7276  }
7277  VMA_ASSERT(0 && "Not found!");
7278 }
7279 
7280 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7281 {
7282  typedef VmaSuballocationList::iterator iter_type;
7283  for(iter_type suballocItem = m_Suballocations.begin();
7284  suballocItem != m_Suballocations.end();
7285  ++suballocItem)
7286  {
7287  VmaSuballocation& suballoc = *suballocItem;
7288  if(suballoc.hAllocation == alloc)
7289  {
7290  iter_type nextItem = suballocItem;
7291  ++nextItem;
7292 
7293  // Should have been ensured on a higher level.
7294  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
7295 
7296  // Shrinking.
7297  if(newSize < alloc->GetSize())
7298  {
7299  const VkDeviceSize sizeDiff = suballoc.size - newSize;
7300 
7301  // There is next item.
7302  if(nextItem != m_Suballocations.end())
7303  {
7304  // Next item is free.
7305  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7306  {
7307  // Grow this next item backward.
7308  UnregisterFreeSuballocation(nextItem);
7309  nextItem->offset -= sizeDiff;
7310  nextItem->size += sizeDiff;
7311  RegisterFreeSuballocation(nextItem);
7312  }
7313  // Next item is not free.
7314  else
7315  {
7316  // Create free item after current one.
7317  VmaSuballocation newFreeSuballoc;
7318  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7319  newFreeSuballoc.offset = suballoc.offset + newSize;
7320  newFreeSuballoc.size = sizeDiff;
7321  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7322  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
7323  RegisterFreeSuballocation(newFreeSuballocIt);
7324 
7325  ++m_FreeCount;
7326  }
7327  }
7328  // This is the last item.
7329  else
7330  {
7331  // Create free item at the end.
7332  VmaSuballocation newFreeSuballoc;
7333  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7334  newFreeSuballoc.offset = suballoc.offset + newSize;
7335  newFreeSuballoc.size = sizeDiff;
7336  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7337  m_Suballocations.push_back(newFreeSuballoc);
7338 
7339  iter_type newFreeSuballocIt = m_Suballocations.end();
7340  RegisterFreeSuballocation(--newFreeSuballocIt);
7341 
7342  ++m_FreeCount;
7343  }
7344 
7345  suballoc.size = newSize;
7346  m_SumFreeSize += sizeDiff;
7347  }
7348  // Growing.
7349  else
7350  {
7351  const VkDeviceSize sizeDiff = newSize - suballoc.size;
7352 
7353  // There is next item.
7354  if(nextItem != m_Suballocations.end())
7355  {
7356  // Next item is free.
7357  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7358  {
7359  // There is not enough free space, including margin.
7360  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
7361  {
7362  return false;
7363  }
7364 
7365  // There is more free space than required.
7366  if(nextItem->size > sizeDiff)
7367  {
7368  // Move and shrink this next item.
7369  UnregisterFreeSuballocation(nextItem);
7370  nextItem->offset += sizeDiff;
7371  nextItem->size -= sizeDiff;
7372  RegisterFreeSuballocation(nextItem);
7373  }
7374  // There is exactly the amount of free space required.
7375  else
7376  {
7377  // Remove this next free item.
7378  UnregisterFreeSuballocation(nextItem);
7379  m_Suballocations.erase(nextItem);
7380  --m_FreeCount;
7381  }
7382  }
7383  // Next item is not free - there is no space to grow.
7384  else
7385  {
7386  return false;
7387  }
7388  }
7389  // This is the last item - there is no space to grow.
7390  else
7391  {
7392  return false;
7393  }
7394 
7395  suballoc.size = newSize;
7396  m_SumFreeSize -= sizeDiff;
7397  }
7398 
7399  // We cannot call Validate() here because the alloc object is updated to the new size outside of this call.
7400  return true;
7401  }
7402  }
7403  VMA_ASSERT(0 && "Not found!");
7404  return false;
7405 }
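
// Illustrative sketch - excluded from compilation - assuming this header
// version exposes a public in-place resize entry point that reaches the
// function above. Growing only succeeds when a large-enough free range sits
// directly after the allocation; the offset never changes. `allocator`,
// `alloc` and `newSizeInBytes` are assumptions of this example.
#if 0
if(vmaResizeAllocation(allocator, alloc, newSizeInBytes) == VK_SUCCESS)
{
    // Same offset, new size; no data was moved.
}
else
{
    // No room to grow in place: create a new allocation and copy instead.
}
#endif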
7406 
7407 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7408 {
7409  VkDeviceSize lastSize = 0;
7410  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7411  {
7412  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7413 
7414  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7415  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7416  VMA_VALIDATE(it->size >= lastSize);
7417  lastSize = it->size;
7418  }
7419  return true;
7420 }
7421 
7422 bool VmaBlockMetadata_Generic::CheckAllocation(
7423  uint32_t currentFrameIndex,
7424  uint32_t frameInUseCount,
7425  VkDeviceSize bufferImageGranularity,
7426  VkDeviceSize allocSize,
7427  VkDeviceSize allocAlignment,
7428  VmaSuballocationType allocType,
7429  VmaSuballocationList::const_iterator suballocItem,
7430  bool canMakeOtherLost,
7431  VkDeviceSize* pOffset,
7432  size_t* itemsToMakeLostCount,
7433  VkDeviceSize* pSumFreeSize,
7434  VkDeviceSize* pSumItemSize) const
7435 {
7436  VMA_ASSERT(allocSize > 0);
7437  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7438  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7439  VMA_ASSERT(pOffset != VMA_NULL);
7440 
7441  *itemsToMakeLostCount = 0;
7442  *pSumFreeSize = 0;
7443  *pSumItemSize = 0;
7444 
7445  if(canMakeOtherLost)
7446  {
7447  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7448  {
7449  *pSumFreeSize = suballocItem->size;
7450  }
7451  else
7452  {
7453  if(suballocItem->hAllocation->CanBecomeLost() &&
7454  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7455  {
7456  ++*itemsToMakeLostCount;
7457  *pSumItemSize = suballocItem->size;
7458  }
7459  else
7460  {
7461  return false;
7462  }
7463  }
7464 
7465  // Remaining size is too small for this request: Early return.
7466  if(GetSize() - suballocItem->offset < allocSize)
7467  {
7468  return false;
7469  }
7470 
7471  // Start from offset equal to beginning of this suballocation.
7472  *pOffset = suballocItem->offset;
7473 
7474  // Apply VMA_DEBUG_MARGIN at the beginning.
7475  if(VMA_DEBUG_MARGIN > 0)
7476  {
7477  *pOffset += VMA_DEBUG_MARGIN;
7478  }
7479 
7480  // Apply alignment.
7481  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7482 
7483  // Check previous suballocations for BufferImageGranularity conflicts.
7484  // Make bigger alignment if necessary.
7485  if(bufferImageGranularity > 1)
7486  {
7487  bool bufferImageGranularityConflict = false;
7488  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7489  while(prevSuballocItem != m_Suballocations.cbegin())
7490  {
7491  --prevSuballocItem;
7492  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7493  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7494  {
7495  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7496  {
7497  bufferImageGranularityConflict = true;
7498  break;
7499  }
7500  }
7501  else
7502  // Already on previous page.
7503  break;
7504  }
7505  if(bufferImageGranularityConflict)
7506  {
7507  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7508  }
7509  }
7510 
7511  // Now that we have final *pOffset, check if we are past suballocItem.
7512  // If yes, return false - this function should be called for another suballocItem as starting point.
7513  if(*pOffset >= suballocItem->offset + suballocItem->size)
7514  {
7515  return false;
7516  }
7517 
7518  // Calculate padding at the beginning based on current offset.
7519  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7520 
7521  // Calculate required margin at the end.
7522  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7523 
7524  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7525  // Another early return check.
7526  if(suballocItem->offset + totalSize > GetSize())
7527  {
7528  return false;
7529  }
7530 
7531  // Advance lastSuballocItem until desired size is reached.
7532  // Update itemsToMakeLostCount.
7533  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7534  if(totalSize > suballocItem->size)
7535  {
7536  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7537  while(remainingSize > 0)
7538  {
7539  ++lastSuballocItem;
7540  if(lastSuballocItem == m_Suballocations.cend())
7541  {
7542  return false;
7543  }
7544  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7545  {
7546  *pSumFreeSize += lastSuballocItem->size;
7547  }
7548  else
7549  {
7550  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7551  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7552  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7553  {
7554  ++*itemsToMakeLostCount;
7555  *pSumItemSize += lastSuballocItem->size;
7556  }
7557  else
7558  {
7559  return false;
7560  }
7561  }
7562  remainingSize = (lastSuballocItem->size < remainingSize) ?
7563  remainingSize - lastSuballocItem->size : 0;
7564  }
7565  }
7566 
7567  // Check next suballocations for BufferImageGranularity conflicts.
7568  // If conflict exists, we must mark more allocations lost or fail.
7569  if(bufferImageGranularity > 1)
7570  {
7571  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7572  ++nextSuballocItem;
7573  while(nextSuballocItem != m_Suballocations.cend())
7574  {
7575  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7576  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7577  {
7578  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7579  {
7580  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7581  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7582  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7583  {
7584  ++*itemsToMakeLostCount;
7585  }
7586  else
7587  {
7588  return false;
7589  }
7590  }
7591  }
7592  else
7593  {
7594  // Already on next page.
7595  break;
7596  }
7597  ++nextSuballocItem;
7598  }
7599  }
7600  }
7601  else
7602  {
7603  const VmaSuballocation& suballoc = *suballocItem;
7604  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7605 
7606  *pSumFreeSize = suballoc.size;
7607 
7608  // Size of this suballocation is too small for this request: Early return.
7609  if(suballoc.size < allocSize)
7610  {
7611  return false;
7612  }
7613 
7614  // Start from offset equal to beginning of this suballocation.
7615  *pOffset = suballoc.offset;
7616 
7617  // Apply VMA_DEBUG_MARGIN at the beginning.
7618  if(VMA_DEBUG_MARGIN > 0)
7619  {
7620  *pOffset += VMA_DEBUG_MARGIN;
7621  }
7622 
7623  // Apply alignment.
7624  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7625 
7626  // Check previous suballocations for BufferImageGranularity conflicts.
7627  // Make bigger alignment if necessary.
7628  if(bufferImageGranularity > 1)
7629  {
7630  bool bufferImageGranularityConflict = false;
7631  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7632  while(prevSuballocItem != m_Suballocations.cbegin())
7633  {
7634  --prevSuballocItem;
7635  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7636  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7637  {
7638  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7639  {
7640  bufferImageGranularityConflict = true;
7641  break;
7642  }
7643  }
7644  else
7645  // Already on previous page.
7646  break;
7647  }
7648  if(bufferImageGranularityConflict)
7649  {
7650  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7651  }
7652  }
7653 
7654  // Calculate padding at the beginning based on current offset.
7655  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7656 
7657  // Calculate required margin at the end.
7658  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7659 
7660  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7661  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7662  {
7663  return false;
7664  }
7665 
7666  // Check next suballocations for BufferImageGranularity conflicts.
7667  // If conflict exists, allocation cannot be made here.
7668  if(bufferImageGranularity > 1)
7669  {
7670  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7671  ++nextSuballocItem;
7672  while(nextSuballocItem != m_Suballocations.cend())
7673  {
7674  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7675  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7676  {
7677  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7678  {
7679  return false;
7680  }
7681  }
7682  else
7683  {
7684  // Already on next page.
7685  break;
7686  }
7687  ++nextSuballocItem;
7688  }
7689  }
7690  }
7691 
7692  // All tests passed: Success. pOffset is already filled.
7693  return true;
7694 }
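
// Worked example for the granularity checks above, with hypothetical numbers:
// bufferImageGranularity = 0x1000 and a previous OPTIMAL-tiling suballocation
// at offset 0x0F00 with size 0x200, whose last byte 0x10FF lies on page
// 0x1000. A LINEAR buffer placed at *pOffset = 0x1100 would start on that
// same page, which the spec forbids for mixed tiling, so the conflict is
// resolved by VmaAlignUp(0x1100, 0x1000) = 0x2000.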
7695 
7696 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7697 {
7698  VMA_ASSERT(item != m_Suballocations.end());
7699  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7700 
7701  VmaSuballocationList::iterator nextItem = item;
7702  ++nextItem;
7703  VMA_ASSERT(nextItem != m_Suballocations.end());
7704  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7705 
7706  item->size += nextItem->size;
7707  --m_FreeCount;
7708  m_Suballocations.erase(nextItem);
7709 }
7710 
7711 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7712 {
7713  // Change this suballocation to be marked as free.
7714  VmaSuballocation& suballoc = *suballocItem;
7715  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7716  suballoc.hAllocation = VK_NULL_HANDLE;
7717 
7718  // Update totals.
7719  ++m_FreeCount;
7720  m_SumFreeSize += suballoc.size;
7721 
7722  // Merge with previous and/or next suballocation if it's also free.
7723  bool mergeWithNext = false;
7724  bool mergeWithPrev = false;
7725 
7726  VmaSuballocationList::iterator nextItem = suballocItem;
7727  ++nextItem;
7728  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7729  {
7730  mergeWithNext = true;
7731  }
7732 
7733  VmaSuballocationList::iterator prevItem = suballocItem;
7734  if(suballocItem != m_Suballocations.begin())
7735  {
7736  --prevItem;
7737  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7738  {
7739  mergeWithPrev = true;
7740  }
7741  }
7742 
7743  if(mergeWithNext)
7744  {
7745  UnregisterFreeSuballocation(nextItem);
7746  MergeFreeWithNext(suballocItem);
7747  }
7748 
7749  if(mergeWithPrev)
7750  {
7751  UnregisterFreeSuballocation(prevItem);
7752  MergeFreeWithNext(prevItem);
7753  RegisterFreeSuballocation(prevItem);
7754  return prevItem;
7755  }
7756  else
7757  {
7758  RegisterFreeSuballocation(suballocItem);
7759  return suballocItem;
7760  }
7761 }
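
// FreeSuballocation above is standard free-list coalescing: mark the range
// free, then merge with a free right neighbor and/or a free left neighbor so
// that two free ranges are never adjacent (the invariant Validate() checks).
// A minimal standalone sketch of the technique - not library code, excluded
// from compilation:
#if 0
#include <cstdint>
#include <list>

struct RangeSketch { uint64_t offset; uint64_t size; bool free; };

static std::list<RangeSketch>::iterator FreeRangeSketch(
    std::list<RangeSketch>& ranges, std::list<RangeSketch>::iterator it)
{
    it->free = true;
    std::list<RangeSketch>::iterator next = it;
    ++next;
    if(next != ranges.end() && next->free)
    {
        // Absorb the free right neighbor.
        it->size += next->size;
        ranges.erase(next);
    }
    if(it != ranges.begin())
    {
        std::list<RangeSketch>::iterator prev = it;
        --prev;
        if(prev->free)
        {
            // Get absorbed into the free left neighbor.
            prev->size += it->size;
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}
#endif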
7762 
7763 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7764 {
7765  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7766  VMA_ASSERT(item->size > 0);
7767 
7768  // You may want to enable this validation at the beginning or at the end of
7769  // this function, depending on what you want to check.
7770  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7771 
7772  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7773  {
7774  if(m_FreeSuballocationsBySize.empty())
7775  {
7776  m_FreeSuballocationsBySize.push_back(item);
7777  }
7778  else
7779  {
7780  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7781  }
7782  }
7783 
7784  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7785 }
7786 
7787 
7788 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7789 {
7790  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7791  VMA_ASSERT(item->size > 0);
7792 
7793  // You may want to enable this validation at the beginning or at the end of
7794  // this function, depending on what you want to check.
7795  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7796 
7797  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7798  {
7799  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7800  m_FreeSuballocationsBySize.data(),
7801  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7802  item,
7803  VmaSuballocationItemSizeLess());
7804  for(size_t index = it - m_FreeSuballocationsBySize.data();
7805  index < m_FreeSuballocationsBySize.size();
7806  ++index)
7807  {
7808  if(m_FreeSuballocationsBySize[index] == item)
7809  {
7810  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7811  return;
7812  }
7813  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7814  }
7815  VMA_ASSERT(0 && "Not found.");
7816  }
7817 
7818  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7819 }
7820 
7821 ////////////////////////////////////////////////////////////////////////////////
7822 // class VmaBlockMetadata_Linear
7823 
7824 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7825  VmaBlockMetadata(hAllocator),
7826  m_SumFreeSize(0),
7827  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7828  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7829  m_1stVectorIndex(0),
7830  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7831  m_1stNullItemsBeginCount(0),
7832  m_1stNullItemsMiddleCount(0),
7833  m_2ndNullItemsCount(0)
7834 {
7835 }
7836 
7837 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7838 {
7839 }
7840 
7841 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7842 {
7843  VmaBlockMetadata::Init(size);
7844  m_SumFreeSize = size;
7845 }
7846 
7847 bool VmaBlockMetadata_Linear::Validate() const
7848 {
7849  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7850  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7851 
7852  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7853  VMA_VALIDATE(!suballocations1st.empty() ||
7854  suballocations2nd.empty() ||
7855  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7856 
7857  if(!suballocations1st.empty())
7858  {
7859  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7860  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7861  // Null item at the end should have been removed by pop_back().
7862  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7863  }
7864  if(!suballocations2nd.empty())
7865  {
7866  // Null item at the end should have been removed by pop_back().
7867  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7868  }
7869 
7870  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7871  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7872 
7873  VkDeviceSize sumUsedSize = 0;
7874  const size_t suballoc1stCount = suballocations1st.size();
7875  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7876 
7877  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7878  {
7879  const size_t suballoc2ndCount = suballocations2nd.size();
7880  size_t nullItem2ndCount = 0;
7881  for(size_t i = 0; i < suballoc2ndCount; ++i)
7882  {
7883  const VmaSuballocation& suballoc = suballocations2nd[i];
7884  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7885 
7886  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7887  VMA_VALIDATE(suballoc.offset >= offset);
7888 
7889  if(!currFree)
7890  {
7891  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7892  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7893  sumUsedSize += suballoc.size;
7894  }
7895  else
7896  {
7897  ++nullItem2ndCount;
7898  }
7899 
7900  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7901  }
7902 
7903  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7904  }
7905 
7906  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7907  {
7908  const VmaSuballocation& suballoc = suballocations1st[i];
7909  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7910  suballoc.hAllocation == VK_NULL_HANDLE);
7911  }
7912 
7913  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7914 
7915  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7916  {
7917  const VmaSuballocation& suballoc = suballocations1st[i];
7918  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7919 
7920  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7921  VMA_VALIDATE(suballoc.offset >= offset);
7922  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7923 
7924  if(!currFree)
7925  {
7926  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7927  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7928  sumUsedSize += suballoc.size;
7929  }
7930  else
7931  {
7932  ++nullItem1stCount;
7933  }
7934 
7935  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7936  }
7937  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7938 
7939  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7940  {
7941  const size_t suballoc2ndCount = suballocations2nd.size();
7942  size_t nullItem2ndCount = 0;
7943  for(size_t i = suballoc2ndCount; i--; )
7944  {
7945  const VmaSuballocation& suballoc = suballocations2nd[i];
7946  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7947 
7948  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7949  VMA_VALIDATE(suballoc.offset >= offset);
7950 
7951  if(!currFree)
7952  {
7953  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7954  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7955  sumUsedSize += suballoc.size;
7956  }
7957  else
7958  {
7959  ++nullItem2ndCount;
7960  }
7961 
7962  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7963  }
7964 
7965  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7966  }
7967 
7968  VMA_VALIDATE(offset <= GetSize());
7969  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7970 
7971  return true;
7972 }
7973 
7974 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7975 {
7976  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7977  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7978 }
7979 
7980 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7981 {
7982  const VkDeviceSize size = GetSize();
7983 
7984  /*
7985  We don't consider gaps inside allocation vectors with freed allocations because
7986  they are not suitable for reuse in a linear allocator. We consider only space that
7987  is available for new allocations.
7988  */
7989  if(IsEmpty())
7990  {
7991  return size;
7992  }
7993 
7994  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7995 
7996  switch(m_2ndVectorMode)
7997  {
7998  case SECOND_VECTOR_EMPTY:
7999  /*
8000  Available space is after end of 1st, as well as before beginning of 1st (which
8001  would make it a ring buffer).
8002  */
8003  {
8004  const size_t suballocations1stCount = suballocations1st.size();
8005  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8006  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8007  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8008  return VMA_MAX(
8009  firstSuballoc.offset,
8010  size - (lastSuballoc.offset + lastSuballoc.size));
8011  }
8012  break;
8013 
8014  case SECOND_VECTOR_RING_BUFFER:
8015  /*
8016  Available space is only between end of 2nd and beginning of 1st.
8017  */
8018  {
8019  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8020  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8021  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8022  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8023  }
8024  break;
8025 
8026  case SECOND_VECTOR_DOUBLE_STACK:
8027  /*
8028  Available space is only between end of 1st and top of 2nd.
8029  */
8030  {
8031  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8032  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8033  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8034  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8035  }
8036  break;
8037 
8038  default:
8039  VMA_ASSERT(0);
8040  return 0;
8041  }
8042 }
8043 
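// CalcAllocationStatInfo, AddPoolStats and PrintDetailedMap below all use the
// same three-pass sweep over the block: first the 2nd vector when it acts as
// a ring buffer (it occupies the low offsets), then the 1st vector, then the
// 2nd vector again when it acts as a double stack (it occupies the high
// offsets). Null (freed) items are skipped, and each gap between lastOffset
// and the next non-null suballocation is reported as an unused range.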
8044 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8045 {
8046  const VkDeviceSize size = GetSize();
8047  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8048  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8049  const size_t suballoc1stCount = suballocations1st.size();
8050  const size_t suballoc2ndCount = suballocations2nd.size();
8051 
8052  outInfo.blockCount = 1;
8053  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8054  outInfo.unusedRangeCount = 0;
8055  outInfo.usedBytes = outInfo.unusedBytes = 0;
8056  outInfo.allocationSizeMin = UINT64_MAX;
8057  outInfo.allocationSizeMax = 0;
8058  outInfo.unusedRangeSizeMin = UINT64_MAX;
8059  outInfo.unusedRangeSizeMax = 0;
8060 
8061  VkDeviceSize lastOffset = 0;
8062 
8063  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8064  {
8065  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8066  size_t nextAlloc2ndIndex = 0;
8067  while(lastOffset < freeSpace2ndTo1stEnd)
8068  {
8069  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8070  while(nextAlloc2ndIndex < suballoc2ndCount &&
8071  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8072  {
8073  ++nextAlloc2ndIndex;
8074  }
8075 
8076  // Found non-null allocation.
8077  if(nextAlloc2ndIndex < suballoc2ndCount)
8078  {
8079  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8080 
8081  // 1. Process free space before this allocation.
8082  if(lastOffset < suballoc.offset)
8083  {
8084  // There is free space from lastOffset to suballoc.offset.
8085  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8086  ++outInfo.unusedRangeCount;
8087  outInfo.unusedBytes += unusedRangeSize;
8088  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8089  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8090  }
8091 
8092  // 2. Process this allocation.
8093  // There is allocation with suballoc.offset, suballoc.size.
8094  outInfo.usedBytes += suballoc.size;
8095  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8096  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8097 
8098  // 3. Prepare for next iteration.
8099  lastOffset = suballoc.offset + suballoc.size;
8100  ++nextAlloc2ndIndex;
8101  }
8102  // We are at the end.
8103  else
8104  {
8105  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8106  if(lastOffset < freeSpace2ndTo1stEnd)
8107  {
8108  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8109  ++outInfo.unusedRangeCount;
8110  outInfo.unusedBytes += unusedRangeSize;
8111  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8112  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8113  }
8114 
8115  // End of loop.
8116  lastOffset = freeSpace2ndTo1stEnd;
8117  }
8118  }
8119  }
8120 
8121  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8122  const VkDeviceSize freeSpace1stTo2ndEnd =
8123  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8124  while(lastOffset < freeSpace1stTo2ndEnd)
8125  {
8126  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8127  while(nextAlloc1stIndex < suballoc1stCount &&
8128  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8129  {
8130  ++nextAlloc1stIndex;
8131  }
8132 
8133  // Found non-null allocation.
8134  if(nextAlloc1stIndex < suballoc1stCount)
8135  {
8136  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8137 
8138  // 1. Process free space before this allocation.
8139  if(lastOffset < suballoc.offset)
8140  {
8141  // There is free space from lastOffset to suballoc.offset.
8142  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8143  ++outInfo.unusedRangeCount;
8144  outInfo.unusedBytes += unusedRangeSize;
8145  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8146  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8147  }
8148 
8149  // 2. Process this allocation.
8150  // There is allocation with suballoc.offset, suballoc.size.
8151  outInfo.usedBytes += suballoc.size;
8152  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8153  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8154 
8155  // 3. Prepare for next iteration.
8156  lastOffset = suballoc.offset + suballoc.size;
8157  ++nextAlloc1stIndex;
8158  }
8159  // We are at the end.
8160  else
8161  {
8162  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8163  if(lastOffset < freeSpace1stTo2ndEnd)
8164  {
8165  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8166  ++outInfo.unusedRangeCount;
8167  outInfo.unusedBytes += unusedRangeSize;
8168  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8169  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8170  }
8171 
8172  // End of loop.
8173  lastOffset = freeSpace1stTo2ndEnd;
8174  }
8175  }
8176 
8177  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8178  {
8179  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8180  while(lastOffset < size)
8181  {
8182  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8183  while(nextAlloc2ndIndex != SIZE_MAX &&
8184  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8185  {
8186  --nextAlloc2ndIndex;
8187  }
8188 
8189  // Found non-null allocation.
8190  if(nextAlloc2ndIndex != SIZE_MAX)
8191  {
8192  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8193 
8194  // 1. Process free space before this allocation.
8195  if(lastOffset < suballoc.offset)
8196  {
8197  // There is free space from lastOffset to suballoc.offset.
8198  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8199  ++outInfo.unusedRangeCount;
8200  outInfo.unusedBytes += unusedRangeSize;
8201  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8202  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8203  }
8204 
8205  // 2. Process this allocation.
8206  // There is allocation with suballoc.offset, suballoc.size.
8207  outInfo.usedBytes += suballoc.size;
8208  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8209  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8210 
8211  // 3. Prepare for next iteration.
8212  lastOffset = suballoc.offset + suballoc.size;
8213  --nextAlloc2ndIndex;
8214  }
8215  // We are at the end.
8216  else
8217  {
8218  // There is free space from lastOffset to size.
8219  if(lastOffset < size)
8220  {
8221  const VkDeviceSize unusedRangeSize = size - lastOffset;
8222  ++outInfo.unusedRangeCount;
8223  outInfo.unusedBytes += unusedRangeSize;
8224  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8225  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8226  }
8227 
8228  // End of loop.
8229  lastOffset = size;
8230  }
8231  }
8232  }
8233 
8234  outInfo.unusedBytes = size - outInfo.usedBytes;
8235 }
8236 
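// Worked example for the statistics above (hypothetical numbers): for a block
// of size 100 with live allocations at [10, 20) and [50, 70), the sweep
// reports usedBytes = 30 and three unused ranges - [0, 10), [20, 50) and
// [70, 100) - so unusedRangeCount = 3, unusedBytes = 70,
// unusedRangeSizeMax = 30 and unusedRangeSizeMin = 10.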
8237 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8238 {
8239  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8240  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8241  const VkDeviceSize size = GetSize();
8242  const size_t suballoc1stCount = suballocations1st.size();
8243  const size_t suballoc2ndCount = suballocations2nd.size();
8244 
8245  inoutStats.size += size;
8246 
8247  VkDeviceSize lastOffset = 0;
8248 
8249  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8250  {
8251  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8252  size_t nextAlloc2ndIndex = 0;
8253  while(lastOffset < freeSpace2ndTo1stEnd)
8254  {
8255  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8256  while(nextAlloc2ndIndex < suballoc2ndCount &&
8257  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8258  {
8259  ++nextAlloc2ndIndex;
8260  }
8261 
8262  // Found non-null allocation.
8263  if(nextAlloc2ndIndex < suballoc2ndCount)
8264  {
8265  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8266 
8267  // 1. Process free space before this allocation.
8268  if(lastOffset < suballoc.offset)
8269  {
8270  // There is free space from lastOffset to suballoc.offset.
8271  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8272  inoutStats.unusedSize += unusedRangeSize;
8273  ++inoutStats.unusedRangeCount;
8274  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8275  }
8276 
8277  // 2. Process this allocation.
8278  // There is allocation with suballoc.offset, suballoc.size.
8279  ++inoutStats.allocationCount;
8280 
8281  // 3. Prepare for next iteration.
8282  lastOffset = suballoc.offset + suballoc.size;
8283  ++nextAlloc2ndIndex;
8284  }
8285  // We are at the end.
8286  else
8287  {
8288  if(lastOffset < freeSpace2ndTo1stEnd)
8289  {
8290  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8291  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8292  inoutStats.unusedSize += unusedRangeSize;
8293  ++inoutStats.unusedRangeCount;
8294  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8295  }
8296 
8297  // End of loop.
8298  lastOffset = freeSpace2ndTo1stEnd;
8299  }
8300  }
8301  }
8302 
8303  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8304  const VkDeviceSize freeSpace1stTo2ndEnd =
8305  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8306  while(lastOffset < freeSpace1stTo2ndEnd)
8307  {
8308  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8309  while(nextAlloc1stIndex < suballoc1stCount &&
8310  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8311  {
8312  ++nextAlloc1stIndex;
8313  }
8314 
8315  // Found non-null allocation.
8316  if(nextAlloc1stIndex < suballoc1stCount)
8317  {
8318  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8319 
8320  // 1. Process free space before this allocation.
8321  if(lastOffset < suballoc.offset)
8322  {
8323  // There is free space from lastOffset to suballoc.offset.
8324  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8325  inoutStats.unusedSize += unusedRangeSize;
8326  ++inoutStats.unusedRangeCount;
8327  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8328  }
8329 
8330  // 2. Process this allocation.
8331  // There is allocation with suballoc.offset, suballoc.size.
8332  ++inoutStats.allocationCount;
8333 
8334  // 3. Prepare for next iteration.
8335  lastOffset = suballoc.offset + suballoc.size;
8336  ++nextAlloc1stIndex;
8337  }
8338  // We are at the end.
8339  else
8340  {
8341  if(lastOffset < freeSpace1stTo2ndEnd)
8342  {
8343  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8344  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8345  inoutStats.unusedSize += unusedRangeSize;
8346  ++inoutStats.unusedRangeCount;
8347  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8348  }
8349 
8350  // End of loop.
8351  lastOffset = freeSpace1stTo2ndEnd;
8352  }
8353  }
8354 
8355  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8356  {
8357  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8358  while(lastOffset < size)
8359  {
8360  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8361  while(nextAlloc2ndIndex != SIZE_MAX &&
8362  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8363  {
8364  --nextAlloc2ndIndex;
8365  }
8366 
8367  // Found non-null allocation.
8368  if(nextAlloc2ndIndex != SIZE_MAX)
8369  {
8370  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8371 
8372  // 1. Process free space before this allocation.
8373  if(lastOffset < suballoc.offset)
8374  {
8375  // There is free space from lastOffset to suballoc.offset.
8376  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8377  inoutStats.unusedSize += unusedRangeSize;
8378  ++inoutStats.unusedRangeCount;
8379  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8380  }
8381 
8382  // 2. Process this allocation.
8383  // There is allocation with suballoc.offset, suballoc.size.
8384  ++inoutStats.allocationCount;
8385 
8386  // 3. Prepare for next iteration.
8387  lastOffset = suballoc.offset + suballoc.size;
8388  --nextAlloc2ndIndex;
8389  }
8390  // We are at the end.
8391  else
8392  {
8393  if(lastOffset < size)
8394  {
8395  // There is free space from lastOffset to size.
8396  const VkDeviceSize unusedRangeSize = size - lastOffset;
8397  inoutStats.unusedSize += unusedRangeSize;
8398  ++inoutStats.unusedRangeCount;
8399  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8400  }
8401 
8402  // End of loop.
8403  lastOffset = size;
8404  }
8405  }
8406  }
8407 }
8408 
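// PrintDetailedMap below needs totals before it can emit any items, because
// PrintDetailedMap_Begin() writes the summary header first. Hence the two
// sweeps: the FIRST PASS only counts allocations, unused ranges and used
// bytes; the SECOND PASS replays the same walk and emits the JSON entries.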
8409 #if VMA_STATS_STRING_ENABLED
8410 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8411 {
8412  const VkDeviceSize size = GetSize();
8413  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8414  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8415  const size_t suballoc1stCount = suballocations1st.size();
8416  const size_t suballoc2ndCount = suballocations2nd.size();
8417 
8418  // FIRST PASS
8419 
8420  size_t unusedRangeCount = 0;
8421  VkDeviceSize usedBytes = 0;
8422 
8423  VkDeviceSize lastOffset = 0;
8424 
8425  size_t alloc2ndCount = 0;
8426  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8427  {
8428  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8429  size_t nextAlloc2ndIndex = 0;
8430  while(lastOffset < freeSpace2ndTo1stEnd)
8431  {
8432  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8433  while(nextAlloc2ndIndex < suballoc2ndCount &&
8434  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8435  {
8436  ++nextAlloc2ndIndex;
8437  }
8438 
8439  // Found non-null allocation.
8440  if(nextAlloc2ndIndex < suballoc2ndCount)
8441  {
8442  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8443 
8444  // 1. Process free space before this allocation.
8445  if(lastOffset < suballoc.offset)
8446  {
8447  // There is free space from lastOffset to suballoc.offset.
8448  ++unusedRangeCount;
8449  }
8450 
8451  // 2. Process this allocation.
8452  // There is allocation with suballoc.offset, suballoc.size.
8453  ++alloc2ndCount;
8454  usedBytes += suballoc.size;
8455 
8456  // 3. Prepare for next iteration.
8457  lastOffset = suballoc.offset + suballoc.size;
8458  ++nextAlloc2ndIndex;
8459  }
8460  // We are at the end.
8461  else
8462  {
8463  if(lastOffset < freeSpace2ndTo1stEnd)
8464  {
8465  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8466  ++unusedRangeCount;
8467  }
8468 
8469  // End of loop.
8470  lastOffset = freeSpace2ndTo1stEnd;
8471  }
8472  }
8473  }
8474 
8475  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8476  size_t alloc1stCount = 0;
8477  const VkDeviceSize freeSpace1stTo2ndEnd =
8478  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8479  while(lastOffset < freeSpace1stTo2ndEnd)
8480  {
8481  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8482  while(nextAlloc1stIndex < suballoc1stCount &&
8483  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8484  {
8485  ++nextAlloc1stIndex;
8486  }
8487 
8488  // Found non-null allocation.
8489  if(nextAlloc1stIndex < suballoc1stCount)
8490  {
8491  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8492 
8493  // 1. Process free space before this allocation.
8494  if(lastOffset < suballoc.offset)
8495  {
8496  // There is free space from lastOffset to suballoc.offset.
8497  ++unusedRangeCount;
8498  }
8499 
8500  // 2. Process this allocation.
8501  // There is allocation with suballoc.offset, suballoc.size.
8502  ++alloc1stCount;
8503  usedBytes += suballoc.size;
8504 
8505  // 3. Prepare for next iteration.
8506  lastOffset = suballoc.offset + suballoc.size;
8507  ++nextAlloc1stIndex;
8508  }
8509  // We are at the end.
8510  else
8511  {
8512  if(lastOffset < freeSpace1stTo2ndEnd)
8513  {
8514  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8515  ++unusedRangeCount;
8516  }
8517 
8518  // End of loop.
8519  lastOffset = freeSpace1stTo2ndEnd;
8520  }
8521  }
8522 
8523  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8524  {
8525  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8526  while(lastOffset < size)
8527  {
8528  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8529  while(nextAlloc2ndIndex != SIZE_MAX &&
8530  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8531  {
8532  --nextAlloc2ndIndex;
8533  }
8534 
8535  // Found non-null allocation.
8536  if(nextAlloc2ndIndex != SIZE_MAX)
8537  {
8538  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8539 
8540  // 1. Process free space before this allocation.
8541  if(lastOffset < suballoc.offset)
8542  {
8543  // There is free space from lastOffset to suballoc.offset.
8544  ++unusedRangeCount;
8545  }
8546 
8547  // 2. Process this allocation.
8548  // There is allocation with suballoc.offset, suballoc.size.
8549  ++alloc2ndCount;
8550  usedBytes += suballoc.size;
8551 
8552  // 3. Prepare for next iteration.
8553  lastOffset = suballoc.offset + suballoc.size;
8554  --nextAlloc2ndIndex;
8555  }
8556  // We are at the end.
8557  else
8558  {
8559  if(lastOffset < size)
8560  {
8561  // There is free space from lastOffset to size.
8562  ++unusedRangeCount;
8563  }
8564 
8565  // End of loop.
8566  lastOffset = size;
8567  }
8568  }
8569  }
8570 
8571  const VkDeviceSize unusedBytes = size - usedBytes;
8572  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8573 
8574  // SECOND PASS
8575  lastOffset = 0;
8576 
8577  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8578  {
8579  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8580  size_t nextAlloc2ndIndex = 0;
8581  while(lastOffset < freeSpace2ndTo1stEnd)
8582  {
8583  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8584  while(nextAlloc2ndIndex < suballoc2ndCount &&
8585  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8586  {
8587  ++nextAlloc2ndIndex;
8588  }
8589 
8590  // Found non-null allocation.
8591  if(nextAlloc2ndIndex < suballoc2ndCount)
8592  {
8593  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8594 
8595  // 1. Process free space before this allocation.
8596  if(lastOffset < suballoc.offset)
8597  {
8598  // There is free space from lastOffset to suballoc.offset.
8599  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8600  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8601  }
8602 
8603  // 2. Process this allocation.
8604  // There is allocation with suballoc.offset, suballoc.size.
8605  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8606 
8607  // 3. Prepare for next iteration.
8608  lastOffset = suballoc.offset + suballoc.size;
8609  ++nextAlloc2ndIndex;
8610  }
8611  // We are at the end.
8612  else
8613  {
8614  if(lastOffset < freeSpace2ndTo1stEnd)
8615  {
8616  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8617  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8618  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8619  }
8620 
8621  // End of loop.
8622  lastOffset = freeSpace2ndTo1stEnd;
8623  }
8624  }
8625  }
8626 
8627  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8628  while(lastOffset < freeSpace1stTo2ndEnd)
8629  {
8630  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8631  while(nextAlloc1stIndex < suballoc1stCount &&
8632  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8633  {
8634  ++nextAlloc1stIndex;
8635  }
8636 
8637  // Found non-null allocation.
8638  if(nextAlloc1stIndex < suballoc1stCount)
8639  {
8640  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8641 
8642  // 1. Process free space before this allocation.
8643  if(lastOffset < suballoc.offset)
8644  {
8645  // There is free space from lastOffset to suballoc.offset.
8646  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8647  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8648  }
8649 
8650  // 2. Process this allocation.
8651  // There is allocation with suballoc.offset, suballoc.size.
8652  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8653 
8654  // 3. Prepare for next iteration.
8655  lastOffset = suballoc.offset + suballoc.size;
8656  ++nextAlloc1stIndex;
8657  }
8658  // We are at the end.
8659  else
8660  {
8661  if(lastOffset < freeSpace1stTo2ndEnd)
8662  {
8663  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8664  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8665  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8666  }
8667 
8668  // End of loop.
8669  lastOffset = freeSpace1stTo2ndEnd;
8670  }
8671  }
8672 
8673  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8674  {
8675  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8676  while(lastOffset < size)
8677  {
8678  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8679  while(nextAlloc2ndIndex != SIZE_MAX &&
8680  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8681  {
8682  --nextAlloc2ndIndex;
8683  }
8684 
8685  // Found non-null allocation.
8686  if(nextAlloc2ndIndex != SIZE_MAX)
8687  {
8688  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8689 
8690  // 1. Process free space before this allocation.
8691  if(lastOffset < suballoc.offset)
8692  {
8693  // There is free space from lastOffset to suballoc.offset.
8694  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8695  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8696  }
8697 
8698  // 2. Process this allocation.
8699  // There is allocation with suballoc.offset, suballoc.size.
8700  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8701 
8702  // 3. Prepare for next iteration.
8703  lastOffset = suballoc.offset + suballoc.size;
8704  --nextAlloc2ndIndex;
8705  }
8706  // We are at the end.
8707  else
8708  {
8709  if(lastOffset < size)
8710  {
8711  // There is free space from lastOffset to size.
8712  const VkDeviceSize unusedRangeSize = size - lastOffset;
8713  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8714  }
8715 
8716  // End of loop.
8717  lastOffset = size;
8718  }
8719  }
8720  }
8721 
8722  PrintDetailedMap_End(json);
8723 }
8724 #endif // #if VMA_STATS_STRING_ENABLED
8725 
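/*
CreateAllocationRequest below implements the placement strategies of the
linear algorithm: upper-address requests go before 2nd.back() (double stack),
other requests go after 1st.back(), falling back to a wrap-around before the
beginning of 1st (ring buffer). For reference, a pool using this metadata is
created through the public API roughly like this (a sketch, not taken from
this file; memTypeIndex and blockSize are placeholders, error handling
omitted):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex().
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = blockSize;
    poolCreateInfo.maxBlockCount = 1; // Linear pools use a single block.

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

Allocations made with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT then take the
upperAddress branch below; plain allocations take the lower branch.
*/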
8726 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8727  uint32_t currentFrameIndex,
8728  uint32_t frameInUseCount,
8729  VkDeviceSize bufferImageGranularity,
8730  VkDeviceSize allocSize,
8731  VkDeviceSize allocAlignment,
8732  bool upperAddress,
8733  VmaSuballocationType allocType,
8734  bool canMakeOtherLost,
8735  uint32_t strategy,
8736  VmaAllocationRequest* pAllocationRequest)
8737 {
8738  VMA_ASSERT(allocSize > 0);
8739  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8740  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8741  VMA_HEAVY_ASSERT(Validate());
8742 
8743  const VkDeviceSize size = GetSize();
8744  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8745  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8746 
8747  if(upperAddress)
8748  {
8749  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8750  {
8751  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8752  return false;
8753  }
8754 
8755  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8756  if(allocSize > size)
8757  {
8758  return false;
8759  }
8760  VkDeviceSize resultBaseOffset = size - allocSize;
8761  if(!suballocations2nd.empty())
8762  {
8763  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8764  resultBaseOffset = lastSuballoc.offset - allocSize;
8765  if(allocSize > lastSuballoc.offset)
8766  {
8767  return false;
8768  }
8769  }
8770 
8771  // Start from offset equal to end of free space.
8772  VkDeviceSize resultOffset = resultBaseOffset;
8773 
8774  // Apply VMA_DEBUG_MARGIN at the end.
8775  if(VMA_DEBUG_MARGIN > 0)
8776  {
8777  if(resultOffset < VMA_DEBUG_MARGIN)
8778  {
8779  return false;
8780  }
8781  resultOffset -= VMA_DEBUG_MARGIN;
8782  }
8783 
8784  // Apply alignment.
8785  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8786 
8787  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8788  // Make bigger alignment if necessary.
8789  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8790  {
8791  bool bufferImageGranularityConflict = false;
8792  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8793  {
8794  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8795  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8796  {
8797  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8798  {
8799  bufferImageGranularityConflict = true;
8800  break;
8801  }
8802  }
8803  else
8804  // Already on previous page.
8805  break;
8806  }
8807  if(bufferImageGranularityConflict)
8808  {
8809  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8810  }
8811  }
8812 
8813  // There is enough free space.
8814  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8815  suballocations1st.back().offset + suballocations1st.back().size :
8816  0;
8817  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8818  {
8819  // Check previous suballocations for BufferImageGranularity conflicts.
8820  // If conflict exists, allocation cannot be made here.
8821  if(bufferImageGranularity > 1)
8822  {
8823  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8824  {
8825  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8826  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8827  {
8828  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8829  {
8830  return false;
8831  }
8832  }
8833  else
8834  {
8835  // Already on next page.
8836  break;
8837  }
8838  }
8839  }
8840 
8841  // All tests passed: Success.
8842  pAllocationRequest->offset = resultOffset;
8843  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8844  pAllocationRequest->sumItemSize = 0;
8845  // pAllocationRequest->item unused.
8846  pAllocationRequest->itemsToMakeLostCount = 0;
8847  return true;
8848  }
8849  }
8850  else // !upperAddress
8851  {
8852  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8853  {
8854  // Try to allocate at the end of 1st vector.
8855 
8856  VkDeviceSize resultBaseOffset = 0;
8857  if(!suballocations1st.empty())
8858  {
8859  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8860  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8861  }
8862 
8863  // Start from offset equal to beginning of free space.
8864  VkDeviceSize resultOffset = resultBaseOffset;
8865 
8866  // Apply VMA_DEBUG_MARGIN at the beginning.
8867  if(VMA_DEBUG_MARGIN > 0)
8868  {
8869  resultOffset += VMA_DEBUG_MARGIN;
8870  }
8871 
8872  // Apply alignment.
8873  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8874 
8875  // Check previous suballocations for BufferImageGranularity conflicts.
8876  // Make bigger alignment if necessary.
8877  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8878  {
8879  bool bufferImageGranularityConflict = false;
8880  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8881  {
8882  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8883  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8884  {
8885  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8886  {
8887  bufferImageGranularityConflict = true;
8888  break;
8889  }
8890  }
8891  else
8892  // Already on previous page.
8893  break;
8894  }
8895  if(bufferImageGranularityConflict)
8896  {
8897  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8898  }
8899  }
8900 
8901  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8902  suballocations2nd.back().offset : size;
8903 
8904  // There is enough free space at the end after alignment.
8905  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8906  {
8907  // Check next suballocations for BufferImageGranularity conflicts.
8908  // If conflict exists, allocation cannot be made here.
8909  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8910  {
8911  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8912  {
8913  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8914  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8915  {
8916  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8917  {
8918  return false;
8919  }
8920  }
8921  else
8922  {
8923  // Already on previous page.
8924  break;
8925  }
8926  }
8927  }
8928 
8929  // All tests passed: Success.
8930  pAllocationRequest->offset = resultOffset;
8931  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8932  pAllocationRequest->sumItemSize = 0;
8933  // pAllocationRequest->item unused.
8934  pAllocationRequest->itemsToMakeLostCount = 0;
8935  return true;
8936  }
8937  }
8938 
8939  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8940  // beginning of 1st vector as the end of free space.
8941  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8942  {
8943  VMA_ASSERT(!suballocations1st.empty());
8944 
8945  VkDeviceSize resultBaseOffset = 0;
8946  if(!suballocations2nd.empty())
8947  {
8948  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8949  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8950  }
8951 
8952  // Start from offset equal to beginning of free space.
8953  VkDeviceSize resultOffset = resultBaseOffset;
8954 
8955  // Apply VMA_DEBUG_MARGIN at the beginning.
8956  if(VMA_DEBUG_MARGIN > 0)
8957  {
8958  resultOffset += VMA_DEBUG_MARGIN;
8959  }
8960 
8961  // Apply alignment.
8962  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8963 
8964  // Check previous suballocations for BufferImageGranularity conflicts.
8965  // Make bigger alignment if necessary.
8966  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8967  {
8968  bool bufferImageGranularityConflict = false;
8969  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8970  {
8971  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8972  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8973  {
8974  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8975  {
8976  bufferImageGranularityConflict = true;
8977  break;
8978  }
8979  }
8980  else
8981  // Already on previous page.
8982  break;
8983  }
8984  if(bufferImageGranularityConflict)
8985  {
8986  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8987  }
8988  }
8989 
8990  pAllocationRequest->itemsToMakeLostCount = 0;
8991  pAllocationRequest->sumItemSize = 0;
8992  size_t index1st = m_1stNullItemsBeginCount;
8993 
8994  if(canMakeOtherLost)
8995  {
8996  while(index1st < suballocations1st.size() &&
8997  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8998  {
8999  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
9000  const VmaSuballocation& suballoc = suballocations1st[index1st];
9001  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9002  {
9003  // No problem.
9004  }
9005  else
9006  {
9007  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9008  if(suballoc.hAllocation->CanBecomeLost() &&
9009  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9010  {
9011  ++pAllocationRequest->itemsToMakeLostCount;
9012  pAllocationRequest->sumItemSize += suballoc.size;
9013  }
9014  else
9015  {
9016  return false;
9017  }
9018  }
9019  ++index1st;
9020  }
9021 
9022  // Check next suballocations for BufferImageGranularity conflicts.
9023  // If conflict exists, we must mark more allocations lost or fail.
9024  if(bufferImageGranularity > 1)
9025  {
9026  while(index1st < suballocations1st.size())
9027  {
9028  const VmaSuballocation& suballoc = suballocations1st[index1st];
9029  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9030  {
9031  if(suballoc.hAllocation != VK_NULL_HANDLE)
9032  {
9033  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9034  if(suballoc.hAllocation->CanBecomeLost() &&
9035  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9036  {
9037  ++pAllocationRequest->itemsToMakeLostCount;
9038  pAllocationRequest->sumItemSize += suballoc.size;
9039  }
9040  else
9041  {
9042  return false;
9043  }
9044  }
9045  }
9046  else
9047  {
9048  // Already on next page.
9049  break;
9050  }
9051  ++index1st;
9052  }
9053  }
9054  }
9055 
9056  // There is enough free space at the end after alignment.
9057  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9058  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9059  {
9060  // Check next suballocations for BufferImageGranularity conflicts.
9061  // If conflict exists, allocation cannot be made here.
9062  if(bufferImageGranularity > 1)
9063  {
9064  for(size_t nextSuballocIndex = index1st;
9065  nextSuballocIndex < suballocations1st.size();
9066  nextSuballocIndex++)
9067  {
9068  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9069  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9070  {
9071  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9072  {
9073  return false;
9074  }
9075  }
9076  else
9077  {
9078  // Already on next page.
9079  break;
9080  }
9081  }
9082  }
9083 
9084  // All tests passed: Success.
9085  pAllocationRequest->offset = resultOffset;
9086  pAllocationRequest->sumFreeSize =
9087  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9088  - resultBaseOffset
9089  - pAllocationRequest->sumItemSize;
9090  // pAllocationRequest->item unused.
9091  return true;
9092  }
9093  }
9094  }
9095 
9096  return false;
9097 }
9098 
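/*
Lost allocations: an allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT may be reclaimed once it has not
been used for more than frameInUseCount frames, which is what the
GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex checks above
test. CreateAllocationRequest() only counts such victims in
itemsToMakeLostCount; MakeRequestedAllocationsLost() below actually frees
them. On the application side the protocol is roughly (a sketch):

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation became lost - destroy and recreate the resource.
    }
*/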
9099 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9100  uint32_t currentFrameIndex,
9101  uint32_t frameInUseCount,
9102  VmaAllocationRequest* pAllocationRequest)
9103 {
9104  if(pAllocationRequest->itemsToMakeLostCount == 0)
9105  {
9106  return true;
9107  }
9108 
9109  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9110 
9111  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9112  size_t index1st = m_1stNullItemsBeginCount;
9113  size_t madeLostCount = 0;
9114  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9115  {
9116  VMA_ASSERT(index1st < suballocations1st.size());
9117  VmaSuballocation& suballoc = suballocations1st[index1st];
9118  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9119  {
9120  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9121  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9122  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9123  {
9124  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9125  suballoc.hAllocation = VK_NULL_HANDLE;
9126  m_SumFreeSize += suballoc.size;
9127  ++m_1stNullItemsMiddleCount;
9128  ++madeLostCount;
9129  }
9130  else
9131  {
9132  return false;
9133  }
9134  }
9135  ++index1st;
9136  }
9137 
9138  CleanupAfterFree();
9139  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9140 
9141  return true;
9142 }
9143 
9144 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9145 {
9146  uint32_t lostAllocationCount = 0;
9147 
9148  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9149  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9150  {
9151  VmaSuballocation& suballoc = suballocations1st[i];
9152  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9153  suballoc.hAllocation->CanBecomeLost() &&
9154  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9155  {
9156  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9157  suballoc.hAllocation = VK_NULL_HANDLE;
9158  ++m_1stNullItemsMiddleCount;
9159  m_SumFreeSize += suballoc.size;
9160  ++lostAllocationCount;
9161  }
9162  }
9163 
9164  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9165  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9166  {
9167  VmaSuballocation& suballoc = suballocations2nd[i];
9168  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9169  suballoc.hAllocation->CanBecomeLost() &&
9170  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9171  {
9172  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9173  suballoc.hAllocation = VK_NULL_HANDLE;
9174  ++m_2ndNullItemsCount;
9175  ++lostAllocationCount;
9176  }
9177  }
9178 
9179  if(lostAllocationCount)
9180  {
9181  CleanupAfterFree();
9182  }
9183 
9184  return lostAllocationCount;
9185 }
9186 
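/*
CheckCorruption below relies on every used suballocation being preceded and
followed by VMA_DEBUG_MARGIN bytes that were filled with a magic value when
the allocation was made. A sketch of enabling this (both macros must be
defined before including this header; the margin value is illustrative):

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #include "vk_mem_alloc.h"

vmaCheckCorruption() / vmaCheckPoolCorruption() then reach this function,
which returns VK_ERROR_VALIDATION_FAILED_EXT if any margin was overwritten.
*/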
9187 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9188 {
9189  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9190  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9191  {
9192  const VmaSuballocation& suballoc = suballocations1st[i];
9193  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9194  {
9195  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9196  {
9197  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9198  return VK_ERROR_VALIDATION_FAILED_EXT;
9199  }
9200  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9201  {
9202  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9203  return VK_ERROR_VALIDATION_FAILED_EXT;
9204  }
9205  }
9206  }
9207 
9208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9209  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9210  {
9211  const VmaSuballocation& suballoc = suballocations2nd[i];
9212  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9213  {
9214  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9215  {
9216  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9217  return VK_ERROR_VALIDATION_FAILED_EXT;
9218  }
9219  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9220  {
9221  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9222  return VK_ERROR_VALIDATION_FAILED_EXT;
9223  }
9224  }
9225  }
9226 
9227  return VK_SUCCESS;
9228 }
9229 
9230 void VmaBlockMetadata_Linear::Alloc(
9231  const VmaAllocationRequest& request,
9232  VmaSuballocationType type,
9233  VkDeviceSize allocSize,
9234  bool upperAddress,
9235  VmaAllocation hAllocation)
9236 {
9237  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9238 
9239  if(upperAddress)
9240  {
9241  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9242  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9243  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9244  suballocations2nd.push_back(newSuballoc);
9245  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9246  }
9247  else
9248  {
9249  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9250 
9251  // First allocation.
9252  if(suballocations1st.empty())
9253  {
9254  suballocations1st.push_back(newSuballoc);
9255  }
9256  else
9257  {
9258  // New allocation at the end of 1st vector.
9259  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9260  {
9261  // Check if it fits before the end of the block.
9262  VMA_ASSERT(request.offset + allocSize <= GetSize());
9263  suballocations1st.push_back(newSuballoc);
9264  }
9265  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9266  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9267  {
9268  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9269 
9270  switch(m_2ndVectorMode)
9271  {
9272  case SECOND_VECTOR_EMPTY:
9273  // First allocation from second part ring buffer.
9274  VMA_ASSERT(suballocations2nd.empty());
9275  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9276  break;
9277  case SECOND_VECTOR_RING_BUFFER:
9278  // 2-part ring buffer is already started.
9279  VMA_ASSERT(!suballocations2nd.empty());
9280  break;
9281  case SECOND_VECTOR_DOUBLE_STACK:
9282  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9283  break;
9284  default:
9285  VMA_ASSERT(0);
9286  }
9287 
9288  suballocations2nd.push_back(newSuballoc);
9289  }
9290  else
9291  {
9292  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9293  }
9294  }
9295  }
9296 
9297  m_SumFreeSize -= newSuballoc.size;
9298 }
9299 
9300 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9301 {
9302  FreeAtOffset(allocation->GetOffset());
9303 }
9304 
9305 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9306 {
9307  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9308  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9309 
9310  if(!suballocations1st.empty())
9311  {
9312  // First allocation: Mark it as next empty at the beginning.
9313  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9314  if(firstSuballoc.offset == offset)
9315  {
9316  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9317  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9318  m_SumFreeSize += firstSuballoc.size;
9319  ++m_1stNullItemsBeginCount;
9320  CleanupAfterFree();
9321  return;
9322  }
9323  }
9324 
9325  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9326  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9327  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9328  {
9329  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9330  if(lastSuballoc.offset == offset)
9331  {
9332  m_SumFreeSize += lastSuballoc.size;
9333  suballocations2nd.pop_back();
9334  CleanupAfterFree();
9335  return;
9336  }
9337  }
9338  // Last allocation in 1st vector.
9339  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9340  {
9341  VmaSuballocation& lastSuballoc = suballocations1st.back();
9342  if(lastSuballoc.offset == offset)
9343  {
9344  m_SumFreeSize += lastSuballoc.size;
9345  suballocations1st.pop_back();
9346  CleanupAfterFree();
9347  return;
9348  }
9349  }
9350 
9351  // Item from the middle of 1st vector.
9352  {
9353  VmaSuballocation refSuballoc;
9354  refSuballoc.offset = offset;
9355  // Rest of members stays uninitialized intentionally for better performance.
9356  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9357  suballocations1st.begin() + m_1stNullItemsBeginCount,
9358  suballocations1st.end(),
9359  refSuballoc);
9360  if(it != suballocations1st.end())
9361  {
9362  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9363  it->hAllocation = VK_NULL_HANDLE;
9364  ++m_1stNullItemsMiddleCount;
9365  m_SumFreeSize += it->size;
9366  CleanupAfterFree();
9367  return;
9368  }
9369  }
9370 
9371  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9372  {
9373  // Item from the middle of 2nd vector.
9374  VmaSuballocation refSuballoc;
9375  refSuballoc.offset = offset;
9376  // Rest of members stays uninitialized intentionally for better performance.
9377  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9378  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9379  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9380  if(it != suballocations2nd.end())
9381  {
9382  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9383  it->hAllocation = VK_NULL_HANDLE;
9384  ++m_2ndNullItemsCount;
9385  m_SumFreeSize += it->size;
9386  CleanupAfterFree();
9387  return;
9388  }
9389  }
9390 
9391  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9392 }
9393 
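// ShouldCompact1st below: the condition nullItemCount * 2 >=
// (suballocCount - nullItemCount) * 3 simplifies to nullItemCount >= 60% of
// suballocCount. E.g. with 100 suballocations, compaction of the 1st vector
// triggers once at least 60 of them are null (2*60 = 120 >= 3*40 = 120).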
9394 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9395 {
9396  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9397  const size_t suballocCount = AccessSuballocations1st().size();
9398  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9399 }
9400 
9401 void VmaBlockMetadata_Linear::CleanupAfterFree()
9402 {
9403  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9404  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9405 
9406  if(IsEmpty())
9407  {
9408  suballocations1st.clear();
9409  suballocations2nd.clear();
9410  m_1stNullItemsBeginCount = 0;
9411  m_1stNullItemsMiddleCount = 0;
9412  m_2ndNullItemsCount = 0;
9413  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9414  }
9415  else
9416  {
9417  const size_t suballoc1stCount = suballocations1st.size();
9418  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9419  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9420 
9421  // Find more null items at the beginning of 1st vector.
9422  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9423  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9424  {
9425  ++m_1stNullItemsBeginCount;
9426  --m_1stNullItemsMiddleCount;
9427  }
9428 
9429  // Find more null items at the end of 1st vector.
9430  while(m_1stNullItemsMiddleCount > 0 &&
9431  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9432  {
9433  --m_1stNullItemsMiddleCount;
9434  suballocations1st.pop_back();
9435  }
9436 
9437  // Find more null items at the end of 2nd vector.
9438  while(m_2ndNullItemsCount > 0 &&
9439  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9440  {
9441  --m_2ndNullItemsCount;
9442  suballocations2nd.pop_back();
9443  }
9444 
9445  if(ShouldCompact1st())
9446  {
9447  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9448  size_t srcIndex = m_1stNullItemsBeginCount;
9449  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9450  {
9451  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9452  {
9453  ++srcIndex;
9454  }
9455  if(dstIndex != srcIndex)
9456  {
9457  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9458  }
9459  ++srcIndex;
9460  }
9461  suballocations1st.resize(nonNullItemCount);
9462  m_1stNullItemsBeginCount = 0;
9463  m_1stNullItemsMiddleCount = 0;
9464  }
9465 
9466  // 2nd vector became empty.
9467  if(suballocations2nd.empty())
9468  {
9469  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9470  }
9471 
9472  // 1st vector became empty.
9473  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9474  {
9475  suballocations1st.clear();
9476  m_1stNullItemsBeginCount = 0;
9477 
9478  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9479  {
9480  // Swap 1st with 2nd. Now 2nd is empty.
9481  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9482  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9483  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9484  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9485  {
9486  ++m_1stNullItemsBeginCount;
9487  --m_1stNullItemsMiddleCount;
9488  }
9489  m_2ndNullItemsCount = 0;
9490  m_1stVectorIndex ^= 1;
9491  }
9492  }
9493  }
9494 
9495  VMA_HEAVY_ASSERT(Validate());
9496 }
9497 
9498 
9499 ////////////////////////////////////////////////////////////////////////////////
9500 // class VmaBlockMetadata_Buddy
9501 
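// The buddy algorithm manages m_UsableSize = VmaPrevPow2(size) bytes as a
// binary tree: level 0 is a single node spanning the whole usable size and
// each deeper level halves the node size, down to MIN_NODE_SIZE. Free nodes
// are kept in per-level doubly linked lists (m_FreeList); the tail of the
// block beyond the largest power of two (GetUnusableSize()) is reported as a
// permanently unused range.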
9502 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9503  VmaBlockMetadata(hAllocator),
9504  m_Root(VMA_NULL),
9505  m_AllocationCount(0),
9506  m_FreeCount(1),
9507  m_SumFreeSize(0)
9508 {
9509  memset(m_FreeList, 0, sizeof(m_FreeList));
9510 }
9511 
9512 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9513 {
9514  DeleteNode(m_Root);
9515 }
9516 
9517 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9518 {
9519  VmaBlockMetadata::Init(size);
9520 
9521  m_UsableSize = VmaPrevPow2(size);
9522  m_SumFreeSize = m_UsableSize;
9523 
9524  // Calculate m_LevelCount.
9525  m_LevelCount = 1;
9526  while(m_LevelCount < MAX_LEVELS &&
9527  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9528  {
9529  ++m_LevelCount;
9530  }
9531 
9532  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9533  rootNode->offset = 0;
9534  rootNode->type = Node::TYPE_FREE;
9535  rootNode->parent = VMA_NULL;
9536  rootNode->buddy = VMA_NULL;
9537 
9538  m_Root = rootNode;
9539  AddToFreeListFront(0, rootNode);
9540 }
9541 
9542 bool VmaBlockMetadata_Buddy::Validate() const
9543 {
9544  // Validate tree.
9545  ValidationContext ctx;
9546  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9547  {
9548  VMA_VALIDATE(false && "ValidateNode failed.");
9549  }
9550  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9551  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9552 
9553  // Validate free node lists.
9554  for(uint32_t level = 0; level < m_LevelCount; ++level)
9555  {
9556  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9557  m_FreeList[level].front->free.prev == VMA_NULL);
9558 
9559  for(Node* node = m_FreeList[level].front;
9560  node != VMA_NULL;
9561  node = node->free.next)
9562  {
9563  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9564 
9565  if(node->free.next == VMA_NULL)
9566  {
9567  VMA_VALIDATE(m_FreeList[level].back == node);
9568  }
9569  else
9570  {
9571  VMA_VALIDATE(node->free.next->free.prev == node);
9572  }
9573  }
9574  }
9575 
9576  // Validate that free lists at higher levels are empty.
9577  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9578  {
9579  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9580  }
9581 
9582  return true;
9583 }
9584 
9585 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9586 {
9587  for(uint32_t level = 0; level < m_LevelCount; ++level)
9588  {
9589  if(m_FreeList[level].front != VMA_NULL)
9590  {
9591  return LevelToNodeSize(level);
9592  }
9593  }
9594  return 0;
9595 }
9596 
9597 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9598 {
9599  const VkDeviceSize unusableSize = GetUnusableSize();
9600 
9601  outInfo.blockCount = 1;
9602 
9603  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9604  outInfo.usedBytes = outInfo.unusedBytes = 0;
9605 
9606  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9607  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9608  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9609 
9610  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9611 
9612  if(unusableSize > 0)
9613  {
9614  ++outInfo.unusedRangeCount;
9615  outInfo.unusedBytes += unusableSize;
9616  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9617  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9618  }
9619 }
9620 
9621 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9622 {
9623  const VkDeviceSize unusableSize = GetUnusableSize();
9624 
9625  inoutStats.size += GetSize();
9626  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9627  inoutStats.allocationCount += m_AllocationCount;
9628  inoutStats.unusedRangeCount += m_FreeCount;
9629  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9630 
9631  if(unusableSize > 0)
9632  {
9633  ++inoutStats.unusedRangeCount;
9634  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9635  }
9636 }
9637 
9638 #if VMA_STATS_STRING_ENABLED
9639 
9640 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9641 {
9642  // TODO optimize
9643  VmaStatInfo stat;
9644  CalcAllocationStatInfo(stat);
9645 
9646  PrintDetailedMap_Begin(
9647  json,
9648  stat.unusedBytes,
9649  stat.allocationCount,
9650  stat.unusedRangeCount);
9651 
9652  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9653 
9654  const VkDeviceSize unusableSize = GetUnusableSize();
9655  if(unusableSize > 0)
9656  {
9657  PrintDetailedMap_UnusedRange(json,
9658  m_UsableSize, // offset
9659  unusableSize); // size
9660  }
9661 
9662  PrintDetailedMap_End(json);
9663 }
9664 
9665 #endif // #if VMA_STATS_STRING_ENABLED
9666 
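// The search in CreateAllocationRequest below starts at targetLevel (the
// deepest level whose node size still fits allocSize) and walks toward level
// 0, i.e. toward larger nodes, taking the first free node whose offset
// satisfies allocAlignment. If that node comes from a shallower level,
// Alloc() later splits it down to targetLevel.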
9667 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9668  uint32_t currentFrameIndex,
9669  uint32_t frameInUseCount,
9670  VkDeviceSize bufferImageGranularity,
9671  VkDeviceSize allocSize,
9672  VkDeviceSize allocAlignment,
9673  bool upperAddress,
9674  VmaSuballocationType allocType,
9675  bool canMakeOtherLost,
9676  uint32_t strategy,
9677  VmaAllocationRequest* pAllocationRequest)
9678 {
9679  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9680 
9681  // Simple way to respect bufferImageGranularity. May be optimized some day.
9682  // Whenever it might be an OPTIMAL image...
9683  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9684  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9685  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9686  {
9687  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9688  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9689  }
9690 
9691  if(allocSize > m_UsableSize)
9692  {
9693  return false;
9694  }
9695 
9696  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9697  for(uint32_t level = targetLevel + 1; level--; )
9698  {
9699  for(Node* freeNode = m_FreeList[level].front;
9700  freeNode != VMA_NULL;
9701  freeNode = freeNode->free.next)
9702  {
9703  if(freeNode->offset % allocAlignment == 0)
9704  {
9705  pAllocationRequest->offset = freeNode->offset;
9706  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9707  pAllocationRequest->sumItemSize = 0;
9708  pAllocationRequest->itemsToMakeLostCount = 0;
9709  pAllocationRequest->customData = (void*)(uintptr_t)level;
9710  return true;
9711  }
9712  }
9713  }
9714 
9715  return false;
9716 }
9717 
9718 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9719  uint32_t currentFrameIndex,
9720  uint32_t frameInUseCount,
9721  VmaAllocationRequest* pAllocationRequest)
9722 {
9723  /*
9724  Lost allocations are not supported in buddy allocator at the moment.
9725  Support might be added in the future.
9726  */
9727  return pAllocationRequest->itemsToMakeLostCount == 0;
9728 }
9729 
9730 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9731 {
9732  /*
9733  Lost allocations are not supported in buddy allocator at the moment.
9734  Support might be added in the future.
9735  */
9736  return 0;
9737 }
9738 
9739 void VmaBlockMetadata_Buddy::Alloc(
9740  const VmaAllocationRequest& request,
9741  VmaSuballocationType type,
9742  VkDeviceSize allocSize,
9743  bool upperAddress,
9744  VmaAllocation hAllocation)
9745 {
9746  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9747  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9748 
9749  Node* currNode = m_FreeList[currLevel].front;
9750  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9751  while(currNode->offset != request.offset)
9752  {
9753  currNode = currNode->free.next;
9754  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9755  }
9756 
9757  // Go down, splitting free nodes.
9758  while(currLevel < targetLevel)
9759  {
 9760  // currNode is already the first free node at currLevel.
 9761  // Remove it from the list of free nodes at this level.
9762  RemoveFromFreeList(currLevel, currNode);
9763 
9764  const uint32_t childrenLevel = currLevel + 1;
9765 
9766  // Create two free sub-nodes.
9767  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9768  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9769 
9770  leftChild->offset = currNode->offset;
9771  leftChild->type = Node::TYPE_FREE;
9772  leftChild->parent = currNode;
9773  leftChild->buddy = rightChild;
9774 
9775  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9776  rightChild->type = Node::TYPE_FREE;
9777  rightChild->parent = currNode;
9778  rightChild->buddy = leftChild;
9779 
9780  // Convert current currNode to split type.
9781  currNode->type = Node::TYPE_SPLIT;
9782  currNode->split.leftChild = leftChild;
9783 
9784  // Add child nodes to free list. Order is important!
9785  AddToFreeListFront(childrenLevel, rightChild);
9786  AddToFreeListFront(childrenLevel, leftChild);
9787 
9788  ++m_FreeCount;
9789  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9790  ++currLevel;
9791  currNode = m_FreeList[currLevel].front;
9792 
 9793  /*
 9794  We can be sure that currNode, as the left child of the node previously split,
 9795  also fulfills the alignment requirement.
 9796  */
9797  }
9798 
9799  // Remove from free list.
9800  VMA_ASSERT(currLevel == targetLevel &&
9801  currNode != VMA_NULL &&
9802  currNode->type == Node::TYPE_FREE);
9803  RemoveFromFreeList(currLevel, currNode);
9804 
9805  // Convert to allocation node.
9806  currNode->type = Node::TYPE_ALLOCATION;
9807  currNode->allocation.alloc = hAllocation;
9808 
9809  ++m_AllocationCount;
9810  --m_FreeCount;
9811  m_SumFreeSize -= allocSize;
9812 }
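 
// A worked example of the split loop above (a sketch with hypothetical sizes):
// allocating 256 B from a fresh 1024 B block (targetLevel = 2, free root found
// at level 0):
//   level 0: split [0..1024) -> free [0..512) and [512..1024)
//   level 1: split [0..512)  -> free [0..256) and [256..512)
//   level 2: [0..256) becomes the allocation node.
// Afterwards m_FreeCount == 2 ([256..512) and [512..1024)) and m_SumFreeSize
// has dropped by 256.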
9813 
9814 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9815 {
9816  if(node->type == Node::TYPE_SPLIT)
9817  {
9818  DeleteNode(node->split.leftChild->buddy);
9819  DeleteNode(node->split.leftChild);
9820  }
9821 
9822  vma_delete(GetAllocationCallbacks(), node);
9823 }
9824 
9825 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9826 {
9827  VMA_VALIDATE(level < m_LevelCount);
9828  VMA_VALIDATE(curr->parent == parent);
9829  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9830  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9831  switch(curr->type)
9832  {
9833  case Node::TYPE_FREE:
9834  // curr->free.prev, next are validated separately.
9835  ctx.calculatedSumFreeSize += levelNodeSize;
9836  ++ctx.calculatedFreeCount;
9837  break;
9838  case Node::TYPE_ALLOCATION:
 9839  ++ctx.calculatedAllocationCount;
 9840  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
 9841  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9842  break;
9843  case Node::TYPE_SPLIT:
9844  {
9845  const uint32_t childrenLevel = level + 1;
9846  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9847  const Node* const leftChild = curr->split.leftChild;
9848  VMA_VALIDATE(leftChild != VMA_NULL);
9849  VMA_VALIDATE(leftChild->offset == curr->offset);
9850  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9851  {
9852  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9853  }
9854  const Node* const rightChild = leftChild->buddy;
9855  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9856  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9857  {
9858  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9859  }
9860  }
9861  break;
9862  default:
9863  return false;
9864  }
9865 
9866  return true;
9867 }
9868 
9869 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9870 {
 9871  // This could be optimized, e.g. by using std::bit_width (formerly std::log2p1) from C++20.
9872  uint32_t level = 0;
9873  VkDeviceSize currLevelNodeSize = m_UsableSize;
9874  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9875  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9876  {
9877  ++level;
9878  currLevelNodeSize = nextLevelNodeSize;
9879  nextLevelNodeSize = currLevelNodeSize >> 1;
9880  }
9881  return level;
9882 }
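 
// A minimal closed-form alternative to the loop above (a sketch, not used by
// the library), assuming m_UsableSize is a power of 2 (Init() guarantees this)
// and 1 <= allocSize <= usableSize. Requires C++20 <bit>.
#if defined(__cplusplus) && __cplusplus >= 202002L
#include <bit>
inline uint32_t VmaAllocSizeToLevelBitScan(
    uint64_t usableSize, uint32_t levelCount, uint64_t allocSize)
{
    // Level 0 spans the whole usable size: rootLog2 = log2(usableSize).
    const uint32_t rootLog2 = static_cast<uint32_t>(std::bit_width(usableSize)) - 1;
    // Smallest power of 2 that fits allocSize is 2^allocLog2.
    const uint32_t allocLog2 = static_cast<uint32_t>(std::bit_width(allocSize - 1));
    const uint32_t level = rootLog2 - allocLog2;
    // Clamp to the deepest level that actually exists.
    return level < levelCount ? level : levelCount - 1;
}
#endif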
9883 
9884 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9885 {
9886  // Find node and level.
9887  Node* node = m_Root;
9888  VkDeviceSize nodeOffset = 0;
9889  uint32_t level = 0;
9890  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9891  while(node->type == Node::TYPE_SPLIT)
9892  {
9893  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9894  if(offset < nodeOffset + nextLevelSize)
9895  {
9896  node = node->split.leftChild;
9897  }
9898  else
9899  {
9900  node = node->split.leftChild->buddy;
9901  nodeOffset += nextLevelSize;
9902  }
9903  ++level;
9904  levelNodeSize = nextLevelSize;
9905  }
9906 
9907  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9908  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9909 
9910  ++m_FreeCount;
9911  --m_AllocationCount;
9912  m_SumFreeSize += alloc->GetSize();
9913 
9914  node->type = Node::TYPE_FREE;
9915 
9916  // Join free nodes if possible.
9917  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9918  {
9919  RemoveFromFreeList(level, node->buddy);
9920  Node* const parent = node->parent;
9921 
9922  vma_delete(GetAllocationCallbacks(), node->buddy);
9923  vma_delete(GetAllocationCallbacks(), node);
9924  parent->type = Node::TYPE_FREE;
9925 
9926  node = parent;
9927  --level;
9928  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9929  --m_FreeCount;
9930  }
9931 
9932  AddToFreeListFront(level, node);
9933 }
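 
// The merge loop above is the inverse of the split example in Alloc(): freeing
// the 256 B allocation at offset 0 of a 1024 B block first merges [0..256)
// with its free buddy [256..512) into [0..512), then merges that with
// [512..1024) back into the full root, decrementing m_FreeCount once per merge
// before the surviving node is returned to the free list.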
9934 
9935 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9936 {
9937  switch(node->type)
9938  {
9939  case Node::TYPE_FREE:
9940  ++outInfo.unusedRangeCount;
9941  outInfo.unusedBytes += levelNodeSize;
9942  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
 9943  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9944  break;
9945  case Node::TYPE_ALLOCATION:
9946  {
9947  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9948  ++outInfo.allocationCount;
9949  outInfo.usedBytes += allocSize;
9950  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
 9951  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9952 
9953  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9954  if(unusedRangeSize > 0)
9955  {
9956  ++outInfo.unusedRangeCount;
9957  outInfo.unusedBytes += unusedRangeSize;
9958  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
 9959  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9960  }
9961  }
9962  break;
9963  case Node::TYPE_SPLIT:
9964  {
9965  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9966  const Node* const leftChild = node->split.leftChild;
9967  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9968  const Node* const rightChild = leftChild->buddy;
9969  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9970  }
9971  break;
9972  default:
9973  VMA_ASSERT(0);
9974  }
9975 }
9976 
9977 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9978 {
9979  VMA_ASSERT(node->type == Node::TYPE_FREE);
9980 
9981  // List is empty.
9982  Node* const frontNode = m_FreeList[level].front;
9983  if(frontNode == VMA_NULL)
9984  {
9985  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9986  node->free.prev = node->free.next = VMA_NULL;
9987  m_FreeList[level].front = m_FreeList[level].back = node;
9988  }
9989  else
9990  {
9991  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9992  node->free.prev = VMA_NULL;
9993  node->free.next = frontNode;
9994  frontNode->free.prev = node;
9995  m_FreeList[level].front = node;
9996  }
9997 }
9998 
9999 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10000 {
10001  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10002 
10003  // It is at the front.
10004  if(node->free.prev == VMA_NULL)
10005  {
10006  VMA_ASSERT(m_FreeList[level].front == node);
10007  m_FreeList[level].front = node->free.next;
10008  }
10009  else
10010  {
10011  Node* const prevFreeNode = node->free.prev;
10012  VMA_ASSERT(prevFreeNode->free.next == node);
10013  prevFreeNode->free.next = node->free.next;
10014  }
10015 
10016  // It is at the back.
10017  if(node->free.next == VMA_NULL)
10018  {
10019  VMA_ASSERT(m_FreeList[level].back == node);
10020  m_FreeList[level].back = node->free.prev;
10021  }
10022  else
10023  {
10024  Node* const nextFreeNode = node->free.next;
10025  VMA_ASSERT(nextFreeNode->free.prev == node);
10026  nextFreeNode->free.prev = node->free.prev;
10027  }
10028 }
10029 
10030 #if VMA_STATS_STRING_ENABLED
10031 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10032 {
10033  switch(node->type)
10034  {
10035  case Node::TYPE_FREE:
10036  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10037  break;
10038  case Node::TYPE_ALLOCATION:
10039  {
10040  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10041  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10042  if(allocSize < levelNodeSize)
10043  {
10044  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10045  }
10046  }
10047  break;
10048  case Node::TYPE_SPLIT:
10049  {
10050  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10051  const Node* const leftChild = node->split.leftChild;
10052  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10053  const Node* const rightChild = leftChild->buddy;
10054  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10055  }
10056  break;
10057  default:
10058  VMA_ASSERT(0);
10059  }
10060 }
10061 #endif // #if VMA_STATS_STRING_ENABLED
10062 
10063 
10064 ////////////////////////////////////////////////////////////////////////////////
10065 // class VmaDeviceMemoryBlock
10066 
10067 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10068  m_pMetadata(VMA_NULL),
10069  m_MemoryTypeIndex(UINT32_MAX),
10070  m_Id(0),
10071  m_hMemory(VK_NULL_HANDLE),
10072  m_MapCount(0),
10073  m_pMappedData(VMA_NULL)
10074 {
10075 }
10076 
10077 void VmaDeviceMemoryBlock::Init(
10078  VmaAllocator hAllocator,
10079  uint32_t newMemoryTypeIndex,
10080  VkDeviceMemory newMemory,
10081  VkDeviceSize newSize,
10082  uint32_t id,
10083  uint32_t algorithm)
10084 {
10085  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10086 
10087  m_MemoryTypeIndex = newMemoryTypeIndex;
10088  m_Id = id;
10089  m_hMemory = newMemory;
10090 
10091  switch(algorithm)
10092  {
10093  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10095  break;
10096  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10097  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10098  break;
10099  default:
10100  VMA_ASSERT(0);
10101  // Fall-through.
10102  case 0:
10103  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10104  }
10105  m_pMetadata->Init(newSize);
10106 }
10107 
10108 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10109 {
10110  // This is the most important assert in the entire library.
10111  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10112  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10113 
10114  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10115  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10116  m_hMemory = VK_NULL_HANDLE;
10117 
10118  vma_delete(allocator, m_pMetadata);
10119  m_pMetadata = VMA_NULL;
10120 }
10121 
10122 bool VmaDeviceMemoryBlock::Validate() const
10123 {
10124  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10125  (m_pMetadata->GetSize() != 0));
10126 
10127  return m_pMetadata->Validate();
10128 }
10129 
10130 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10131 {
10132  void* pData = nullptr;
10133  VkResult res = Map(hAllocator, 1, &pData);
10134  if(res != VK_SUCCESS)
10135  {
10136  return res;
10137  }
10138 
10139  res = m_pMetadata->CheckCorruption(pData);
10140 
10141  Unmap(hAllocator, 1);
10142 
10143  return res;
10144 }
10145 
10146 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10147 {
10148  if(count == 0)
10149  {
10150  return VK_SUCCESS;
10151  }
10152 
10153  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10154  if(m_MapCount != 0)
10155  {
10156  m_MapCount += count;
10157  VMA_ASSERT(m_pMappedData != VMA_NULL);
10158  if(ppData != VMA_NULL)
10159  {
10160  *ppData = m_pMappedData;
10161  }
10162  return VK_SUCCESS;
10163  }
10164  else
10165  {
10166  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10167  hAllocator->m_hDevice,
10168  m_hMemory,
10169  0, // offset
10170  VK_WHOLE_SIZE,
10171  0, // flags
10172  &m_pMappedData);
10173  if(result == VK_SUCCESS)
10174  {
10175  if(ppData != VMA_NULL)
10176  {
10177  *ppData = m_pMappedData;
10178  }
10179  m_MapCount = count;
10180  }
10181  return result;
10182  }
10183 }
10184 
10185 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10186 {
10187  if(count == 0)
10188  {
10189  return;
10190  }
10191 
10192  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10193  if(m_MapCount >= count)
10194  {
10195  m_MapCount -= count;
10196  if(m_MapCount == 0)
10197  {
10198  m_pMappedData = VMA_NULL;
10199  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10200  }
10201  }
10202  else
10203  {
10204  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped but it was not previously mapped.");
10205  }
10206 }
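 
// Map()/Unmap() above are reference-counted: vkMapMemory is called only on the
// 0 -> 1 transition and vkUnmapMemory only on the 1 -> 0 transition, so nested
// mappings of the same block are cheap. A minimal sketch (hypothetical block
// and allocator handles):
//
//   void* p1; void* p2;
//   pBlock->Map(hAllocator, 1, &p1);   // m_MapCount 0 -> 1, calls vkMapMemory
//   pBlock->Map(hAllocator, 1, &p2);   // m_MapCount 1 -> 2, p2 == p1
//   pBlock->Unmap(hAllocator, 1);      // m_MapCount 2 -> 1, still mapped
//   pBlock->Unmap(hAllocator, 1);      // m_MapCount 1 -> 0, calls vkUnmapMemory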
10207 
10208 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10209 {
10210  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10211  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10212 
10213  void* pData;
10214  VkResult res = Map(hAllocator, 1, &pData);
10215  if(res != VK_SUCCESS)
10216  {
10217  return res;
10218  }
10219 
10220  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10221  VmaWriteMagicValue(pData, allocOffset + allocSize);
10222 
10223  Unmap(hAllocator, 1);
10224 
10225  return VK_SUCCESS;
10226 }
10227 
10228 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10229 {
10230  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10231  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10232 
10233  void* pData;
10234  VkResult res = Map(hAllocator, 1, &pData);
10235  if(res != VK_SUCCESS)
10236  {
10237  return res;
10238  }
10239 
10240  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10241  {
10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10243  }
10244  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10245  {
10246  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10247  }
10248 
10249  Unmap(hAllocator, 1);
10250 
10251  return VK_SUCCESS;
10252 }
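 
// Layout assumed by the two functions above when corruption detection is
// enabled (a sketch; VMA_DEBUG_MARGIN bytes of padding surround each
// allocation):
//
//   allocOffset - VMA_DEBUG_MARGIN    allocOffset     allocOffset + allocSize
//   |<----- magic value ----->|<----- user data ----->|<----- magic value ----->|
//
// WriteMagicValueAroundAllocation fills both margins with a known 32-bit
// pattern right after the allocation is made; ValidateMagicValueAroundAllocation
// re-reads them on free and asserts if either changed, catching out-of-bounds
// writes just before or just after the allocation.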
10253 
10254 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10255  const VmaAllocator hAllocator,
10256  const VmaAllocation hAllocation,
10257  VkBuffer hBuffer)
10258 {
10259  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10260  hAllocation->GetBlock() == this);
10261  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10262  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10263  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10264  hAllocator->m_hDevice,
10265  hBuffer,
10266  m_hMemory,
10267  hAllocation->GetOffset());
10268 }
10269 
10270 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10271  const VmaAllocator hAllocator,
10272  const VmaAllocation hAllocation,
10273  VkImage hImage)
10274 {
10275  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10276  hAllocation->GetBlock() == this);
10277  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10278  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10279  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10280  hAllocator->m_hDevice,
10281  hImage,
10282  m_hMemory,
10283  hAllocation->GetOffset());
10284 }
10285 
10286 static void InitStatInfo(VmaStatInfo& outInfo)
10287 {
10288  memset(&outInfo, 0, sizeof(outInfo));
10289  outInfo.allocationSizeMin = UINT64_MAX;
10290  outInfo.unusedRangeSizeMin = UINT64_MAX;
10291 }
10292 
10293 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10294 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10295 {
10296  inoutInfo.blockCount += srcInfo.blockCount;
10297  inoutInfo.allocationCount += srcInfo.allocationCount;
10298  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10299  inoutInfo.usedBytes += srcInfo.usedBytes;
10300  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10301  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10302  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10303  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10304  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10305 }
10306 
10307 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10308 {
10309  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10310  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10311  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10312  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10313 }
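 
// The three helpers above follow an init/accumulate/postprocess pattern. A
// minimal sketch (hypothetical per-block inputs):
//
//   VmaStatInfo total;
//   InitStatInfo(total);                  // zeros, min fields = UINT64_MAX
//   VmaAddStatInfo(total, statsOfBlockA); // sums counters, folds min/max
//   VmaAddStatInfo(total, statsOfBlockB);
//   VmaPostprocessCalcStatInfo(total);    // derives rounded averages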
10314 
10315 VmaPool_T::VmaPool_T(
10316  VmaAllocator hAllocator,
10317  const VmaPoolCreateInfo& createInfo,
10318  VkDeviceSize preferredBlockSize) :
10319  m_BlockVector(
10320  hAllocator,
10321  createInfo.memoryTypeIndex,
10322  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10323  createInfo.minBlockCount,
10324  createInfo.maxBlockCount,
10325  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10326  createInfo.frameInUseCount,
10327  true, // isCustomPool
10328  createInfo.blockSize != 0, // explicitBlockSize
10329  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10330  m_Id(0)
10331 {
10332 }
10333 
10334 VmaPool_T::~VmaPool_T()
10335 {
10336 }
10337 
10338 #if VMA_STATS_STRING_ENABLED
10339 
10340 #endif // #if VMA_STATS_STRING_ENABLED
10341 
10342 VmaBlockVector::VmaBlockVector(
10343  VmaAllocator hAllocator,
10344  uint32_t memoryTypeIndex,
10345  VkDeviceSize preferredBlockSize,
10346  size_t minBlockCount,
10347  size_t maxBlockCount,
10348  VkDeviceSize bufferImageGranularity,
10349  uint32_t frameInUseCount,
10350  bool isCustomPool,
10351  bool explicitBlockSize,
10352  uint32_t algorithm) :
10353  m_hAllocator(hAllocator),
10354  m_MemoryTypeIndex(memoryTypeIndex),
10355  m_PreferredBlockSize(preferredBlockSize),
10356  m_MinBlockCount(minBlockCount),
10357  m_MaxBlockCount(maxBlockCount),
10358  m_BufferImageGranularity(bufferImageGranularity),
10359  m_FrameInUseCount(frameInUseCount),
10360  m_IsCustomPool(isCustomPool),
10361  m_ExplicitBlockSize(explicitBlockSize),
10362  m_Algorithm(algorithm),
10363  m_HasEmptyBlock(false),
10364  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10365  m_pDefragmentator(VMA_NULL),
10366  m_NextBlockId(0)
10367 {
10368 }
10369 
10370 VmaBlockVector::~VmaBlockVector()
10371 {
10372  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10373 
10374  for(size_t i = m_Blocks.size(); i--; )
10375  {
10376  m_Blocks[i]->Destroy(m_hAllocator);
10377  vma_delete(m_hAllocator, m_Blocks[i]);
10378  }
10379 }
10380 
10381 VkResult VmaBlockVector::CreateMinBlocks()
10382 {
10383  for(size_t i = 0; i < m_MinBlockCount; ++i)
10384  {
10385  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10386  if(res != VK_SUCCESS)
10387  {
10388  return res;
10389  }
10390  }
10391  return VK_SUCCESS;
10392 }
10393 
10394 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10395 {
10396  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10397 
10398  const size_t blockCount = m_Blocks.size();
10399 
10400  pStats->size = 0;
10401  pStats->unusedSize = 0;
10402  pStats->allocationCount = 0;
10403  pStats->unusedRangeCount = 0;
10404  pStats->unusedRangeSizeMax = 0;
10405  pStats->blockCount = blockCount;
10406 
10407  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10408  {
10409  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10410  VMA_ASSERT(pBlock);
10411  VMA_HEAVY_ASSERT(pBlock->Validate());
10412  pBlock->m_pMetadata->AddPoolStats(*pStats);
10413  }
10414 }
10415 
10416 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10417 {
10418  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10419  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10420  (VMA_DEBUG_MARGIN > 0) &&
10421  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10422 }
10423 
10424 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10425 
10426 VkResult VmaBlockVector::Allocate(
10427  VmaPool hCurrentPool,
10428  uint32_t currentFrameIndex,
10429  VkDeviceSize size,
10430  VkDeviceSize alignment,
10431  const VmaAllocationCreateInfo& createInfo,
10432  VmaSuballocationType suballocType,
10433  VmaAllocation* pAllocation)
10434 {
10435  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10436  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10437  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10438  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10439  const bool canCreateNewBlock =
10440  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10441  (m_Blocks.size() < m_MaxBlockCount);
10442  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10443 
10444  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer,
10445  // which in turn is available only when maxBlockCount = 1.
10446  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10447  {
10448  canMakeOtherLost = false;
10449  }
10450 
10451  // Upper address can only be used with linear allocator and within single memory block.
10452  if(isUpperAddress &&
10453  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10454  {
10455  return VK_ERROR_FEATURE_NOT_PRESENT;
10456  }
10457 
10458  // Validate strategy.
10459  switch(strategy)
10460  {
10461  case 0:
10462  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10463  break;
10464  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10465  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10466  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10467  break;
10468  default:
10469  return VK_ERROR_FEATURE_NOT_PRESENT;
10470  }
10471 
10472  // Early reject: requested allocation size is larger than maximum block size for this block vector.
10473  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10474  {
10475  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10476  }
10477 
10478  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10479 
10480  /*
10481  Under certain conditions, this whole section can be skipped for optimization, so
10482  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10483  e.g. for custom pools with linear algorithm.
10484  */
10485  if(!canMakeOtherLost || canCreateNewBlock)
10486  {
10487  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10488  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10489  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10490 
10491  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10492  {
10493  // Use only last block.
10494  if(!m_Blocks.empty())
10495  {
10496  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10497  VMA_ASSERT(pCurrBlock);
10498  VkResult res = AllocateFromBlock(
10499  pCurrBlock,
10500  hCurrentPool,
10501  currentFrameIndex,
10502  size,
10503  alignment,
10504  allocFlagsCopy,
10505  createInfo.pUserData,
10506  suballocType,
10507  strategy,
10508  pAllocation);
10509  if(res == VK_SUCCESS)
10510  {
10511  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10512  return VK_SUCCESS;
10513  }
10514  }
10515  }
10516  else
10517  {
10518  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10519  {
10520  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10521  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10522  {
10523  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10524  VMA_ASSERT(pCurrBlock);
10525  VkResult res = AllocateFromBlock(
10526  pCurrBlock,
10527  hCurrentPool,
10528  currentFrameIndex,
10529  size,
10530  alignment,
10531  allocFlagsCopy,
10532  createInfo.pUserData,
10533  suballocType,
10534  strategy,
10535  pAllocation);
10536  if(res == VK_SUCCESS)
10537  {
10538  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10539  return VK_SUCCESS;
10540  }
10541  }
10542  }
10543  else // WORST_FIT, FIRST_FIT
10544  {
10545  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10546  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10547  {
10548  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10549  VMA_ASSERT(pCurrBlock);
10550  VkResult res = AllocateFromBlock(
10551  pCurrBlock,
10552  hCurrentPool,
10553  currentFrameIndex,
10554  size,
10555  alignment,
10556  allocFlagsCopy,
10557  createInfo.pUserData,
10558  suballocType,
10559  strategy,
10560  pAllocation);
10561  if(res == VK_SUCCESS)
10562  {
10563  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10564  return VK_SUCCESS;
10565  }
10566  }
10567  }
10568  }
10569 
10570  // 2. Try to create new block.
10571  if(canCreateNewBlock)
10572  {
10573  // Calculate optimal size for new block.
10574  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10575  uint32_t newBlockSizeShift = 0;
10576  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10577 
10578  if(!m_ExplicitBlockSize)
10579  {
10580  // Allocate 1/8, 1/4, 1/2 as first blocks.
10581  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10582  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10583  {
10584  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10585  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10586  {
10587  newBlockSize = smallerNewBlockSize;
10588  ++newBlockSizeShift;
10589  }
10590  else
10591  {
10592  break;
10593  }
10594  }
10595  }
10596 
10597  size_t newBlockIndex = 0;
10598  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10599  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10600  if(!m_ExplicitBlockSize)
10601  {
10602  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10603  {
10604  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10605  if(smallerNewBlockSize >= size)
10606  {
10607  newBlockSize = smallerNewBlockSize;
10608  ++newBlockSizeShift;
10609  res = CreateBlock(newBlockSize, &newBlockIndex);
10610  }
10611  else
10612  {
10613  break;
10614  }
10615  }
10616  }
10617 
10618  if(res == VK_SUCCESS)
10619  {
10620  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10621  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10622 
10623  res = AllocateFromBlock(
10624  pBlock,
10625  hCurrentPool,
10626  currentFrameIndex,
10627  size,
10628  alignment,
10629  allocFlagsCopy,
10630  createInfo.pUserData,
10631  suballocType,
10632  strategy,
10633  pAllocation);
10634  if(res == VK_SUCCESS)
10635  {
10636  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10637  return VK_SUCCESS;
10638  }
10639  else
10640  {
10641  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10642  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10643  }
10644  }
10645  }
10646  }
10647 
10648  // 3. Try to allocate from existing blocks with making other allocations lost.
10649  if(canMakeOtherLost)
10650  {
10651  uint32_t tryIndex = 0;
10652  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10653  {
10654  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10655  VmaAllocationRequest bestRequest = {};
10656  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10657 
10658  // 1. Search existing allocations.
10659  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10660  {
10661  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10662  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10663  {
10664  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10665  VMA_ASSERT(pCurrBlock);
10666  VmaAllocationRequest currRequest = {};
10667  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10668  currentFrameIndex,
10669  m_FrameInUseCount,
10670  m_BufferImageGranularity,
10671  size,
10672  alignment,
10673  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10674  suballocType,
10675  canMakeOtherLost,
10676  strategy,
10677  &currRequest))
10678  {
10679  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10680  if(pBestRequestBlock == VMA_NULL ||
10681  currRequestCost < bestRequestCost)
10682  {
10683  pBestRequestBlock = pCurrBlock;
10684  bestRequest = currRequest;
10685  bestRequestCost = currRequestCost;
10686 
10687  if(bestRequestCost == 0)
10688  {
10689  break;
10690  }
10691  }
10692  }
10693  }
10694  }
10695  else // WORST_FIT, FIRST_FIT
10696  {
10697  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10698  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10699  {
10700  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10701  VMA_ASSERT(pCurrBlock);
10702  VmaAllocationRequest currRequest = {};
10703  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10704  currentFrameIndex,
10705  m_FrameInUseCount,
10706  m_BufferImageGranularity,
10707  size,
10708  alignment,
10709  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10710  suballocType,
10711  canMakeOtherLost,
10712  strategy,
10713  &currRequest))
10714  {
10715  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10716  if(pBestRequestBlock == VMA_NULL ||
10717  currRequestCost < bestRequestCost ||
10718  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10719  {
10720  pBestRequestBlock = pCurrBlock;
10721  bestRequest = currRequest;
10722  bestRequestCost = currRequestCost;
10723 
10724  if(bestRequestCost == 0 ||
10725  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10726  {
10727  break;
10728  }
10729  }
10730  }
10731  }
10732  }
10733 
10734  if(pBestRequestBlock != VMA_NULL)
10735  {
10736  if(mapped)
10737  {
10738  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10739  if(res != VK_SUCCESS)
10740  {
10741  return res;
10742  }
10743  }
10744 
10745  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10746  currentFrameIndex,
10747  m_FrameInUseCount,
10748  &bestRequest))
10749  {
10750  // We no longer have an empty block.
10751  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10752  {
10753  m_HasEmptyBlock = false;
10754  }
10755  // Allocate from this pBlock.
10756  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10757  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10758  (*pAllocation)->InitBlockAllocation(
10759  hCurrentPool,
10760  pBestRequestBlock,
10761  bestRequest.offset,
10762  alignment,
10763  size,
10764  suballocType,
10765  mapped,
10766  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10767  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10768  VMA_DEBUG_LOG(" Returned from existing block");
10769  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10770  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10771  {
10772  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10773  }
10774  if(IsCorruptionDetectionEnabled())
10775  {
10776  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10777  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10778  }
10779  return VK_SUCCESS;
10780  }
10781  // else: Some allocations must have been touched while we are here. Next try.
10782  }
10783  else
10784  {
10785  // Could not find place in any of the blocks - break outer loop.
10786  break;
10787  }
10788  }
10789  /* Maximum number of tries exceeded - a very unlikely event when many other
10790  threads are simultaneously touching allocations, making it impossible to make
10791  them lost at the same time as we try to allocate. */
10792  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10793  {
10794  return VK_ERROR_TOO_MANY_OBJECTS;
10795  }
10796  }
10797 
10798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10799 }
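 
// The three numbered stages above are tried in order of increasing cost:
// (1) reuse free space in an existing block, (2) create a new block, halving
// the preferred size up to three times on failure, (3) make other allocations
// lost. A minimal sketch of a pool configured for the ring-buffer path that
// this function special-cases (real API, hypothetical parameter values):
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;
//   poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   poolInfo.blockSize = 64ull * 1024 * 1024; // single fixed-size block
//   poolInfo.maxBlockCount = 1; // required for lost allocations / ring buffer
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);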
10800 
10801 void VmaBlockVector::Free(
10802  VmaAllocation hAllocation)
10803 {
10804  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10805 
10806  // Scope for lock.
10807  {
10808  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10809 
10810  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10811 
10812  if(IsCorruptionDetectionEnabled())
10813  {
10814  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10815  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10816  }
10817 
10818  if(hAllocation->IsPersistentMap())
10819  {
10820  pBlock->Unmap(m_hAllocator, 1);
10821  }
10822 
10823  pBlock->m_pMetadata->Free(hAllocation);
10824  VMA_HEAVY_ASSERT(pBlock->Validate());
10825 
10826  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10827 
10828  // pBlock became empty after this deallocation.
10829  if(pBlock->m_pMetadata->IsEmpty())
10830  {
10831  // We already have an empty block - we don't want two, so delete this one.
10832  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10833  {
10834  pBlockToDelete = pBlock;
10835  Remove(pBlock);
10836  }
10837  // We now have first empty block.
10838  else
10839  {
10840  m_HasEmptyBlock = true;
10841  }
10842  }
10843  // pBlock didn't become empty, but we have another empty block - find and free that one.
10844  // (This is optional - just a heuristic.)
10845  else if(m_HasEmptyBlock)
10846  {
10847  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10848  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10849  {
10850  pBlockToDelete = pLastBlock;
10851  m_Blocks.pop_back();
10852  m_HasEmptyBlock = false;
10853  }
10854  }
10855 
10856  IncrementallySortBlocks();
10857  }
10858 
10859  // Destruction of an empty block. Deferred until this point, outside of the mutex
10860  // lock, for performance reasons.
10861  if(pBlockToDelete != VMA_NULL)
10862  {
10863  VMA_DEBUG_LOG(" Deleted empty block");
10864  pBlockToDelete->Destroy(m_hAllocator);
10865  vma_delete(m_hAllocator, pBlockToDelete);
10866  }
10867 }
10868 
10869 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10870 {
10871  VkDeviceSize result = 0;
10872  for(size_t i = m_Blocks.size(); i--; )
10873  {
10874  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10875  if(result >= m_PreferredBlockSize)
10876  {
10877  break;
10878  }
10879  }
10880  return result;
10881 }
10882 
10883 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10884 {
10885  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10886  {
10887  if(m_Blocks[blockIndex] == pBlock)
10888  {
10889  VmaVectorRemove(m_Blocks, blockIndex);
10890  return;
10891  }
10892  }
10893  VMA_ASSERT(0);
10894 }
10895 
10896 void VmaBlockVector::IncrementallySortBlocks()
10897 {
10898  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10899  {
10900  // Bubble sort only until first swap.
10901  for(size_t i = 1; i < m_Blocks.size(); ++i)
10902  {
10903  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10904  {
10905  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10906  return;
10907  }
10908  }
10909  }
10910 }
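 
// The idea behind sorting "only until first swap": Allocate() and Free()
// change the free size of at most one block at a time, so m_Blocks stays
// close to sorted (ascending sum of free space) and one bubble step per call
// is usually enough. E.g. with free sizes (1, 8, 4), a single call swaps the
// last two blocks, restoring (1, 4, 8).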
10911 
10912 VkResult VmaBlockVector::AllocateFromBlock(
10913  VmaDeviceMemoryBlock* pBlock,
10914  VmaPool hCurrentPool,
10915  uint32_t currentFrameIndex,
10916  VkDeviceSize size,
10917  VkDeviceSize alignment,
10918  VmaAllocationCreateFlags allocFlags,
10919  void* pUserData,
10920  VmaSuballocationType suballocType,
10921  uint32_t strategy,
10922  VmaAllocation* pAllocation)
10923 {
10924  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10925  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10926  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10927  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10928 
10929  VmaAllocationRequest currRequest = {};
10930  if(pBlock->m_pMetadata->CreateAllocationRequest(
10931  currentFrameIndex,
10932  m_FrameInUseCount,
10933  m_BufferImageGranularity,
10934  size,
10935  alignment,
10936  isUpperAddress,
10937  suballocType,
10938  false, // canMakeOtherLost
10939  strategy,
10940  &currRequest))
10941  {
10942  // Allocate from pCurrBlock.
10943  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10944 
10945  if(mapped)
10946  {
10947  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10948  if(res != VK_SUCCESS)
10949  {
10950  return res;
10951  }
10952  }
10953 
10954  // We no longer have an empty block.
10955  if(pBlock->m_pMetadata->IsEmpty())
10956  {
10957  m_HasEmptyBlock = false;
10958  }
10959 
10960  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10961  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10962  (*pAllocation)->InitBlockAllocation(
10963  hCurrentPool,
10964  pBlock,
10965  currRequest.offset,
10966  alignment,
10967  size,
10968  suballocType,
10969  mapped,
10970  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10971  VMA_HEAVY_ASSERT(pBlock->Validate());
10972  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10973  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10974  {
10975  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10976  }
10977  if(IsCorruptionDetectionEnabled())
10978  {
10979  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10980  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10981  }
10982  return VK_SUCCESS;
10983  }
10984  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10985 }
10986 
10987 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10988 {
10989  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10990  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10991  allocInfo.allocationSize = blockSize;
10992  VkDeviceMemory mem = VK_NULL_HANDLE;
10993  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10994  if(res < 0)
10995  {
10996  return res;
10997  }
10998 
10999  // New VkDeviceMemory successfully created.
11000 
11001  // Create a new block object for it.
11002  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11003  pBlock->Init(
11004  m_hAllocator,
11005  m_MemoryTypeIndex,
11006  mem,
11007  allocInfo.allocationSize,
11008  m_NextBlockId++,
11009  m_Algorithm);
11010 
11011  m_Blocks.push_back(pBlock);
11012  if(pNewBlockIndex != VMA_NULL)
11013  {
11014  *pNewBlockIndex = m_Blocks.size() - 1;
11015  }
11016 
11017  return VK_SUCCESS;
11018 }
11019 
11020 #if VMA_STATS_STRING_ENABLED
11021 
11022 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11023 {
11024  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11025 
11026  json.BeginObject();
11027 
11028  if(m_IsCustomPool)
11029  {
11030  json.WriteString("MemoryTypeIndex");
11031  json.WriteNumber(m_MemoryTypeIndex);
11032 
11033  json.WriteString("BlockSize");
11034  json.WriteNumber(m_PreferredBlockSize);
11035 
11036  json.WriteString("BlockCount");
11037  json.BeginObject(true);
11038  if(m_MinBlockCount > 0)
11039  {
11040  json.WriteString("Min");
11041  json.WriteNumber((uint64_t)m_MinBlockCount);
11042  }
11043  if(m_MaxBlockCount < SIZE_MAX)
11044  {
11045  json.WriteString("Max");
11046  json.WriteNumber((uint64_t)m_MaxBlockCount);
11047  }
11048  json.WriteString("Cur");
11049  json.WriteNumber((uint64_t)m_Blocks.size());
11050  json.EndObject();
11051 
11052  if(m_FrameInUseCount > 0)
11053  {
11054  json.WriteString("FrameInUseCount");
11055  json.WriteNumber(m_FrameInUseCount);
11056  }
11057 
11058  if(m_Algorithm != 0)
11059  {
11060  json.WriteString("Algorithm");
11061  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
11062  }
11063  }
11064  else
11065  {
11066  json.WriteString("PreferredBlockSize");
11067  json.WriteNumber(m_PreferredBlockSize);
11068  }
11069 
11070  json.WriteString("Blocks");
11071  json.BeginObject();
11072  for(size_t i = 0; i < m_Blocks.size(); ++i)
11073  {
11074  json.BeginString();
11075  json.ContinueString(m_Blocks[i]->GetId());
11076  json.EndString();
11077 
11078  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11079  }
11080  json.EndObject();
11081 
11082  json.EndObject();
11083 }
11084 
11085 #endif // #if VMA_STATS_STRING_ENABLED
11086 
11087 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11088  VmaAllocator hAllocator,
11089  uint32_t currentFrameIndex)
11090 {
11091  if(m_pDefragmentator == VMA_NULL)
11092  {
11093  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11094  hAllocator,
11095  this,
11096  currentFrameIndex);
11097  }
11098 
11099  return m_pDefragmentator;
11100 }
11101 
11102 VkResult VmaBlockVector::Defragment(
11103  VmaDefragmentationStats* pDefragmentationStats,
11104  VkDeviceSize& maxBytesToMove,
11105  uint32_t& maxAllocationsToMove)
11106 {
11107  if(m_pDefragmentator == VMA_NULL)
11108  {
11109  return VK_SUCCESS;
11110  }
11111 
11112  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11113 
11114  // Defragment.
11115  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11116 
11117  // Accumulate statistics.
11118  if(pDefragmentationStats != VMA_NULL)
11119  {
11120  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11121  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11122  pDefragmentationStats->bytesMoved += bytesMoved;
11123  pDefragmentationStats->allocationsMoved += allocationsMoved;
11124  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11125  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11126  maxBytesToMove -= bytesMoved;
11127  maxAllocationsToMove -= allocationsMoved;
11128  }
11129 
11130  // Free empty blocks.
11131  m_HasEmptyBlock = false;
11132  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11133  {
11134  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11135  if(pBlock->m_pMetadata->IsEmpty())
11136  {
11137  if(m_Blocks.size() > m_MinBlockCount)
11138  {
11139  if(pDefragmentationStats != VMA_NULL)
11140  {
11141  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11142  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11143  }
11144 
11145  VmaVectorRemove(m_Blocks, blockIndex);
11146  pBlock->Destroy(m_hAllocator);
11147  vma_delete(m_hAllocator, pBlock);
11148  }
11149  else
11150  {
11151  m_HasEmptyBlock = true;
11152  }
11153  }
11154  }
11155 
11156  return result;
11157 }
11158 
11159 void VmaBlockVector::DestroyDefragmentator()
11160 {
11161  if(m_pDefragmentator != VMA_NULL)
11162  {
11163  vma_delete(m_hAllocator, m_pDefragmentator);
11164  m_pDefragmentator = VMA_NULL;
11165  }
11166 }
11167 
11168 void VmaBlockVector::MakePoolAllocationsLost(
11169  uint32_t currentFrameIndex,
11170  size_t* pLostAllocationCount)
11171 {
11172  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11173  size_t lostAllocationCount = 0;
11174  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11175  {
11176  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11177  VMA_ASSERT(pBlock);
11178  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11179  }
11180  if(pLostAllocationCount != VMA_NULL)
11181  {
11182  *pLostAllocationCount = lostAllocationCount;
11183  }
11184 }
11185 
11186 VkResult VmaBlockVector::CheckCorruption()
11187 {
11188  if(!IsCorruptionDetectionEnabled())
11189  {
11190  return VK_ERROR_FEATURE_NOT_PRESENT;
11191  }
11192 
11193  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11194  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11195  {
11196  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11197  VMA_ASSERT(pBlock);
11198  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11199  if(res != VK_SUCCESS)
11200  {
11201  return res;
11202  }
11203  }
11204  return VK_SUCCESS;
11205 }
11206 
11207 void VmaBlockVector::AddStats(VmaStats* pStats)
11208 {
11209  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11210  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11211 
11212  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11213 
11214  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11215  {
11216  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11217  VMA_ASSERT(pBlock);
11218  VMA_HEAVY_ASSERT(pBlock->Validate());
11219  VmaStatInfo allocationStatInfo;
11220  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11221  VmaAddStatInfo(pStats->total, allocationStatInfo);
11222  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11223  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11224  }
11225 }
11226 
11227 ////////////////////////////////////////////////////////////////////////////////
11228 // VmaDefragmentator members definition
11229 
11230 VmaDefragmentator::VmaDefragmentator(
11231  VmaAllocator hAllocator,
11232  VmaBlockVector* pBlockVector,
11233  uint32_t currentFrameIndex) :
11234  m_hAllocator(hAllocator),
11235  m_pBlockVector(pBlockVector),
11236  m_CurrentFrameIndex(currentFrameIndex),
11237  m_BytesMoved(0),
11238  m_AllocationsMoved(0),
11239  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11240  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11241 {
11242  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11243 }
11244 
11245 VmaDefragmentator::~VmaDefragmentator()
11246 {
11247  for(size_t i = m_Blocks.size(); i--; )
11248  {
11249  vma_delete(m_hAllocator, m_Blocks[i]);
11250  }
11251 }
11252 
11253 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11254 {
11255  AllocationInfo allocInfo;
11256  allocInfo.m_hAllocation = hAlloc;
11257  allocInfo.m_pChanged = pChanged;
11258  m_Allocations.push_back(allocInfo);
11259 }
11260 
11261 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11262 {
11263  // It has already been mapped for defragmentation.
11264  if(m_pMappedDataForDefragmentation)
11265  {
11266  *ppMappedData = m_pMappedDataForDefragmentation;
11267  return VK_SUCCESS;
11268  }
11269 
11270  // It is already persistently mapped.
11271  if(m_pBlock->GetMappedData())
11272  {
11273  *ppMappedData = m_pBlock->GetMappedData();
11274  return VK_SUCCESS;
11275  }
11276 
11277  // Map on first usage.
11278  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11279  *ppMappedData = m_pMappedDataForDefragmentation;
11280  return res;
11281 }
11282 
11283 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11284 {
11285  if(m_pMappedDataForDefragmentation != VMA_NULL)
11286  {
11287  m_pBlock->Unmap(hAllocator, 1);
11288  }
11289 }
11290 
11291 VkResult VmaDefragmentator::DefragmentRound(
11292  VkDeviceSize maxBytesToMove,
11293  uint32_t maxAllocationsToMove)
11294 {
11295  if(m_Blocks.empty())
11296  {
11297  return VK_SUCCESS;
11298  }
11299 
11300  size_t srcBlockIndex = m_Blocks.size() - 1;
11301  size_t srcAllocIndex = SIZE_MAX;
11302  for(;;)
11303  {
11304  // 1. Find next allocation to move.
11305  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11306  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11307  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11308  {
11309  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11310  {
11311  // Finished: no more allocations to process.
11312  if(srcBlockIndex == 0)
11313  {
11314  return VK_SUCCESS;
11315  }
11316  else
11317  {
11318  --srcBlockIndex;
11319  srcAllocIndex = SIZE_MAX;
11320  }
11321  }
11322  else
11323  {
11324  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11325  }
11326  }
11327 
11328  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11329  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11330 
11331  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11332  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11333  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11334  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11335 
11336  // 2. Try to find new place for this allocation in preceding or current block.
11337  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11338  {
11339  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11340  VmaAllocationRequest dstAllocRequest;
11341  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11342  m_CurrentFrameIndex,
11343  m_pBlockVector->GetFrameInUseCount(),
11344  m_pBlockVector->GetBufferImageGranularity(),
11345  size,
11346  alignment,
11347  false, // upperAddress
11348  suballocType,
11349  false, // canMakeOtherLost
11350  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, // strategy
11351  &dstAllocRequest) &&
11352  MoveMakesSense(
11353  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11354  {
11355  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11356 
11357  // Reached limit on number of allocations or bytes to move.
11358  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11359  (m_BytesMoved + size > maxBytesToMove))
11360  {
11361  return VK_INCOMPLETE;
11362  }
11363 
11364  void* pDstMappedData = VMA_NULL;
11365  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11366  if(res != VK_SUCCESS)
11367  {
11368  return res;
11369  }
11370 
11371  void* pSrcMappedData = VMA_NULL;
11372  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11373  if(res != VK_SUCCESS)
11374  {
11375  return res;
11376  }
11377 
11378  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11379  memcpy(
11380  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11381  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11382  static_cast<size_t>(size));
11383 
11384  if(VMA_DEBUG_MARGIN > 0)
11385  {
11386  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11387  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11388  }
11389 
11390  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11391  dstAllocRequest,
11392  suballocType,
11393  size,
11394  false, // upperAddress
11395  allocInfo.m_hAllocation);
11396  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11397 
11398  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11399 
11400  if(allocInfo.m_pChanged != VMA_NULL)
11401  {
11402  *allocInfo.m_pChanged = VK_TRUE;
11403  }
11404 
11405  ++m_AllocationsMoved;
11406  m_BytesMoved += size;
11407 
11408  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11409 
11410  break;
11411  }
11412  }
11413 
11414  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
11415 
11416  if(srcAllocIndex > 0)
11417  {
11418  --srcAllocIndex;
11419  }
11420  else
11421  {
11422  if(srcBlockIndex > 0)
11423  {
11424  --srcBlockIndex;
11425  srcAllocIndex = SIZE_MAX;
11426  }
11427  else
11428  {
11429  return VK_SUCCESS;
11430  }
11431  }
11432  }
11433 }
11434 
11435 VkResult VmaDefragmentator::Defragment(
11436  VkDeviceSize maxBytesToMove,
11437  uint32_t maxAllocationsToMove)
11438 {
11439  if(m_Allocations.empty())
11440  {
11441  return VK_SUCCESS;
11442  }
11443 
11444  // Create block info for each block.
11445  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11446  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11447  {
11448  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11449  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11450  m_Blocks.push_back(pBlockInfo);
11451  }
11452 
11453  // Sort them by m_pBlock pointer value.
11454  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11455 
11456  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
11457  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11458  {
11459  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11460  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
11461  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11462  {
11463  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11464  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11465  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11466  {
11467  (*it)->m_Allocations.push_back(allocInfo);
11468  }
11469  else
11470  {
11471  VMA_ASSERT(0);
11472  }
11473  }
11474  }
11475  m_Allocations.clear();
11476 
11477  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11478  {
11479  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11480  pBlockInfo->CalcHasNonMovableAllocations();
11481  pBlockInfo->SortAllocationsBySizeDescecnding();
11482  }
11483 
11484  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
11485  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11486 
11487  // Execute defragmentation rounds (the main part).
11488  VkResult result = VK_SUCCESS;
11489  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11490  {
11491  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11492  }
11493 
11494  // Unmap blocks that were mapped for defragmentation.
11495  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11496  {
11497  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11498  }
11499 
11500  return result;
11501 }
11502 
11503 bool VmaDefragmentator::MoveMakesSense(
11504  size_t dstBlockIndex, VkDeviceSize dstOffset,
11505  size_t srcBlockIndex, VkDeviceSize srcOffset)
11506 {
11507  if(dstBlockIndex < srcBlockIndex)
11508  {
11509  return true;
11510  }
11511  if(dstBlockIndex > srcBlockIndex)
11512  {
11513  return false;
11514  }
11515  if(dstOffset < srcOffset)
11516  {
11517  return true;
11518  }
11519  return false;
11520 }
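// Added note: MoveMakesSense is a strict lexicographic "less-than" on
// (blockIndex, offset) pairs - a move only helps if it lands strictly earlier
// in the block/offset order. A minimal equivalent sketch (illustrative only,
// not part of the library):
/*
#include <tuple>

static bool MoveMakesSenseSketch(
    size_t dstBlockIndex, VkDeviceSize dstOffset,
    size_t srcBlockIndex, VkDeviceSize srcOffset)
{
    // std::tie compares element-wise: block index first, then offset.
    return std::tie(dstBlockIndex, dstOffset) < std::tie(srcBlockIndex, srcOffset);
}
*/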
11521 
11522 ////////////////////////////////////////////////////////////////////////////////
11523 // VmaRecorder
11524 
11525 #if VMA_RECORDING_ENABLED
11526 
11527 VmaRecorder::VmaRecorder() :
11528  m_UseMutex(true),
11529  m_Flags(0),
11530  m_File(VMA_NULL),
11531  m_Freq(INT64_MAX),
11532  m_StartCounter(INT64_MAX)
11533 {
11534 }
11535 
11536 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11537 {
11538  m_UseMutex = useMutex;
11539  m_Flags = settings.flags;
11540 
11541  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11542  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11543 
11544  // Open file for writing.
11545  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11546  if(err != 0)
11547  {
11548  return VK_ERROR_INITIALIZATION_FAILED;
11549  }
11550 
11551  // Write header.
11552  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11553  fprintf(m_File, "%s\n", "1,4");
11554 
11555  return VK_SUCCESS;
11556 }
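// Added usage sketch (hypothetical client code, not part of this file):
// recording reaches VmaRecorder::Init via VmaAllocatorCreateInfo::pRecordSettings.
// physicalDevice and device are assumed to be valid handles owned by the caller.
/*
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush after every call
recordSettings.pFilePath = "vma_recording.csv";         // any writable path

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/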
11557 
11558 VmaRecorder::~VmaRecorder()
11559 {
11560  if(m_File != VMA_NULL)
11561  {
11562  fclose(m_File);
11563  }
11564 }
11565 
11566 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11567 {
11568  CallParams callParams;
11569  GetBasicParams(callParams);
11570 
11571  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11572  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11573  Flush();
11574 }
11575 
11576 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11577 {
11578  CallParams callParams;
11579  GetBasicParams(callParams);
11580 
11581  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11582  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11583  Flush();
11584 }
11585 
11586 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11587 {
11588  CallParams callParams;
11589  GetBasicParams(callParams);
11590 
11591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11592  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11593  createInfo.memoryTypeIndex,
11594  createInfo.flags,
11595  createInfo.blockSize,
11596  (uint64_t)createInfo.minBlockCount,
11597  (uint64_t)createInfo.maxBlockCount,
11598  createInfo.frameInUseCount,
11599  pool);
11600  Flush();
11601 }
11602 
11603 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11604 {
11605  CallParams callParams;
11606  GetBasicParams(callParams);
11607 
11608  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11609  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11610  pool);
11611  Flush();
11612 }
11613 
11614 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11615  const VkMemoryRequirements& vkMemReq,
11616  const VmaAllocationCreateInfo& createInfo,
11617  VmaAllocation allocation)
11618 {
11619  CallParams callParams;
11620  GetBasicParams(callParams);
11621 
11622  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11623  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11624  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11625  vkMemReq.size,
11626  vkMemReq.alignment,
11627  vkMemReq.memoryTypeBits,
11628  createInfo.flags,
11629  createInfo.usage,
11630  createInfo.requiredFlags,
11631  createInfo.preferredFlags,
11632  createInfo.memoryTypeBits,
11633  createInfo.pool,
11634  allocation,
11635  userDataStr.GetString());
11636  Flush();
11637 }
11638 
11639 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11640  const VkMemoryRequirements& vkMemReq,
11641  bool requiresDedicatedAllocation,
11642  bool prefersDedicatedAllocation,
11643  const VmaAllocationCreateInfo& createInfo,
11644  VmaAllocation allocation)
11645 {
11646  CallParams callParams;
11647  GetBasicParams(callParams);
11648 
11649  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11650  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11651  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11652  vkMemReq.size,
11653  vkMemReq.alignment,
11654  vkMemReq.memoryTypeBits,
11655  requiresDedicatedAllocation ? 1 : 0,
11656  prefersDedicatedAllocation ? 1 : 0,
11657  createInfo.flags,
11658  createInfo.usage,
11659  createInfo.requiredFlags,
11660  createInfo.preferredFlags,
11661  createInfo.memoryTypeBits,
11662  createInfo.pool,
11663  allocation,
11664  userDataStr.GetString());
11665  Flush();
11666 }
11667 
11668 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11669  const VkMemoryRequirements& vkMemReq,
11670  bool requiresDedicatedAllocation,
11671  bool prefersDedicatedAllocation,
11672  const VmaAllocationCreateInfo& createInfo,
11673  VmaAllocation allocation)
11674 {
11675  CallParams callParams;
11676  GetBasicParams(callParams);
11677 
11678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11679  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11680  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11681  vkMemReq.size,
11682  vkMemReq.alignment,
11683  vkMemReq.memoryTypeBits,
11684  requiresDedicatedAllocation ? 1 : 0,
11685  prefersDedicatedAllocation ? 1 : 0,
11686  createInfo.flags,
11687  createInfo.usage,
11688  createInfo.requiredFlags,
11689  createInfo.preferredFlags,
11690  createInfo.memoryTypeBits,
11691  createInfo.pool,
11692  allocation,
11693  userDataStr.GetString());
11694  Flush();
11695 }
11696 
11697 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11698  VmaAllocation allocation)
11699 {
11700  CallParams callParams;
11701  GetBasicParams(callParams);
11702 
11703  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11704  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11705  allocation);
11706  Flush();
11707 }
11708 
11709 void VmaRecorder::RecordResizeAllocation(
11710  uint32_t frameIndex,
11711  VmaAllocation allocation,
11712  VkDeviceSize newSize)
11713 {
11714  CallParams callParams;
11715  GetBasicParams(callParams);
11716 
11717  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11718  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11719  allocation, newSize);
11720  Flush();
11721 }
11722 
11723 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11724  VmaAllocation allocation,
11725  const void* pUserData)
11726 {
11727  CallParams callParams;
11728  GetBasicParams(callParams);
11729 
11730  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11731  UserDataString userDataStr(
11732  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11733  pUserData);
11734  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11735  allocation,
11736  userDataStr.GetString());
11737  Flush();
11738 }
11739 
11740 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11741  VmaAllocation allocation)
11742 {
11743  CallParams callParams;
11744  GetBasicParams(callParams);
11745 
11746  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11747  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11748  allocation);
11749  Flush();
11750 }
11751 
11752 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11753  VmaAllocation allocation)
11754 {
11755  CallParams callParams;
11756  GetBasicParams(callParams);
11757 
11758  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11759  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11760  allocation);
11761  Flush();
11762 }
11763 
11764 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11765  VmaAllocation allocation)
11766 {
11767  CallParams callParams;
11768  GetBasicParams(callParams);
11769 
11770  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11771  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11772  allocation);
11773  Flush();
11774 }
11775 
11776 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11777  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11778 {
11779  CallParams callParams;
11780  GetBasicParams(callParams);
11781 
11782  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11783  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11784  allocation,
11785  offset,
11786  size);
11787  Flush();
11788 }
11789 
11790 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11791  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11792 {
11793  CallParams callParams;
11794  GetBasicParams(callParams);
11795 
11796  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11797  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11798  allocation,
11799  offset,
11800  size);
11801  Flush();
11802 }
11803 
11804 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11805  const VkBufferCreateInfo& bufCreateInfo,
11806  const VmaAllocationCreateInfo& allocCreateInfo,
11807  VmaAllocation allocation)
11808 {
11809  CallParams callParams;
11810  GetBasicParams(callParams);
11811 
11812  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11813  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11814  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11815  bufCreateInfo.flags,
11816  bufCreateInfo.size,
11817  bufCreateInfo.usage,
11818  bufCreateInfo.sharingMode,
11819  allocCreateInfo.flags,
11820  allocCreateInfo.usage,
11821  allocCreateInfo.requiredFlags,
11822  allocCreateInfo.preferredFlags,
11823  allocCreateInfo.memoryTypeBits,
11824  allocCreateInfo.pool,
11825  allocation,
11826  userDataStr.GetString());
11827  Flush();
11828 }
11829 
11830 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11831  const VkImageCreateInfo& imageCreateInfo,
11832  const VmaAllocationCreateInfo& allocCreateInfo,
11833  VmaAllocation allocation)
11834 {
11835  CallParams callParams;
11836  GetBasicParams(callParams);
11837 
11838  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11839  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11840  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11841  imageCreateInfo.flags,
11842  imageCreateInfo.imageType,
11843  imageCreateInfo.format,
11844  imageCreateInfo.extent.width,
11845  imageCreateInfo.extent.height,
11846  imageCreateInfo.extent.depth,
11847  imageCreateInfo.mipLevels,
11848  imageCreateInfo.arrayLayers,
11849  imageCreateInfo.samples,
11850  imageCreateInfo.tiling,
11851  imageCreateInfo.usage,
11852  imageCreateInfo.sharingMode,
11853  imageCreateInfo.initialLayout,
11854  allocCreateInfo.flags,
11855  allocCreateInfo.usage,
11856  allocCreateInfo.requiredFlags,
11857  allocCreateInfo.preferredFlags,
11858  allocCreateInfo.memoryTypeBits,
11859  allocCreateInfo.pool,
11860  allocation,
11861  userDataStr.GetString());
11862  Flush();
11863 }
11864 
11865 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11866  VmaAllocation allocation)
11867 {
11868  CallParams callParams;
11869  GetBasicParams(callParams);
11870 
11871  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11872  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11873  allocation);
11874  Flush();
11875 }
11876 
11877 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11878  VmaAllocation allocation)
11879 {
11880  CallParams callParams;
11881  GetBasicParams(callParams);
11882 
11883  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11884  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11885  allocation);
11886  Flush();
11887 }
11888 
11889 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11890  VmaAllocation allocation)
11891 {
11892  CallParams callParams;
11893  GetBasicParams(callParams);
11894 
11895  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11896  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11897  allocation);
11898  Flush();
11899 }
11900 
11901 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11902  VmaAllocation allocation)
11903 {
11904  CallParams callParams;
11905  GetBasicParams(callParams);
11906 
11907  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11908  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11909  allocation);
11910  Flush();
11911 }
11912 
11913 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11914  VmaPool pool)
11915 {
11916  CallParams callParams;
11917  GetBasicParams(callParams);
11918 
11919  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11920  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11921  pool);
11922  Flush();
11923 }
11924 
11925 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11926 {
11927  if(pUserData != VMA_NULL)
11928  {
11929  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11930  {
11931  m_Str = (const char*)pUserData;
11932  }
11933  else
11934  {
11935  sprintf_s(m_PtrStr, "%p", pUserData);
11936  m_Str = m_PtrStr;
11937  }
11938  }
11939  else
11940  {
11941  m_Str = "";
11942  }
11943 }
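// Added note: the constructor above has exactly two formatting modes.
// Illustrative values (not taken from a real run):
//   with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT:
//     pUserData = "MyTexture"        -> GetString() == "MyTexture"
//   without the flag, the pointer itself is printed:
//     pUserData = (void*)someHandle  -> GetString() == e.g. "0000023F5A1B2C40" (platform-dependent)
//   pUserData == VMA_NULL            -> GetString() == ""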
11944 
11945 void VmaRecorder::WriteConfiguration(
11946  const VkPhysicalDeviceProperties& devProps,
11947  const VkPhysicalDeviceMemoryProperties& memProps,
11948  bool dedicatedAllocationExtensionEnabled)
11949 {
11950  fprintf(m_File, "Config,Begin\n");
11951 
11952  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11953  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11954  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11955  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11956  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11957  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11958 
11959  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11960  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11961  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11962 
11963  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11964  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11965  {
11966  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11967  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11968  }
11969  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11970  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11971  {
11972  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11973  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11974  }
11975 
11976  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11977 
11978  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11979  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11980  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11981  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11982  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11983  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11984  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11985  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11986  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11987 
11988  fprintf(m_File, "Config,End\n");
11989 }
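// Added note: given the fprintf calls in Init and WriteConfiguration above, a
// freshly created recording file starts like this (values are illustrative):
/*
Vulkan Memory Allocator,Calls recording
1,4
Config,Begin
PhysicalDevice,apiVersion,4198400
PhysicalDevice,vendorID,4098
...
PhysicalDeviceMemory,HeapCount,2
...
Config,End
*/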
11990 
11991 void VmaRecorder::GetBasicParams(CallParams& outParams)
11992 {
11993  outParams.threadId = GetCurrentThreadId();
11994 
11995  LARGE_INTEGER counter;
11996  QueryPerformanceCounter(&counter);
11997  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11998 }
11999 
12000 void VmaRecorder::Flush()
12001 {
12002  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
12003  {
12004  fflush(m_File);
12005  }
12006 }
12007 
12008 #endif // #if VMA_RECORDING_ENABLED
12009 
12010 ////////////////////////////////////////////////////////////////////////////////
12011 // VmaAllocator_T
12012 
12013 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
12014  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
12015  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
12016  m_hDevice(pCreateInfo->device),
12017  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
12018  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
12019  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
12020  m_PreferredLargeHeapBlockSize(0),
12021  m_PhysicalDevice(pCreateInfo->physicalDevice),
12022  m_CurrentFrameIndex(0),
12023  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
12024  m_NextPoolId(0)
12025 #if VMA_RECORDING_ENABLED
12026  ,m_pRecorder(VMA_NULL)
12027 #endif
12028 {
12029  if(VMA_DEBUG_DETECT_CORRUPTION)
12030  {
12031  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
12032  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
12033  }
12034 
12035  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
12036 
12037 #if !(VMA_DEDICATED_ALLOCATION)
12038  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
12039  {
12040  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
12041  }
12042 #endif
12043 
12044  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
12045  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
12046  memset(&m_MemProps, 0, sizeof(m_MemProps));
12047 
12048  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
12049  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
12050 
12051  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12052  {
12053  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
12054  }
12055 
12056  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
12057  {
12058  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
12059  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
12060  }
12061 
12062  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
12063 
12064  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
12065  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
12066 
12067  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
12068  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
12069  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
12070  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
12071 
12072  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
12073  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
12074 
12075  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
12076  {
12077  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12078  {
12079  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12080  if(limit != VK_WHOLE_SIZE)
12081  {
12082  m_HeapSizeLimit[heapIndex] = limit;
12083  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12084  {
12085  m_MemProps.memoryHeaps[heapIndex].size = limit;
12086  }
12087  }
12088  }
12089  }
12090 
12091  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12092  {
12093  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12094 
12095  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12096  this,
12097  memTypeIndex,
12098  preferredBlockSize,
12099  0,
12100  SIZE_MAX,
12101  GetBufferImageGranularity(),
12102  pCreateInfo->frameInUseCount,
12103  false, // isCustomPool
12104  false, // explicitBlockSize
12105  false); // linearAlgorithm
12106  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12107  // because minBlockCount is 0.
12108  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12109 
12110  }
12111 }
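// Added usage sketch (hypothetical client code): the pHeapSizeLimit path
// handled by the constructor above. Entries equal to VK_WHOLE_SIZE leave a
// heap unlimited; smaller values both cap allocations and shrink the heap size
// reported in m_MemProps.
/*
VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapLimits[i] = VK_WHOLE_SIZE;     // unlimited by default
heapLimits[0] = 256ull * 1024 * 1024;  // cap heap 0 at 256 MiB

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pHeapSizeLimit = heapLimits;
*/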
12112 
12113 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12114 {
12115  VkResult res = VK_SUCCESS;
12116 
12117  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12118  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12119  {
12120 #if VMA_RECORDING_ENABLED
12121  m_pRecorder = vma_new(this, VmaRecorder)();
12122  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12123  if(res != VK_SUCCESS)
12124  {
12125  return res;
12126  }
12127  m_pRecorder->WriteConfiguration(
12128  m_PhysicalDeviceProperties,
12129  m_MemProps,
12130  m_UseKhrDedicatedAllocation);
12131  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12132 #else
12133  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12134  return VK_ERROR_FEATURE_NOT_PRESENT;
12135 #endif
12136  }
12137 
12138  return res;
12139 }
12140 
12141 VmaAllocator_T::~VmaAllocator_T()
12142 {
12143 #if VMA_RECORDING_ENABLED
12144  if(m_pRecorder != VMA_NULL)
12145  {
12146  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12147  vma_delete(this, m_pRecorder);
12148  }
12149 #endif
12150 
12151  VMA_ASSERT(m_Pools.empty());
12152 
12153  for(size_t i = GetMemoryTypeCount(); i--; )
12154  {
12155  vma_delete(this, m_pDedicatedAllocations[i]);
12156  vma_delete(this, m_pBlockVectors[i]);
12157  }
12158 }
12159 
12160 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12161 {
12162 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12163  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12164  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12165  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12166  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12167  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12168  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12169  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12170  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12171  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12172  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12173  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12174  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12175  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12176  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12177  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12178  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12179 #if VMA_DEDICATED_ALLOCATION
12180  if(m_UseKhrDedicatedAllocation)
12181  {
12182  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12183  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12184  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12185  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12186  }
12187 #endif // #if VMA_DEDICATED_ALLOCATION
12188 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12189 
12190 #define VMA_COPY_IF_NOT_NULL(funcName) \
12191  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12192 
12193  if(pVulkanFunctions != VMA_NULL)
12194  {
12195  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12196  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12197  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12198  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12199  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12200  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12201  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12202  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12203  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12204  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12205  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12206  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12207  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12208  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12209  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12210  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12211 #if VMA_DEDICATED_ALLOCATION
12212  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12213  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12214 #endif
12215  }
12216 
12217 #undef VMA_COPY_IF_NOT_NULL
12218 
12219  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12220  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12221  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12222  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12223  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12224  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12225  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12226  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12227  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12228  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12229  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12230  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12231  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12232  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12233  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12234  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12235  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12236  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12237 #if VMA_DEDICATED_ALLOCATION
12238  if(m_UseKhrDedicatedAllocation)
12239  {
12240  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12241  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12242  }
12243 #endif
12244 }
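// Added usage sketch (hypothetical client code): satisfying the asserts above
// when VMA_STATIC_VULKAN_FUNCTIONS is 0 by passing the entry points explicitly
// via VmaAllocatorCreateInfo::pVulkanFunctions.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
// ...and so on for every member copied by VMA_COPY_IF_NOT_NULL above.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/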
12245 
12246 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12247 {
12248  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12249  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12250  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12251  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12252 }
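// Added worked example, assuming the default values of VMA_SMALL_HEAP_MAX_SIZE
// (1 GiB) and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB) - both are
// overridable macros, so actual numbers may differ:
//   heapSize = 256 MiB (<= 1 GiB, "small") -> preferred block = 256/8 = 32 MiB
//   heapSize =   8 GiB ("large")           -> preferred block = 256 MiB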
12253 
12254 VkResult VmaAllocator_T::AllocateMemoryOfType(
12255  VkDeviceSize size,
12256  VkDeviceSize alignment,
12257  bool dedicatedAllocation,
12258  VkBuffer dedicatedBuffer,
12259  VkImage dedicatedImage,
12260  const VmaAllocationCreateInfo& createInfo,
12261  uint32_t memTypeIndex,
12262  VmaSuballocationType suballocType,
12263  VmaAllocation* pAllocation)
12264 {
12265  VMA_ASSERT(pAllocation != VMA_NULL);
12266  VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12267 
12268  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12269 
12270  // If memory type is not HOST_VISIBLE, disable MAPPED.
12271  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12272  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12273  {
12274  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12275  }
12276 
12277  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12278  VMA_ASSERT(blockVector);
12279 
12280  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12281  bool preferDedicatedMemory =
12282  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12283  dedicatedAllocation ||
12284  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12285  size > preferredBlockSize / 2;
12286 
12287  if(preferDedicatedMemory &&
12288  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12289  finalCreateInfo.pool == VK_NULL_HANDLE)
12290  {
12291  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12292  }
12293 
12294  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12295  {
12296  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12297  {
12298  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12299  }
12300  else
12301  {
12302  return AllocateDedicatedMemory(
12303  size,
12304  suballocType,
12305  memTypeIndex,
12306  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12307  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12308  finalCreateInfo.pUserData,
12309  dedicatedBuffer,
12310  dedicatedImage,
12311  pAllocation);
12312  }
12313  }
12314  else
12315  {
12316  VkResult res = blockVector->Allocate(
12317  VK_NULL_HANDLE, // hCurrentPool
12318  m_CurrentFrameIndex.load(),
12319  size,
12320  alignment,
12321  finalCreateInfo,
12322  suballocType,
12323  pAllocation);
12324  if(res == VK_SUCCESS)
12325  {
12326  return res;
12327  }
12328 
12329  // Try dedicated memory.
12330  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12331  {
12332  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12333  }
12334  else
12335  {
12336  res = AllocateDedicatedMemory(
12337  size,
12338  suballocType,
12339  memTypeIndex,
12340  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12341  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12342  finalCreateInfo.pUserData,
12343  dedicatedBuffer,
12344  dedicatedImage,
12345  pAllocation);
12346  if(res == VK_SUCCESS)
12347  {
12348  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
12349  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12350  return VK_SUCCESS;
12351  }
12352  else
12353  {
12354  // Everything failed: Return error code.
12355  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12356  return res;
12357  }
12358  }
12359  }
12360 }
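// Added summary (descriptive only) of the fallback order implemented above:
// 1) the size heuristic may upgrade the request to DEDICATED_MEMORY_BIT,
// 2) DEDICATED requests go straight to AllocateDedicatedMemory,
// 3) otherwise the memory type's block vector is tried first,
// 4) on failure, dedicated memory is the fallback unless
//    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids any new VkDeviceMemory.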
12361 
12362 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12363  VkDeviceSize size,
12364  VmaSuballocationType suballocType,
12365  uint32_t memTypeIndex,
12366  bool map,
12367  bool isUserDataString,
12368  void* pUserData,
12369  VkBuffer dedicatedBuffer,
12370  VkImage dedicatedImage,
12371  VmaAllocation* pAllocation)
12372 {
12373  VMA_ASSERT(pAllocation);
12374 
12375  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12376  allocInfo.memoryTypeIndex = memTypeIndex;
12377  allocInfo.allocationSize = size;
12378 
12379 #if VMA_DEDICATED_ALLOCATION
12380  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12381  if(m_UseKhrDedicatedAllocation)
12382  {
12383  if(dedicatedBuffer != VK_NULL_HANDLE)
12384  {
12385  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12386  dedicatedAllocInfo.buffer = dedicatedBuffer;
12387  allocInfo.pNext = &dedicatedAllocInfo;
12388  }
12389  else if(dedicatedImage != VK_NULL_HANDLE)
12390  {
12391  dedicatedAllocInfo.image = dedicatedImage;
12392  allocInfo.pNext = &dedicatedAllocInfo;
12393  }
12394  }
12395 #endif // #if VMA_DEDICATED_ALLOCATION
12396 
12397  // Allocate VkDeviceMemory.
12398  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12399  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12400  if(res < 0)
12401  {
12402  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12403  return res;
12404  }
12405 
12406  void* pMappedData = VMA_NULL;
12407  if(map)
12408  {
12409  res = (*m_VulkanFunctions.vkMapMemory)(
12410  m_hDevice,
12411  hMemory,
12412  0,
12413  VK_WHOLE_SIZE,
12414  0,
12415  &pMappedData);
12416  if(res < 0)
12417  {
12418  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12419  FreeVulkanMemory(memTypeIndex, size, hMemory);
12420  return res;
12421  }
12422  }
12423 
12424  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12425  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12426  (*pAllocation)->SetUserData(this, pUserData);
12427  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12428  {
12429  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12430  }
12431 
12432  // Register it in m_pDedicatedAllocations.
12433  {
12434  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12435  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12436  VMA_ASSERT(pDedicatedAllocations);
12437  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12438  }
12439 
12440  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12441 
12442  return VK_SUCCESS;
12443 }
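// Added usage sketch (hypothetical client code) of a request that funnels into
// AllocateDedicatedMemory above; bufCreateInfo is assumed to be a filled
// VkBufferCreateInfo.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
*/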
12444 
12445 void VmaAllocator_T::GetBufferMemoryRequirements(
12446  VkBuffer hBuffer,
12447  VkMemoryRequirements& memReq,
12448  bool& requiresDedicatedAllocation,
12449  bool& prefersDedicatedAllocation) const
12450 {
12451 #if VMA_DEDICATED_ALLOCATION
12452  if(m_UseKhrDedicatedAllocation)
12453  {
12454  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12455  memReqInfo.buffer = hBuffer;
12456 
12457  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12458 
12459  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12460  memReq2.pNext = &memDedicatedReq;
12461 
12462  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12463 
12464  memReq = memReq2.memoryRequirements;
12465  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12466  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12467  }
12468  else
12469 #endif // #if VMA_DEDICATED_ALLOCATION
12470  {
12471  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12472  requiresDedicatedAllocation = false;
12473  prefersDedicatedAllocation = false;
12474  }
12475 }
12476 
12477 void VmaAllocator_T::GetImageMemoryRequirements(
12478  VkImage hImage,
12479  VkMemoryRequirements& memReq,
12480  bool& requiresDedicatedAllocation,
12481  bool& prefersDedicatedAllocation) const
12482 {
12483 #if VMA_DEDICATED_ALLOCATION
12484  if(m_UseKhrDedicatedAllocation)
12485  {
12486  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12487  memReqInfo.image = hImage;
12488 
12489  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12490 
12491  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12492  memReq2.pNext = &memDedicatedReq;
12493 
12494  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12495 
12496  memReq = memReq2.memoryRequirements;
12497  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12498  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12499  }
12500  else
12501 #endif // #if VMA_DEDICATED_ALLOCATION
12502  {
12503  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12504  requiresDedicatedAllocation = false;
12505  prefersDedicatedAllocation = false;
12506  }
12507 }
12508 
12509 VkResult VmaAllocator_T::AllocateMemory(
12510  const VkMemoryRequirements& vkMemReq,
12511  bool requiresDedicatedAllocation,
12512  bool prefersDedicatedAllocation,
12513  VkBuffer dedicatedBuffer,
12514  VkImage dedicatedImage,
12515  const VmaAllocationCreateInfo& createInfo,
12516  VmaSuballocationType suballocType,
12517  VmaAllocation* pAllocation)
12518 {
12519  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12520 
12521  if(vkMemReq.size == 0)
12522  {
12523  return VK_ERROR_VALIDATION_FAILED_EXT;
12524  }
12525  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12526  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12527  {
12528  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12529  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12530  }
12531  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12532  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12533  {
12534  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12536  }
12537  if(requiresDedicatedAllocation)
12538  {
12539  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12540  {
12541  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12542  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12543  }
12544  if(createInfo.pool != VK_NULL_HANDLE)
12545  {
12546  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12548  }
12549  }
12550  if((createInfo.pool != VK_NULL_HANDLE) &&
12551  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12552  {
12553  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12554  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12555  }
12556 
12557  if(createInfo.pool != VK_NULL_HANDLE)
12558  {
12559  const VkDeviceSize alignmentForPool = VMA_MAX(
12560  vkMemReq.alignment,
12561  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12562  return createInfo.pool->m_BlockVector.Allocate(
12563  createInfo.pool,
12564  m_CurrentFrameIndex.load(),
12565  vkMemReq.size,
12566  alignmentForPool,
12567  createInfo,
12568  suballocType,
12569  pAllocation);
12570  }
12571  else
12572  {
12573  // Bit mask of Vulkan memory types acceptable for this allocation.
12574  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12575  uint32_t memTypeIndex = UINT32_MAX;
12576  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12577  if(res == VK_SUCCESS)
12578  {
12579  VkDeviceSize alignmentForMemType = VMA_MAX(
12580  vkMemReq.alignment,
12581  GetMemoryTypeMinAlignment(memTypeIndex));
12582 
12583  res = AllocateMemoryOfType(
12584  vkMemReq.size,
12585  alignmentForMemType,
12586  requiresDedicatedAllocation || prefersDedicatedAllocation,
12587  dedicatedBuffer,
12588  dedicatedImage,
12589  createInfo,
12590  memTypeIndex,
12591  suballocType,
12592  pAllocation);
12593  // Succeeded on first try.
12594  if(res == VK_SUCCESS)
12595  {
12596  return res;
12597  }
12598  // Allocation from this memory type failed. Try other compatible memory types.
12599  else
12600  {
12601  for(;;)
12602  {
12603  // Remove old memTypeIndex from list of possibilities.
12604  memoryTypeBits &= ~(1u << memTypeIndex);
12605  // Find alternative memTypeIndex.
12606  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12607  if(res == VK_SUCCESS)
12608  {
12609  alignmentForMemType = VMA_MAX(
12610  vkMemReq.alignment,
12611  GetMemoryTypeMinAlignment(memTypeIndex));
12612 
12613  res = AllocateMemoryOfType(
12614  vkMemReq.size,
12615  alignmentForMemType,
12616  requiresDedicatedAllocation || prefersDedicatedAllocation,
12617  dedicatedBuffer,
12618  dedicatedImage,
12619  createInfo,
12620  memTypeIndex,
12621  suballocType,
12622  pAllocation);
12623  // Allocation from this alternative memory type succeeded.
12624  if(res == VK_SUCCESS)
12625  {
12626  return res;
12627  }
12628  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12629  }
12630  // No other matching memory type index could be found.
12631  else
12632  {
12633  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12634  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12635  }
12636  }
12637  }
12638  }
12639  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12640  else
12641  return res;
12642  }
12643 }
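// Added worked example of the retry loop above. Suppose
// vkMemReq.memoryTypeBits == 0b0111 and vmaFindMemoryTypeIndex first returns
// memTypeIndex == 0. If allocation from type 0 fails:
//   memoryTypeBits &= ~(1u << 0);  // 0b0111 -> 0b0110
// so the next vmaFindMemoryTypeIndex call can only pick type 1 or 2. When no
// acceptable type remains, the function deliberately returns
// VK_ERROR_OUT_OF_DEVICE_MEMORY instead of VK_ERROR_FEATURE_NOT_PRESENT.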
12644 
12645 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12646 {
12647  VMA_ASSERT(allocation);
12648 
12649  if(TouchAllocation(allocation))
12650  {
12651  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12652  {
12653  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12654  }
12655 
12656  switch(allocation->GetType())
12657  {
12658  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12659  {
12660  VmaBlockVector* pBlockVector = VMA_NULL;
12661  VmaPool hPool = allocation->GetPool();
12662  if(hPool != VK_NULL_HANDLE)
12663  {
12664  pBlockVector = &hPool->m_BlockVector;
12665  }
12666  else
12667  {
12668  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12669  pBlockVector = m_pBlockVectors[memTypeIndex];
12670  }
12671  pBlockVector->Free(allocation);
12672  }
12673  break;
12674  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12675  FreeDedicatedMemory(allocation);
12676  break;
12677  default:
12678  VMA_ASSERT(0);
12679  }
12680  }
12681 
12682  allocation->SetUserData(this, VMA_NULL);
12683  vma_delete(this, allocation);
12684 }
12685 
12686 VkResult VmaAllocator_T::ResizeAllocation(
12687  const VmaAllocation alloc,
12688  VkDeviceSize newSize)
12689 {
12690  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12691  {
12692  return VK_ERROR_VALIDATION_FAILED_EXT;
12693  }
12694  if(newSize == alloc->GetSize())
12695  {
12696  return VK_SUCCESS;
12697  }
12698 
12699  switch(alloc->GetType())
12700  {
12701  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12702  return VK_ERROR_FEATURE_NOT_PRESENT;
12703  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12704  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12705  {
12706  alloc->ChangeSize(newSize);
12707  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12708  return VK_SUCCESS;
12709  }
12710  else
12711  {
12712  return VK_ERROR_OUT_OF_POOL_MEMORY;
12713  }
12714  default:
12715  VMA_ASSERT(0);
12716  return VK_ERROR_VALIDATION_FAILED_EXT;
12717  }
12718 }
12719 
12720 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12721 {
12722  // Initialize.
12723  InitStatInfo(pStats->total);
12724  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12725  InitStatInfo(pStats->memoryType[i]);
12726  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12727  InitStatInfo(pStats->memoryHeap[i]);
12728 
12729  // Process default pools.
12730  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12731  {
12732  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12733  VMA_ASSERT(pBlockVector);
12734  pBlockVector->AddStats(pStats);
12735  }
12736 
12737  // Process custom pools.
12738  {
12739  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12740  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12741  {
12742  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12743  }
12744  }
12745 
12746  // Process dedicated allocations.
12747  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12748  {
12749  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12750  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12751  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12752  VMA_ASSERT(pDedicatedAllocVector);
12753  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12754  {
12755  VmaStatInfo allocationStatInfo;
12756  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12757  VmaAddStatInfo(pStats->total, allocationStatInfo);
12758  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12759  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12760  }
12761  }
12762 
12763  // Postprocess.
12764  VmaPostprocessCalcStatInfo(pStats->total);
12765  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12766  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12767  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12768  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12769 }
12770 
12771 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12772 
12773 VkResult VmaAllocator_T::Defragment(
12774  VmaAllocation* pAllocations,
12775  size_t allocationCount,
12776  VkBool32* pAllocationsChanged,
12777  const VmaDefragmentationInfo* pDefragmentationInfo,
12778  VmaDefragmentationStats* pDefragmentationStats)
12779 {
12780  if(pAllocationsChanged != VMA_NULL)
12781  {
12782  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12783  }
12784  if(pDefragmentationStats != VMA_NULL)
12785  {
12786  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12787  }
12788 
12789  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12790 
12791  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12792 
12793  const size_t poolCount = m_Pools.size();
12794 
12795  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12796  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12797  {
12798  VmaAllocation hAlloc = pAllocations[allocIndex];
12799  VMA_ASSERT(hAlloc);
12800  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12801  // DedicatedAlloc cannot be defragmented.
12802  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12803  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12804  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12805  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12806  // Lost allocation cannot be defragmented.
12807  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12808  {
12809  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12810 
12811  const VmaPool hAllocPool = hAlloc->GetPool();
12812  // This allocation belongs to custom pool.
12813  if(hAllocPool != VK_NULL_HANDLE)
12814  {
12815  // Pools with linear or buddy algorithm are not defragmented.
12816  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12817  {
12818  pAllocBlockVector = &hAllocPool->m_BlockVector;
12819  }
12820  }
12821  // This allocation belongs to general pool.
12822  else
12823  {
12824  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12825  }
12826 
12827  if(pAllocBlockVector != VMA_NULL)
12828  {
12829  VmaDefragmentator* const pDefragmentator =
12830  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12831  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12832  &pAllocationsChanged[allocIndex] : VMA_NULL;
12833  pDefragmentator->AddAllocation(hAlloc, pChanged);
12834  }
12835  }
12836  }
12837 
12838  VkResult result = VK_SUCCESS;
12839 
12840  // ======== Main processing.
12841 
12842  VkDeviceSize maxBytesToMove = SIZE_MAX;
12843  uint32_t maxAllocationsToMove = UINT32_MAX;
12844  if(pDefragmentationInfo != VMA_NULL)
12845  {
12846  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12847  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12848  }
12849 
12850  // Process standard memory.
12851  for(uint32_t memTypeIndex = 0;
12852  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12853  ++memTypeIndex)
12854  {
12855  // Only HOST_VISIBLE memory types can be defragmented.
12856  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12857  {
12858  result = m_pBlockVectors[memTypeIndex]->Defragment(
12859  pDefragmentationStats,
12860  maxBytesToMove,
12861  maxAllocationsToMove);
12862  }
12863  }
12864 
12865  // Process custom pools.
12866  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12867  {
12868  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12869  pDefragmentationStats,
12870  maxBytesToMove,
12871  maxAllocationsToMove);
12872  }
12873 
12874  // ======== Destroy defragmentators.
12875 
12876  // Process custom pools.
12877  for(size_t poolIndex = poolCount; poolIndex--; )
12878  {
12879  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12880  }
12881 
12882  // Process standard memory.
12883  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12884  {
12885  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12886  {
12887  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12888  }
12889  }
12890 
12891  return result;
12892 }
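// Added usage sketch (hypothetical client code) of the public entry point that
// reaches VmaAllocator_T::Defragment above; 'allocations' and
// 'allocationsChanged' are arrays owned by the caller.
/*
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // move at most 64 MiB
defragInfo.maxAllocationsToMove = 256;

VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(allocator,
    allocations, allocationCount, allocationsChanged,
    &defragInfo, &stats);
*/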
12893 
12894 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12895 {
12896  if(hAllocation->CanBecomeLost())
12897  {
12898  /*
12899  Warning: This is a carefully designed algorithm.
12900  Do not modify unless you really know what you're doing :)
12901  */
12902  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12903  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12904  for(;;)
12905  {
12906  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12907  {
12908  pAllocationInfo->memoryType = UINT32_MAX;
12909  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12910  pAllocationInfo->offset = 0;
12911  pAllocationInfo->size = hAllocation->GetSize();
12912  pAllocationInfo->pMappedData = VMA_NULL;
12913  pAllocationInfo->pUserData = hAllocation->GetUserData();
12914  return;
12915  }
12916  else if(localLastUseFrameIndex == localCurrFrameIndex)
12917  {
12918  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12919  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12920  pAllocationInfo->offset = hAllocation->GetOffset();
12921  pAllocationInfo->size = hAllocation->GetSize();
12922  pAllocationInfo->pMappedData = VMA_NULL;
12923  pAllocationInfo->pUserData = hAllocation->GetUserData();
12924  return;
12925  }
12926  else // Last use time earlier than current time.
12927  {
12928  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12929  {
12930  localLastUseFrameIndex = localCurrFrameIndex;
12931  }
12932  }
12933  }
12934  }
12935  else
12936  {
12937 #if VMA_STATS_STRING_ENABLED
12938  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12939  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12940  for(;;)
12941  {
12942  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12943  if(localLastUseFrameIndex == localCurrFrameIndex)
12944  {
12945  break;
12946  }
12947  else // Last use time earlier than current time.
12948  {
12949  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12950  {
12951  localLastUseFrameIndex = localCurrFrameIndex;
12952  }
12953  }
12954  }
12955 #endif
12956 
12957  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12958  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12959  pAllocationInfo->offset = hAllocation->GetOffset();
12960  pAllocationInfo->size = hAllocation->GetSize();
12961  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12962  pAllocationInfo->pUserData = hAllocation->GetUserData();
12963  }
12964 }
12965 
12966 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12967 {
12968  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12969  if(hAllocation->CanBecomeLost())
12970  {
12971  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12972  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12973  for(;;)
12974  {
12975  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12976  {
12977  return false;
12978  }
12979  else if(localLastUseFrameIndex == localCurrFrameIndex)
12980  {
12981  return true;
12982  }
12983  else // Last use time earlier than current time.
12984  {
12985  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12986  {
12987  localLastUseFrameIndex = localCurrFrameIndex;
12988  }
12989  }
12990  }
12991  }
12992  else
12993  {
12994 #if VMA_STATS_STRING_ENABLED
12995  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12996  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12997  for(;;)
12998  {
12999  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
13000  if(localLastUseFrameIndex == localCurrFrameIndex)
13001  {
13002  break;
13003  }
13004  else // Last use time earlier than current time.
13005  {
13006  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
13007  {
13008  localLastUseFrameIndex = localCurrFrameIndex;
13009  }
13010  }
13011  }
13012 #endif
13013 
13014  return true;
13015  }
13016 }
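// Added usage sketch (hypothetical client code) of the lost-allocation
// protocol that TouchAllocation implements: advance the frame index once per
// frame, then "touch" allocations right before use.
/*
vmaSetCurrentFrameIndex(allocator, frameIndex);

if(vmaTouchAllocation(allocator, allocation))
{
    // Still valid: safe to use for the rest of this frame.
}
else
{
    // Lost: free the allocation and recreate the resource.
}
*/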
13017 
13018 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13019 {
13020  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13021 
13022  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13023 
13024  if(newCreateInfo.maxBlockCount == 0)
13025  {
13026  newCreateInfo.maxBlockCount = SIZE_MAX;
13027  }
13028  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13029  {
13030  return VK_ERROR_INITIALIZATION_FAILED;
13031  }
13032 
13033  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13034 
13035  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13036 
13037  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13038  if(res != VK_SUCCESS)
13039  {
13040  vma_delete(this, *pPool);
13041  *pPool = VMA_NULL;
13042  return res;
13043  }
13044 
13045  // Add to m_Pools.
13046  {
13047  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13048  (*pPool)->SetId(m_NextPoolId++);
13049  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13050  }
13051 
13052  return VK_SUCCESS;
13053 }
13054 
13055 void VmaAllocator_T::DestroyPool(VmaPool pool)
13056 {
13057  // Remove from m_Pools.
13058  {
13059  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13060  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13061  VMA_ASSERT(success && "Pool not found in Allocator.");
13062  }
13063 
13064  vma_delete(this, pool);
13065 }
13066 
13067 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13068 {
13069  pool->m_BlockVector.GetPoolStats(pPoolStats);
13070 }
13071 
13072 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13073 {
13074  m_CurrentFrameIndex.store(frameIndex);
13075 }
13076 
13077 void VmaAllocator_T::MakePoolAllocationsLost(
13078  VmaPool hPool,
13079  size_t* pLostAllocationCount)
13080 {
13081  hPool->m_BlockVector.MakePoolAllocationsLost(
13082  m_CurrentFrameIndex.load(),
13083  pLostAllocationCount);
13084 }
13085 
13086 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13087 {
13088  return hPool->m_BlockVector.CheckCorruption();
13089 }
13090 
13091 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13092 {
13093  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13094 
13095  // Process default pools.
13096  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13097  {
13098  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13099  {
13100  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13101  VMA_ASSERT(pBlockVector);
13102  VkResult localRes = pBlockVector->CheckCorruption();
13103  switch(localRes)
13104  {
13105  case VK_ERROR_FEATURE_NOT_PRESENT:
13106  break;
13107  case VK_SUCCESS:
13108  finalRes = VK_SUCCESS;
13109  break;
13110  default:
13111  return localRes;
13112  }
13113  }
13114  }
13115 
13116  // Process custom pools.
13117  {
13118  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13119  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13120  {
13121  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13122  {
13123  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13124  switch(localRes)
13125  {
13126  case VK_ERROR_FEATURE_NOT_PRESENT:
13127  break;
13128  case VK_SUCCESS:
13129  finalRes = VK_SUCCESS;
13130  break;
13131  default:
13132  return localRes;
13133  }
13134  }
13135  }
13136  }
13137 
13138  return finalRes;
13139 }
13140 
13141 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13142 {
13143  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13144  (*pAllocation)->InitLost();
13145 }
13146 
13147 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13148 {
13149  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13150 
13151  VkResult res;
13152  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13153  {
13154  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13155  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13156  {
13157  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13158  if(res == VK_SUCCESS)
13159  {
13160  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13161  }
13162  }
13163  else
13164  {
13165  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13166  }
13167  }
13168  else
13169  {
13170  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13171  }
13172 
13173  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13174  {
13175  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13176  }
13177 
13178  return res;
13179 }
13180 
13181 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13182 {
13183  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13184  {
13185  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13186  }
13187 
13188  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13189 
13190  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13191  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13192  {
13193  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13194  m_HeapSizeLimit[heapIndex] += size;
13195  }
13196 }
13197 
13198 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13199 {
13200  if(hAllocation->CanBecomeLost())
13201  {
13202  return VK_ERROR_MEMORY_MAP_FAILED;
13203  }
13204 
13205  switch(hAllocation->GetType())
13206  {
13207  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13208  {
13209  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13210  char *pBytes = VMA_NULL;
13211  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13212  if(res == VK_SUCCESS)
13213  {
13214  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13215  hAllocation->BlockAllocMap();
13216  }
13217  return res;
13218  }
13219  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13220  return hAllocation->DedicatedAllocMap(this, ppData);
13221  default:
13222  VMA_ASSERT(0);
13223  return VK_ERROR_MEMORY_MAP_FAILED;
13224  }
13225 }
13226 
13227 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13228 {
13229  switch(hAllocation->GetType())
13230  {
13231  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13232  {
13233  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13234  hAllocation->BlockAllocUnmap();
13235  pBlock->Unmap(this, 1);
13236  }
13237  break;
13238  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13239  hAllocation->DedicatedAllocUnmap(this);
13240  break;
13241  default:
13242  VMA_ASSERT(0);
13243  }
13244 }
13245 
13246 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13247 {
13248  VkResult res = VK_SUCCESS;
13249  switch(hAllocation->GetType())
13250  {
13251  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13252  res = GetVulkanFunctions().vkBindBufferMemory(
13253  m_hDevice,
13254  hBuffer,
13255  hAllocation->GetMemory(),
13256  0); //memoryOffset
13257  break;
13258  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13259  {
13260  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13261  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13262  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13263  break;
13264  }
13265  default:
13266  VMA_ASSERT(0);
13267  }
13268  return res;
13269 }
13270 
13271 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13272 {
13273  VkResult res = VK_SUCCESS;
13274  switch(hAllocation->GetType())
13275  {
13276  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13277  res = GetVulkanFunctions().vkBindImageMemory(
13278  m_hDevice,
13279  hImage,
13280  hAllocation->GetMemory(),
13281  0); //memoryOffset
13282  break;
13283  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13284  {
13285  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13286  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13287  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13288  break;
13289  }
13290  default:
13291  VMA_ASSERT(0);
13292  }
13293  return res;
13294 }
13295 
13296 void VmaAllocator_T::FlushOrInvalidateAllocation(
13297  VmaAllocation hAllocation,
13298  VkDeviceSize offset, VkDeviceSize size,
13299  VMA_CACHE_OPERATION op)
13300 {
13301  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13302  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13303  {
13304  const VkDeviceSize allocationSize = hAllocation->GetSize();
13305  VMA_ASSERT(offset <= allocationSize);
13306 
13307  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13308 
13309  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13310  memRange.memory = hAllocation->GetMemory();
13311 
13312  switch(hAllocation->GetType())
13313  {
13314  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13315  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13316  if(size == VK_WHOLE_SIZE)
13317  {
13318  memRange.size = allocationSize - memRange.offset;
13319  }
13320  else
13321  {
13322  VMA_ASSERT(offset + size <= allocationSize);
13323  memRange.size = VMA_MIN(
13324  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13325  allocationSize - memRange.offset);
13326  }
13327  break;
13328 
13329  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13330  {
13331  // 1. Compute the range still within this allocation.
13332  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13333  if(size == VK_WHOLE_SIZE)
13334  {
13335  size = allocationSize - offset;
13336  }
13337  else
13338  {
13339  VMA_ASSERT(offset + size <= allocationSize);
13340  }
13341  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13342 
13343  // 2. Adjust to whole block.
13344  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13345  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13346  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13347  memRange.offset += allocationOffset;
13348  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13349 
13350  break;
13351  }
13352 
13353  default:
13354  VMA_ASSERT(0);
13355  }
13356 
13357  switch(op)
13358  {
13359  case VMA_CACHE_FLUSH:
13360  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13361  break;
13362  case VMA_CACHE_INVALIDATE:
13363  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13364  break;
13365  default:
13366  VMA_ASSERT(0);
13367  }
13368  }
13369  // else: Just ignore this call.
13370 }
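// Worked example of the rounding above (illustrative annotation, not part of
// the original file): with offset = 70, size = 100 and
// nonCoherentAtomSize = 64, memRange.offset = VmaAlignDown(70, 64) = 64 and
// memRange.size = VmaAlignUp(100 + (70 - 64), 64) = 128, so the flushed or
// invalidated range is expanded to whole atoms while still covering bytes
// [70, 170) requested by the caller.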
13371 
13372 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13373 {
13374  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13375 
13376  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13377  {
13378  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13379  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13380  VMA_ASSERT(pDedicatedAllocations);
13381  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13382  VMA_ASSERT(success);
13383  }
13384 
13385  VkDeviceMemory hMemory = allocation->GetMemory();
13386 
13387  /*
13388  There is no need to unmap here, because the Vulkan spec allows skipping
13389  vkUnmapMemory before vkFreeMemory.
13390 
13391  if(allocation->GetMappedData() != VMA_NULL)
13392  {
13393  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13394  }
13395  */
13396 
13397  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13398 
13399  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13400 }
13401 
13402 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13403 {
13404  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13405  !hAllocation->CanBecomeLost() &&
13406  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13407  {
13408  void* pData = VMA_NULL;
13409  VkResult res = Map(hAllocation, &pData);
13410  if(res == VK_SUCCESS)
13411  {
13412  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13413  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13414  Unmap(hAllocation);
13415  }
13416  else
13417  {
13418  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13419  }
13420  }
13421 }
13422 
13423 #if VMA_STATS_STRING_ENABLED
13424 
13425 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13426 {
13427  bool dedicatedAllocationsStarted = false;
13428  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13429  {
13430  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13431  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13432  VMA_ASSERT(pDedicatedAllocVector);
13433  if(pDedicatedAllocVector->empty() == false)
13434  {
13435  if(dedicatedAllocationsStarted == false)
13436  {
13437  dedicatedAllocationsStarted = true;
13438  json.WriteString("DedicatedAllocations");
13439  json.BeginObject();
13440  }
13441 
13442  json.BeginString("Type ");
13443  json.ContinueString(memTypeIndex);
13444  json.EndString();
13445 
13446  json.BeginArray();
13447 
13448  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13449  {
13450  json.BeginObject(true);
13451  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13452  hAlloc->PrintParameters(json);
13453  json.EndObject();
13454  }
13455 
13456  json.EndArray();
13457  }
13458  }
13459  if(dedicatedAllocationsStarted)
13460  {
13461  json.EndObject();
13462  }
13463 
13464  {
13465  bool allocationsStarted = false;
13466  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13467  {
13468  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13469  {
13470  if(allocationsStarted == false)
13471  {
13472  allocationsStarted = true;
13473  json.WriteString("DefaultPools");
13474  json.BeginObject();
13475  }
13476 
13477  json.BeginString("Type ");
13478  json.ContinueString(memTypeIndex);
13479  json.EndString();
13480 
13481  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13482  }
13483  }
13484  if(allocationsStarted)
13485  {
13486  json.EndObject();
13487  }
13488  }
13489 
13490  // Custom pools
13491  {
13492  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13493  const size_t poolCount = m_Pools.size();
13494  if(poolCount > 0)
13495  {
13496  json.WriteString("Pools");
13497  json.BeginObject();
13498  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13499  {
13500  json.BeginString();
13501  json.ContinueString(m_Pools[poolIndex]->GetId());
13502  json.EndString();
13503 
13504  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13505  }
13506  json.EndObject();
13507  }
13508  }
13509 }
13510 
13511 #endif // #if VMA_STATS_STRING_ENABLED
13512 
13513 ////////////////////////////////////////////////////////////////////////////////
13514 // Public interface
13515 
13516 VkResult vmaCreateAllocator(
13517  const VmaAllocatorCreateInfo* pCreateInfo,
13518  VmaAllocator* pAllocator)
13519 {
13520  VMA_ASSERT(pCreateInfo && pAllocator);
13521  VMA_DEBUG_LOG("vmaCreateAllocator");
13522  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13523  return (*pAllocator)->Init(pCreateInfo);
13524 }
13525 
13526 void vmaDestroyAllocator(
13527  VmaAllocator allocator)
13528 {
13529  if(allocator != VK_NULL_HANDLE)
13530  {
13531  VMA_DEBUG_LOG("vmaDestroyAllocator");
13532  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13533  vma_delete(&allocationCallbacks, allocator);
13534  }
13535 }
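// Example (illustrative sketch, not part of vk_mem_alloc.h): typical
// allocator setup and teardown, assuming `physicalDevice` and `device` were
// created by the application beforehand.
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//
//   VmaAllocator allocator;
//   VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//   // ... use the allocator ...
//   vmaDestroyAllocator(allocator);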
13536 
13537 void vmaGetPhysicalDeviceProperties(
13538  VmaAllocator allocator,
13539  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13540 {
13541  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13542  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13543 }
13544 
13545 void vmaGetMemoryProperties(
13546  VmaAllocator allocator,
13547  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13548 {
13549  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13550  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13551 }
13552 
13553 void vmaGetMemoryTypeProperties(
13554  VmaAllocator allocator,
13555  uint32_t memoryTypeIndex,
13556  VkMemoryPropertyFlags* pFlags)
13557 {
13558  VMA_ASSERT(allocator && pFlags);
13559  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13560  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13561 }
13562 
13563 void vmaSetCurrentFrameIndex(
13564  VmaAllocator allocator,
13565  uint32_t frameIndex)
13566 {
13567  VMA_ASSERT(allocator);
13568  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13569 
13570  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13571 
13572  allocator->SetCurrentFrameIndex(frameIndex);
13573 }
13574 
13575 void vmaCalculateStats(
13576  VmaAllocator allocator,
13577  VmaStats* pStats)
13578 {
13579  VMA_ASSERT(allocator && pStats);
13580  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13581  allocator->CalculateStats(pStats);
13582 }
13583 
13584 #if VMA_STATS_STRING_ENABLED
13585 
13586 void vmaBuildStatsString(
13587  VmaAllocator allocator,
13588  char** ppStatsString,
13589  VkBool32 detailedMap)
13590 {
13591  VMA_ASSERT(allocator && ppStatsString);
13592  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13593 
13594  VmaStringBuilder sb(allocator);
13595  {
13596  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13597  json.BeginObject();
13598 
13599  VmaStats stats;
13600  allocator->CalculateStats(&stats);
13601 
13602  json.WriteString("Total");
13603  VmaPrintStatInfo(json, stats.total);
13604 
13605  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13606  {
13607  json.BeginString("Heap ");
13608  json.ContinueString(heapIndex);
13609  json.EndString();
13610  json.BeginObject();
13611 
13612  json.WriteString("Size");
13613  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13614 
13615  json.WriteString("Flags");
13616  json.BeginArray(true);
13617  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13618  {
13619  json.WriteString("DEVICE_LOCAL");
13620  }
13621  json.EndArray();
13622 
13623  if(stats.memoryHeap[heapIndex].blockCount > 0)
13624  {
13625  json.WriteString("Stats");
13626  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13627  }
13628 
13629  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13630  {
13631  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13632  {
13633  json.BeginString("Type ");
13634  json.ContinueString(typeIndex);
13635  json.EndString();
13636 
13637  json.BeginObject();
13638 
13639  json.WriteString("Flags");
13640  json.BeginArray(true);
13641  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13642  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13643  {
13644  json.WriteString("DEVICE_LOCAL");
13645  }
13646  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13647  {
13648  json.WriteString("HOST_VISIBLE");
13649  }
13650  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13651  {
13652  json.WriteString("HOST_COHERENT");
13653  }
13654  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13655  {
13656  json.WriteString("HOST_CACHED");
13657  }
13658  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13659  {
13660  json.WriteString("LAZILY_ALLOCATED");
13661  }
13662  json.EndArray();
13663 
13664  if(stats.memoryType[typeIndex].blockCount > 0)
13665  {
13666  json.WriteString("Stats");
13667  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13668  }
13669 
13670  json.EndObject();
13671  }
13672  }
13673 
13674  json.EndObject();
13675  }
13676  if(detailedMap == VK_TRUE)
13677  {
13678  allocator->PrintDetailedMap(json);
13679  }
13680 
13681  json.EndObject();
13682  }
13683 
13684  const size_t len = sb.GetLength();
13685  char* const pChars = vma_new_array(allocator, char, len + 1);
13686  if(len > 0)
13687  {
13688  memcpy(pChars, sb.GetData(), len);
13689  }
13690  pChars[len] = '\0';
13691  *ppStatsString = pChars;
13692 }
13693 
13694 void vmaFreeStatsString(
13695  VmaAllocator allocator,
13696  char* pStatsString)
13697 {
13698  if(pStatsString != VMA_NULL)
13699  {
13700  VMA_ASSERT(allocator);
13701  size_t len = strlen(pStatsString);
13702  vma_delete_array(allocator, pStatsString, len + 1);
13703  }
13704 }
13705 
13706 #endif // #if VMA_STATS_STRING_ENABLED
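// Example (illustrative sketch, not part of vk_mem_alloc.h): dumping
// allocator statistics as JSON, assuming `allocator` is a valid VmaAllocator
// and VMA_STATS_STRING_ENABLED is 1. The returned string must be released
// with vmaFreeStatsString().
//
//   char* statsString = nullptr;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailed map
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);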
13707 
13708 /*
13709 This function is not protected by any mutex because it just reads immutable data.
13710 */
13711 VkResult vmaFindMemoryTypeIndex(
13712  VmaAllocator allocator,
13713  uint32_t memoryTypeBits,
13714  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13715  uint32_t* pMemoryTypeIndex)
13716 {
13717  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13718  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13719  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13720 
13721  if(pAllocationCreateInfo->memoryTypeBits != 0)
13722  {
13723  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13724  }
13725 
13726  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13727  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13728 
13729  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13730  if(mapped)
13731  {
13732  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13733  }
13734 
13735  // Convert usage to requiredFlags and preferredFlags.
13736  switch(pAllocationCreateInfo->usage)
13737  {
13738  case VMA_MEMORY_USAGE_UNKNOWN:
13739  break;
13740  case VMA_MEMORY_USAGE_GPU_ONLY:
13741  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13742  {
13743  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13744  }
13745  break;
13746  case VMA_MEMORY_USAGE_CPU_ONLY:
13747  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13748  break;
13749  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13751  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13752  {
13753  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13754  }
13755  break;
13756  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13757  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13758  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13759  break;
13760  default:
13761  break;
13762  }
13763 
13764  *pMemoryTypeIndex = UINT32_MAX;
13765  uint32_t minCost = UINT32_MAX;
13766  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13767  memTypeIndex < allocator->GetMemoryTypeCount();
13768  ++memTypeIndex, memTypeBit <<= 1)
13769  {
13770  // This memory type is acceptable according to memoryTypeBits bitmask.
13771  if((memTypeBit & memoryTypeBits) != 0)
13772  {
13773  const VkMemoryPropertyFlags currFlags =
13774  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13775  // This memory type contains requiredFlags.
13776  if((requiredFlags & ~currFlags) == 0)
13777  {
13778  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13779  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13780  // Remember memory type with lowest cost.
13781  if(currCost < minCost)
13782  {
13783  *pMemoryTypeIndex = memTypeIndex;
13784  if(currCost == 0)
13785  {
13786  return VK_SUCCESS;
13787  }
13788  minCost = currCost;
13789  }
13790  }
13791  }
13792  }
13793  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13794 }
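// Example (illustrative sketch, not part of vk_mem_alloc.h): querying a
// memory type index for a host-visible staging allocation, assuming
// `allocator` is valid. In real use, the memoryTypeBits argument would
// normally come from vkGetBufferMemoryRequirements rather than UINT32_MAX.
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//   uint32_t memTypeIndex;
//   VkResult res = vmaFindMemoryTypeIndex(
//       allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);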
13795 
13796 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13797  VmaAllocator allocator,
13798  const VkBufferCreateInfo* pBufferCreateInfo,
13799  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13800  uint32_t* pMemoryTypeIndex)
13801 {
13802  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13803  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13804  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13805  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13806 
13807  const VkDevice hDev = allocator->m_hDevice;
13808  VkBuffer hBuffer = VK_NULL_HANDLE;
13809  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13810  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13811  if(res == VK_SUCCESS)
13812  {
13813  VkMemoryRequirements memReq = {};
13814  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13815  hDev, hBuffer, &memReq);
13816 
13817  res = vmaFindMemoryTypeIndex(
13818  allocator,
13819  memReq.memoryTypeBits,
13820  pAllocationCreateInfo,
13821  pMemoryTypeIndex);
13822 
13823  allocator->GetVulkanFunctions().vkDestroyBuffer(
13824  hDev, hBuffer, allocator->GetAllocationCallbacks());
13825  }
13826  return res;
13827 }
13828 
13829 VkResult vmaFindMemoryTypeIndexForImageInfo(
13830  VmaAllocator allocator,
13831  const VkImageCreateInfo* pImageCreateInfo,
13832  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13833  uint32_t* pMemoryTypeIndex)
13834 {
13835  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13836  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13837  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13838  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13839 
13840  const VkDevice hDev = allocator->m_hDevice;
13841  VkImage hImage = VK_NULL_HANDLE;
13842  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13843  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13844  if(res == VK_SUCCESS)
13845  {
13846  VkMemoryRequirements memReq = {};
13847  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13848  hDev, hImage, &memReq);
13849 
13850  res = vmaFindMemoryTypeIndex(
13851  allocator,
13852  memReq.memoryTypeBits,
13853  pAllocationCreateInfo,
13854  pMemoryTypeIndex);
13855 
13856  allocator->GetVulkanFunctions().vkDestroyImage(
13857  hDev, hImage, allocator->GetAllocationCallbacks());
13858  }
13859  return res;
13860 }
13861 
13862 VkResult vmaCreatePool(
13863  VmaAllocator allocator,
13864  const VmaPoolCreateInfo* pCreateInfo,
13865  VmaPool* pPool)
13866 {
13867  VMA_ASSERT(allocator && pCreateInfo && pPool);
13868 
13869  VMA_DEBUG_LOG("vmaCreatePool");
13870 
13871  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13872 
13873  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13874 
13875 #if VMA_RECORDING_ENABLED
13876  if(allocator->GetRecorder() != VMA_NULL)
13877  {
13878  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13879  }
13880 #endif
13881 
13882  return res;
13883 }
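// Example (illustrative sketch, not part of vk_mem_alloc.h): creating a
// custom pool for a particular memory type, assuming `memTypeIndex` was
// obtained e.g. via vmaFindMemoryTypeIndex() above.
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block
//   poolCreateInfo.minBlockCount = 1;
//
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//   // ... allocate from it via VmaAllocationCreateInfo::pool ...
//   vmaDestroyPool(allocator, pool);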
13884 
13885 void vmaDestroyPool(
13886  VmaAllocator allocator,
13887  VmaPool pool)
13888 {
13889  VMA_ASSERT(allocator);
13890 
13891  if(pool == VK_NULL_HANDLE)
13892  {
13893  return;
13894  }
13895 
13896  VMA_DEBUG_LOG("vmaDestroyPool");
13897 
13898  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13899 
13900 #if VMA_RECORDING_ENABLED
13901  if(allocator->GetRecorder() != VMA_NULL)
13902  {
13903  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13904  }
13905 #endif
13906 
13907  allocator->DestroyPool(pool);
13908 }
13909 
13910 void vmaGetPoolStats(
13911  VmaAllocator allocator,
13912  VmaPool pool,
13913  VmaPoolStats* pPoolStats)
13914 {
13915  VMA_ASSERT(allocator && pool && pPoolStats);
13916 
13917  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13918 
13919  allocator->GetPoolStats(pool, pPoolStats);
13920 }
13921 
13922 void vmaMakePoolAllocationsLost(
13923  VmaAllocator allocator,
13924  VmaPool pool,
13925  size_t* pLostAllocationCount)
13926 {
13927  VMA_ASSERT(allocator && pool);
13928 
13929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13930 
13931 #if VMA_RECORDING_ENABLED
13932  if(allocator->GetRecorder() != VMA_NULL)
13933  {
13934  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13935  }
13936 #endif
13937 
13938  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13939 }
13940 
13941 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13942 {
13943  VMA_ASSERT(allocator && pool);
13944 
13945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13946 
13947  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13948 
13949  return allocator->CheckPoolCorruption(pool);
13950 }
13951 
13952 VkResult vmaAllocateMemory(
13953  VmaAllocator allocator,
13954  const VkMemoryRequirements* pVkMemoryRequirements,
13955  const VmaAllocationCreateInfo* pCreateInfo,
13956  VmaAllocation* pAllocation,
13957  VmaAllocationInfo* pAllocationInfo)
13958 {
13959  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13960 
13961  VMA_DEBUG_LOG("vmaAllocateMemory");
13962 
13963  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13964 
13965  VkResult result = allocator->AllocateMemory(
13966  *pVkMemoryRequirements,
13967  false, // requiresDedicatedAllocation
13968  false, // prefersDedicatedAllocation
13969  VK_NULL_HANDLE, // dedicatedBuffer
13970  VK_NULL_HANDLE, // dedicatedImage
13971  *pCreateInfo,
13972  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13973  pAllocation);
13974 
13975 #if VMA_RECORDING_ENABLED
13976  if(allocator->GetRecorder() != VMA_NULL)
13977  {
13978  allocator->GetRecorder()->RecordAllocateMemory(
13979  allocator->GetCurrentFrameIndex(),
13980  *pVkMemoryRequirements,
13981  *pCreateInfo,
13982  *pAllocation);
13983  }
13984 #endif
13985 
13986  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13987  {
13988  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13989  }
13990 
13991  return result;
13992 }
13993 
13994 VkResult vmaAllocateMemoryForBuffer(
13995  VmaAllocator allocator,
13996  VkBuffer buffer,
13997  const VmaAllocationCreateInfo* pCreateInfo,
13998  VmaAllocation* pAllocation,
13999  VmaAllocationInfo* pAllocationInfo)
14000 {
14001  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14002 
14003  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
14004 
14005  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14006 
14007  VkMemoryRequirements vkMemReq = {};
14008  bool requiresDedicatedAllocation = false;
14009  bool prefersDedicatedAllocation = false;
14010  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14011  requiresDedicatedAllocation,
14012  prefersDedicatedAllocation);
14013 
14014  VkResult result = allocator->AllocateMemory(
14015  vkMemReq,
14016  requiresDedicatedAllocation,
14017  prefersDedicatedAllocation,
14018  buffer, // dedicatedBuffer
14019  VK_NULL_HANDLE, // dedicatedImage
14020  *pCreateInfo,
14021  VMA_SUBALLOCATION_TYPE_BUFFER,
14022  pAllocation);
14023 
14024 #if VMA_RECORDING_ENABLED
14025  if(allocator->GetRecorder() != VMA_NULL)
14026  {
14027  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14028  allocator->GetCurrentFrameIndex(),
14029  vkMemReq,
14030  requiresDedicatedAllocation,
14031  prefersDedicatedAllocation,
14032  *pCreateInfo,
14033  *pAllocation);
14034  }
14035 #endif
14036 
14037  if(pAllocationInfo && result == VK_SUCCESS)
14038  {
14039  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14040  }
14041 
14042  return result;
14043 }
14044 
14045 VkResult vmaAllocateMemoryForImage(
14046  VmaAllocator allocator,
14047  VkImage image,
14048  const VmaAllocationCreateInfo* pCreateInfo,
14049  VmaAllocation* pAllocation,
14050  VmaAllocationInfo* pAllocationInfo)
14051 {
14052  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14053 
14054  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14055 
14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14057 
14058  VkMemoryRequirements vkMemReq = {};
14059  bool requiresDedicatedAllocation = false;
14060  bool prefersDedicatedAllocation = false;
14061  allocator->GetImageMemoryRequirements(image, vkMemReq,
14062  requiresDedicatedAllocation, prefersDedicatedAllocation);
14063 
14064  VkResult result = allocator->AllocateMemory(
14065  vkMemReq,
14066  requiresDedicatedAllocation,
14067  prefersDedicatedAllocation,
14068  VK_NULL_HANDLE, // dedicatedBuffer
14069  image, // dedicatedImage
14070  *pCreateInfo,
14071  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14072  pAllocation);
14073 
14074 #if VMA_RECORDING_ENABLED
14075  if(allocator->GetRecorder() != VMA_NULL)
14076  {
14077  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14078  allocator->GetCurrentFrameIndex(),
14079  vkMemReq,
14080  requiresDedicatedAllocation,
14081  prefersDedicatedAllocation,
14082  *pCreateInfo,
14083  *pAllocation);
14084  }
14085 #endif
14086 
14087  if(pAllocationInfo && result == VK_SUCCESS)
14088  {
14089  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14090  }
14091 
14092  return result;
14093 }
14094 
14095 void vmaFreeMemory(
14096  VmaAllocator allocator,
14097  VmaAllocation allocation)
14098 {
14099  VMA_ASSERT(allocator);
14100 
14101  if(allocation == VK_NULL_HANDLE)
14102  {
14103  return;
14104  }
14105 
14106  VMA_DEBUG_LOG("vmaFreeMemory");
14107 
14108  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14109 
14110 #if VMA_RECORDING_ENABLED
14111  if(allocator->GetRecorder() != VMA_NULL)
14112  {
14113  allocator->GetRecorder()->RecordFreeMemory(
14114  allocator->GetCurrentFrameIndex(),
14115  allocation);
14116  }
14117 #endif
14118 
14119  allocator->FreeMemory(allocation);
14120 }
14121 
14122 VkResult vmaResizeAllocation(
14123  VmaAllocator allocator,
14124  VmaAllocation allocation,
14125  VkDeviceSize newSize)
14126 {
14127  VMA_ASSERT(allocator && allocation);
14128 
14129  VMA_DEBUG_LOG("vmaResizeAllocation");
14130 
14131  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14132 
14133 #if VMA_RECORDING_ENABLED
14134  if(allocator->GetRecorder() != VMA_NULL)
14135  {
14136  allocator->GetRecorder()->RecordResizeAllocation(
14137  allocator->GetCurrentFrameIndex(),
14138  allocation,
14139  newSize);
14140  }
14141 #endif
14142 
14143  return allocator->ResizeAllocation(allocation, newSize);
14144 }
14145 
14146 void vmaGetAllocationInfo(
14147  VmaAllocator allocator,
14148  VmaAllocation allocation,
14149  VmaAllocationInfo* pAllocationInfo)
14150 {
14151  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14152 
14153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14154 
14155 #if VMA_RECORDING_ENABLED
14156  if(allocator->GetRecorder() != VMA_NULL)
14157  {
14158  allocator->GetRecorder()->RecordGetAllocationInfo(
14159  allocator->GetCurrentFrameIndex(),
14160  allocation);
14161  }
14162 #endif
14163 
14164  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14165 }
14166 
14167 VkBool32 vmaTouchAllocation(
14168  VmaAllocator allocator,
14169  VmaAllocation allocation)
14170 {
14171  VMA_ASSERT(allocator && allocation);
14172 
14173  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14174 
14175 #if VMA_RECORDING_ENABLED
14176  if(allocator->GetRecorder() != VMA_NULL)
14177  {
14178  allocator->GetRecorder()->RecordTouchAllocation(
14179  allocator->GetCurrentFrameIndex(),
14180  allocation);
14181  }
14182 #endif
14183 
14184  return allocator->TouchAllocation(allocation);
14185 }
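// Example (illustrative sketch, not part of vk_mem_alloc.h): per-frame
// handling of allocations that can become lost, assuming `allocation` was
// created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and `frameIndex`
// is the application's frame counter.
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
//   {
//       // The allocation is lost - destroy it and recreate the resource.
//   }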
14186 
14187 void vmaSetAllocationUserData(
14188  VmaAllocator allocator,
14189  VmaAllocation allocation,
14190  void* pUserData)
14191 {
14192  VMA_ASSERT(allocator && allocation);
14193 
14194  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14195 
14196  allocation->SetUserData(allocator, pUserData);
14197 
14198 #if VMA_RECORDING_ENABLED
14199  if(allocator->GetRecorder() != VMA_NULL)
14200  {
14201  allocator->GetRecorder()->RecordSetAllocationUserData(
14202  allocator->GetCurrentFrameIndex(),
14203  allocation,
14204  pUserData);
14205  }
14206 #endif
14207 }
14208 
14209 void vmaCreateLostAllocation(
14210  VmaAllocator allocator,
14211  VmaAllocation* pAllocation)
14212 {
14213  VMA_ASSERT(allocator && pAllocation);
14214 
14215  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
14216 
14217  allocator->CreateLostAllocation(pAllocation);
14218 
14219 #if VMA_RECORDING_ENABLED
14220  if(allocator->GetRecorder() != VMA_NULL)
14221  {
14222  allocator->GetRecorder()->RecordCreateLostAllocation(
14223  allocator->GetCurrentFrameIndex(),
14224  *pAllocation);
14225  }
14226 #endif
14227 }
14228 
14229 VkResult vmaMapMemory(
14230  VmaAllocator allocator,
14231  VmaAllocation allocation,
14232  void** ppData)
14233 {
14234  VMA_ASSERT(allocator && allocation && ppData);
14235 
14236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14237 
14238  VkResult res = allocator->Map(allocation, ppData);
14239 
14240 #if VMA_RECORDING_ENABLED
14241  if(allocator->GetRecorder() != VMA_NULL)
14242  {
14243  allocator->GetRecorder()->RecordMapMemory(
14244  allocator->GetCurrentFrameIndex(),
14245  allocation);
14246  }
14247 #endif
14248 
14249  return res;
14250 }
14251 
14252 void vmaUnmapMemory(
14253  VmaAllocator allocator,
14254  VmaAllocation allocation)
14255 {
14256  VMA_ASSERT(allocator && allocation);
14257 
14258  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14259 
14260 #if VMA_RECORDING_ENABLED
14261  if(allocator->GetRecorder() != VMA_NULL)
14262  {
14263  allocator->GetRecorder()->RecordUnmapMemory(
14264  allocator->GetCurrentFrameIndex(),
14265  allocation);
14266  }
14267 #endif
14268 
14269  allocator->Unmap(allocation);
14270 }
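// Example (illustrative sketch, not part of vk_mem_alloc.h): writing to a
// host-visible allocation, assuming `allocation` resides in HOST_VISIBLE
// memory and `data`/`dataSize` are provided by the caller.
//
//   void* pMapped = nullptr;
//   VkResult res = vmaMapMemory(allocator, allocation, &pMapped);
//   if(res == VK_SUCCESS)
//   {
//       memcpy(pMapped, data, (size_t)dataSize);
//       vmaUnmapMemory(allocator, allocation);
//   }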
14271 
14272 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14273 {
14274  VMA_ASSERT(allocator && allocation);
14275 
14276  VMA_DEBUG_LOG("vmaFlushAllocation");
14277 
14278  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14279 
14280  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14281 
14282 #if VMA_RECORDING_ENABLED
14283  if(allocator->GetRecorder() != VMA_NULL)
14284  {
14285  allocator->GetRecorder()->RecordFlushAllocation(
14286  allocator->GetCurrentFrameIndex(),
14287  allocation, offset, size);
14288  }
14289 #endif
14290 }
14291 
14292 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14293 {
14294  VMA_ASSERT(allocator && allocation);
14295 
14296  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14297 
14298  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14299 
14300  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14301 
14302 #if VMA_RECORDING_ENABLED
14303  if(allocator->GetRecorder() != VMA_NULL)
14304  {
14305  allocator->GetRecorder()->RecordInvalidateAllocation(
14306  allocator->GetCurrentFrameIndex(),
14307  allocation, offset, size);
14308  }
14309 #endif
14310 }
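// Example (illustrative sketch, not part of vk_mem_alloc.h): for memory that
// is HOST_VISIBLE but not HOST_COHERENT, flush after CPU writes and
// invalidate before CPU reads. As implemented above, both calls are no-ops
// for coherent memory types.
//
//   vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);      // after writing
//   vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE); // before reading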
14311 
14312 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14313 {
14314  VMA_ASSERT(allocator);
14315 
14316  VMA_DEBUG_LOG("vmaCheckCorruption");
14317 
14318  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14319 
14320  return allocator->CheckCorruption(memoryTypeBits);
14321 }
14322 
14323 VkResult vmaDefragment(
14324  VmaAllocator allocator,
14325  VmaAllocation* pAllocations,
14326  size_t allocationCount,
14327  VkBool32* pAllocationsChanged,
14328  const VmaDefragmentationInfo *pDefragmentationInfo,
14329  VmaDefragmentationStats* pDefragmentationStats)
14330 {
14331  VMA_ASSERT(allocator && pAllocations);
14332 
14333  VMA_DEBUG_LOG("vmaDefragment");
14334 
14335  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14336 
14337  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14338 }
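// Example (illustrative sketch, not part of vk_mem_alloc.h): compacting a
// set of allocations, assuming `allocs` is an array of ALLOC_COUNT
// VmaAllocation handles (ALLOC_COUNT is a hypothetical constant) that are
// not currently in use by the GPU. Buffers/images bound to allocations that
// were moved must be recreated and rebound afterwards.
//
//   VkBool32 changed[ALLOC_COUNT] = {};
//   VmaDefragmentationStats stats = {};
//   VkResult res = vmaDefragment(
//       allocator, allocs, ALLOC_COUNT, changed,
//       nullptr, // default VmaDefragmentationInfo
//       &stats);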
14339 
14340 VkResult vmaBindBufferMemory(
14341  VmaAllocator allocator,
14342  VmaAllocation allocation,
14343  VkBuffer buffer)
14344 {
14345  VMA_ASSERT(allocator && allocation && buffer);
14346 
14347  VMA_DEBUG_LOG("vmaBindBufferMemory");
14348 
14349  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14350 
14351  return allocator->BindBufferMemory(allocation, buffer);
14352 }
14353 
14354 VkResult vmaBindImageMemory(
14355  VmaAllocator allocator,
14356  VmaAllocation allocation,
14357  VkImage image)
14358 {
14359  VMA_ASSERT(allocator && allocation && image);
14360 
14361  VMA_DEBUG_LOG("vmaBindImageMemory");
14362 
14363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14364 
14365  return allocator->BindImageMemory(allocation, image);
14366 }
14367 
14368 VkResult vmaCreateBuffer(
14369  VmaAllocator allocator,
14370  const VkBufferCreateInfo* pBufferCreateInfo,
14371  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14372  VkBuffer* pBuffer,
14373  VmaAllocation* pAllocation,
14374  VmaAllocationInfo* pAllocationInfo)
14375 {
14376  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14377 
14378  if(pBufferCreateInfo->size == 0)
14379  {
14380  return VK_ERROR_VALIDATION_FAILED_EXT;
14381  }
14382 
14383  VMA_DEBUG_LOG("vmaCreateBuffer");
14384 
14385  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14386 
14387  *pBuffer = VK_NULL_HANDLE;
14388  *pAllocation = VK_NULL_HANDLE;
14389 
14390  // 1. Create VkBuffer.
14391  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14392  allocator->m_hDevice,
14393  pBufferCreateInfo,
14394  allocator->GetAllocationCallbacks(),
14395  pBuffer);
14396  if(res >= 0)
14397  {
14398  // 2. vkGetBufferMemoryRequirements.
14399  VkMemoryRequirements vkMemReq = {};
14400  bool requiresDedicatedAllocation = false;
14401  bool prefersDedicatedAllocation = false;
14402  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14403  requiresDedicatedAllocation, prefersDedicatedAllocation);
14404 
14405  // Make sure the alignment requirements for specific buffer usages reported
14406  // in Physical Device Properties are included in the alignment reported by memory requirements.
14407  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14408  {
14409  VMA_ASSERT(vkMemReq.alignment %
14410  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14411  }
14412  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14413  {
14414  VMA_ASSERT(vkMemReq.alignment %
14415  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14416  }
14417  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14418  {
14419  VMA_ASSERT(vkMemReq.alignment %
14420  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14421  }
14422 
14423  // 3. Allocate memory using allocator.
14424  res = allocator->AllocateMemory(
14425  vkMemReq,
14426  requiresDedicatedAllocation,
14427  prefersDedicatedAllocation,
14428  *pBuffer, // dedicatedBuffer
14429  VK_NULL_HANDLE, // dedicatedImage
14430  *pAllocationCreateInfo,
14431  VMA_SUBALLOCATION_TYPE_BUFFER,
14432  pAllocation);
14433 
14434 #if VMA_RECORDING_ENABLED
14435  if(allocator->GetRecorder() != VMA_NULL)
14436  {
14437  allocator->GetRecorder()->RecordCreateBuffer(
14438  allocator->GetCurrentFrameIndex(),
14439  *pBufferCreateInfo,
14440  *pAllocationCreateInfo,
14441  *pAllocation);
14442  }
14443 #endif
14444 
14445  if(res >= 0)
14446  {
14447  // 4. Bind buffer with memory.
14448  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14449  if(res >= 0)
14450  {
14451  // All steps succeeded.
14452  #if VMA_STATS_STRING_ENABLED
14453  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14454  #endif
14455  if(pAllocationInfo != VMA_NULL)
14456  {
14457  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14458  }
14459 
14460  return VK_SUCCESS;
14461  }
14462  allocator->FreeMemory(*pAllocation);
14463  *pAllocation = VK_NULL_HANDLE;
14464  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14465  *pBuffer = VK_NULL_HANDLE;
14466  return res;
14467  }
14468  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14469  *pBuffer = VK_NULL_HANDLE;
14470  return res;
14471  }
14472  return res;
14473 }
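// Example (illustrative sketch, not part of vk_mem_alloc.h): creating a
// uniform buffer together with its memory in one call, assuming `allocator`
// is a valid VmaAllocator.
//
//   VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufCreateInfo.size = 65536;
//   bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkBuffer buffer;
//   VmaAllocation allocation;
//   VkResult res = vmaCreateBuffer(
//       allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
//   // ...
//   vmaDestroyBuffer(allocator, buffer, allocation);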
14474 
14475 void vmaDestroyBuffer(
14476  VmaAllocator allocator,
14477  VkBuffer buffer,
14478  VmaAllocation allocation)
14479 {
14480  VMA_ASSERT(allocator);
14481 
14482  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14483  {
14484  return;
14485  }
14486 
14487  VMA_DEBUG_LOG("vmaDestroyBuffer");
14488 
14489  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14490 
14491 #if VMA_RECORDING_ENABLED
14492  if(allocator->GetRecorder() != VMA_NULL)
14493  {
14494  allocator->GetRecorder()->RecordDestroyBuffer(
14495  allocator->GetCurrentFrameIndex(),
14496  allocation);
14497  }
14498 #endif
14499 
14500  if(buffer != VK_NULL_HANDLE)
14501  {
14502  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14503  }
14504 
14505  if(allocation != VK_NULL_HANDLE)
14506  {
14507  allocator->FreeMemory(allocation);
14508  }
14509 }
14510 
14511 VkResult vmaCreateImage(
14512  VmaAllocator allocator,
14513  const VkImageCreateInfo* pImageCreateInfo,
14514  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14515  VkImage* pImage,
14516  VmaAllocation* pAllocation,
14517  VmaAllocationInfo* pAllocationInfo)
14518 {
14519  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14520 
14521  if(pImageCreateInfo->extent.width == 0 ||
14522  pImageCreateInfo->extent.height == 0 ||
14523  pImageCreateInfo->extent.depth == 0 ||
14524  pImageCreateInfo->mipLevels == 0 ||
14525  pImageCreateInfo->arrayLayers == 0)
14526  {
14527  return VK_ERROR_VALIDATION_FAILED_EXT;
14528  }
14529 
14530  VMA_DEBUG_LOG("vmaCreateImage");
14531 
14532  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14533 
14534  *pImage = VK_NULL_HANDLE;
14535  *pAllocation = VK_NULL_HANDLE;
14536 
14537  // 1. Create VkImage.
14538  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14539  allocator->m_hDevice,
14540  pImageCreateInfo,
14541  allocator->GetAllocationCallbacks(),
14542  pImage);
14543  if(res >= 0)
14544  {
14545  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14546  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14547  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14548 
14549  // 2. Allocate memory using allocator.
14550  VkMemoryRequirements vkMemReq = {};
14551  bool requiresDedicatedAllocation = false;
14552  bool prefersDedicatedAllocation = false;
14553  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14554  requiresDedicatedAllocation, prefersDedicatedAllocation);
14555 
14556  res = allocator->AllocateMemory(
14557  vkMemReq,
14558  requiresDedicatedAllocation,
14559  prefersDedicatedAllocation,
14560  VK_NULL_HANDLE, // dedicatedBuffer
14561  *pImage, // dedicatedImage
14562  *pAllocationCreateInfo,
14563  suballocType,
14564  pAllocation);
14565 
14566 #if VMA_RECORDING_ENABLED
14567  if(allocator->GetRecorder() != VMA_NULL)
14568  {
14569  allocator->GetRecorder()->RecordCreateImage(
14570  allocator->GetCurrentFrameIndex(),
14571  *pImageCreateInfo,
14572  *pAllocationCreateInfo,
14573  *pAllocation);
14574  }
14575 #endif
14576 
14577  if(res >= 0)
14578  {
14579  // 3. Bind image with memory.
14580  res = allocator->BindImageMemory(*pAllocation, *pImage);
14581  if(res >= 0)
14582  {
14583  // All steps succeeded.
14584  #if VMA_STATS_STRING_ENABLED
14585  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14586  #endif
14587  if(pAllocationInfo != VMA_NULL)
14588  {
14589  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14590  }
14591 
14592  return VK_SUCCESS;
14593  }
14594  allocator->FreeMemory(*pAllocation);
14595  *pAllocation = VK_NULL_HANDLE;
14596  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14597  *pImage = VK_NULL_HANDLE;
14598  return res;
14599  }
14600  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14601  *pImage = VK_NULL_HANDLE;
14602  return res;
14603  }
14604  return res;
14605 }
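// Example (illustrative sketch, not part of vk_mem_alloc.h): creating a
// sampled 2D image analogously to the vmaCreateBuffer() example above.
//
//   VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//   imgCreateInfo.extent = { 1024, 1024, 1 };
//   imgCreateInfo.mipLevels = 1;
//   imgCreateInfo.arrayLayers = 1;
//   imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage image;
//   VmaAllocation allocation;
//   VkResult res = vmaCreateImage(
//       allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, nullptr);
//   // ...
//   vmaDestroyImage(allocator, image, allocation);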
14606 
14607 void vmaDestroyImage(
14608  VmaAllocator allocator,
14609  VkImage image,
14610  VmaAllocation allocation)
14611 {
14612  VMA_ASSERT(allocator);
14613 
14614  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14615  {
14616  return;
14617  }
14618 
14619  VMA_DEBUG_LOG("vmaDestroyImage");
14620 
14621  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14622 
14623 #if VMA_RECORDING_ENABLED
14624  if(allocator->GetRecorder() != VMA_NULL)
14625  {
14626  allocator->GetRecorder()->RecordDestroyImage(
14627  allocator->GetCurrentFrameIndex(),
14628  allocation);
14629  }
14630 #endif
14631 
14632  if(image != VK_NULL_HANDLE)
14633  {
14634  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14635  }
14636  if(allocation != VK_NULL_HANDLE)
14637  {
14638  allocator->FreeMemory(allocation);
14639  }
14640 }
14641 
14642 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1589
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1891
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1646
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1620
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2216
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1601
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1848
Definition: vk_mem_alloc.h:1951
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1593
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2316
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1643
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2586
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2105
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1488
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2197
Definition: vk_mem_alloc.h:1928
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1582
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2004
Definition: vk_mem_alloc.h:1875
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1655
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2133
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1709
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1640
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1879
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1781
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1598
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1780
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2590
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1672
VmaStatInfo total
Definition: vk_mem_alloc.h:1790
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2598
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1988
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2581
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1599
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1524
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1649
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2147
Definition: vk_mem_alloc.h:2141
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1716
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2326
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1594
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1618
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2025
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2167
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2203
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1580
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2150
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1826
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2576
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2594
Definition: vk_mem_alloc.h:1865
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2012
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1597
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1786
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1530
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1551
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1622
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1556
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2596
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1999
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region available for a new allocation.
Definition: vk_mem_alloc.h:2213
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
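A short sketch of the build/print/free cycle, assuming a valid `allocator`; note the returned string must be released with vmaFreeStatsString():

#include <stdio.h>
#include "vk_mem_alloc.h"

// Sketch: dump the allocator's statistics as JSON to stdout.
void DumpStatsJson(VmaAllocator allocator)
{
    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    if (statsString != NULL)
    {
        puts(statsString);
        vmaFreeStatsString(allocator, statsString);
    }
}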
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1590
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1769
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2162
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1543
Definition: vk_mem_alloc.h:2137
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
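Since this is the library's central convenience entry point, a minimal sketch may help; `allocator` is assumed valid and the helper name is illustrative:

#include <stddef.h>
#include "vk_mem_alloc.h"

// Sketch: create a device-local vertex buffer and its memory in one call.
VkResult CreateVertexBuffer(VmaAllocator allocator, VkDeviceSize size,
                            VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                          VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = { 0 };
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    // The last parameter (pAllocationInfo) is optional and may be NULL.
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, NULL);
}

Buffer and memory created this way are released together with vmaDestroyBuffer().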
Definition: vk_mem_alloc.h:1935
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1782
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1547
Definition: vk_mem_alloc.h:1962
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2153
Definition: vk_mem_alloc.h:1874
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1596
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1994
Definition: vk_mem_alloc.h:1985
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1772
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1592
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2175
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1658
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2206
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1983
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2018
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1697
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1788
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it...
Definition: vk_mem_alloc.h:1915
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1781
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1603
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1628
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1545
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1602
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
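A sketch of a map/read/unmap round trip, assuming `allocator` and a host-visible `allocation`; the invalidate step matters only on non-HOST_COHERENT memory types:

#include <string.h>
#include "vk_mem_alloc.h"

// Sketch: read data back from a mapped allocation.
VkResult ReadBack(VmaAllocator allocator, VmaAllocation allocation,
                  void* dst, size_t size)
{
    void* pData = NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if (res == VK_SUCCESS)
    {
        // Make device writes visible to the host on non-coherent memory.
        vmaInvalidateAllocation(allocator, allocation, 0, size);
        memcpy(dst, pData, size);
        vmaUnmapMemory(allocator, allocation); // each map needs a matching unmap
    }
    return res;
}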
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2189
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1595
Definition: vk_mem_alloc.h:1946
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1636
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2340
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1652
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1781
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1778
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:2194
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:1955
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2321
Definition: vk_mem_alloc.h:1969
Definition: vk_mem_alloc.h:1981
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2592
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1588
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
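A minimal sketch of allocator creation, assuming `physicalDevice` and `device` come from ordinary Vulkan initialization; all other VmaAllocatorCreateInfo members may stay zero for defaults:

#include "vk_mem_alloc.h"

// Sketch: create an allocator with default settings.
VkResult InitAllocator(VkPhysicalDevice physicalDevice, VkDevice device,
                       VmaAllocator* pAllocator)
{
    VmaAllocatorCreateInfo allocatorInfo = { 0 };
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}

The allocator is destroyed with vmaDestroyAllocator() once every allocation made from it has been freed.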
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1776
Definition: vk_mem_alloc.h:1831
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2143
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1625
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:1774
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1600
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1604
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1902
Definition: vk_mem_alloc.h:1976
Definition: vk_mem_alloc.h:1858
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2335
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1578
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1591
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2122
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
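A hedged sketch of the intended fallback pattern; the helper and its return convention are illustrative:

#include "vk_mem_alloc.h"

// Sketch: try to grow an allocation in place. Nothing is moved or
// copied; VK_SUCCESS is returned only if the free space directly
// after the allocation is large enough.
VkBool32 TryGrowInPlace(VmaAllocator allocator, VmaAllocation allocation,
                        VkDeviceSize newSize)
{
    return vmaResizeAllocation(allocator, allocation, newSize) == VK_SUCCESS
        ? VK_TRUE   // caller can keep using the same VmaAllocation
        : VK_FALSE; // caller must allocate anew and copy
}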
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2302
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1966
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2087
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1782
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
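A sketch of the per-frame check used with allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT; `RecreateResource` is a hypothetical application callback, not part of VMA:

#include "vk_mem_alloc.h"

// Sketch: touch a lost-capable allocation before using it this frame.
void UseOrRecreate(VmaAllocator allocator, VmaAllocation allocation,
                   void (*RecreateResource)(void))
{
    if (vmaTouchAllocation(allocator, allocation))
    {
        // Alive: the allocation is now marked as used in the current frame.
    }
    else
    {
        // Lost: its memory was reclaimed for another allocation.
        RecreateResource();
    }
}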
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1612
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1789
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2200
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1782
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2307